Feat/add python version test envs (#2774)

Author: John Lockwood
Date: 2025-06-14 05:13:16 -07:00
Committed by: GitHub
Parent: a8ace18607
Commit: 7c0c4a03c4
6 changed files with 151 additions and 71 deletions

[Changed file (path not shown): vector store error-handling tests]

@@ -40,10 +40,12 @@ class TestAddToVectorStoreErrors:
         return memory
 
-    def test_empty_llm_response_fact_extraction(self, mock_memory, caplog):
+    def test_empty_llm_response_fact_extraction(self, mocker, mock_memory, caplog):
         """Test empty response from LLM during fact extraction"""
         # Setup
         mock_memory.llm.generate_response.return_value = ""
+        mock_capture_event = mocker.MagicMock()
+        mocker.patch("mem0.memory.main.capture_event", mock_capture_event)
 
         # Execute
         with caplog.at_level(logging.ERROR):
@@ -52,9 +54,10 @@ class TestAddToVectorStoreErrors:
             )
 
         # Verify
-        assert mock_memory.llm.generate_response.call_count == 2
+        assert mock_memory.llm.generate_response.call_count == 1
         assert result == []  # Should return empty list when no memories processed
         assert "Error in new_retrieved_facts" in caplog.text
+        assert mock_capture_event.call_count == 1
 
     def test_empty_llm_response_memory_actions(self, mock_memory, caplog):
         """Test empty response from LLM during memory actions"""
@@ -94,25 +97,31 @@ class TestAsyncAddToVectorStoreErrors:
         """Test empty response in AsyncMemory._add_to_vector_store"""
         mocker.patch("mem0.utils.factory.EmbedderFactory.create", return_value=MagicMock())
         mock_async_memory.llm.generate_response.return_value = ""
+        mock_capture_event = mocker.MagicMock()
+        mocker.patch("mem0.memory.main.capture_event", mock_capture_event)
 
         with caplog.at_level(logging.ERROR):
             result = await mock_async_memory._add_to_vector_store(
-                messages=[{"role": "user", "content": "test"}], metadata={}, filters={}, infer=True
+                messages=[{"role": "user", "content": "test"}], metadata={}, effective_filters={}, infer=True
             )
 
         assert mock_async_memory.llm.generate_response.call_count == 1
         assert result == []
         assert "Error in new_retrieved_facts" in caplog.text
+        assert mock_capture_event.call_count == 1
 
     @pytest.mark.asyncio
     async def test_async_empty_llm_response_memory_actions(self, mock_async_memory, caplog, mocker):
         """Test empty response in AsyncMemory._add_to_vector_store"""
         mocker.patch("mem0.utils.factory.EmbedderFactory.create", return_value=MagicMock())
         mock_async_memory.llm.generate_response.side_effect = ['{"facts": ["test fact"]}', ""]
+        mock_capture_event = mocker.MagicMock()
+        mocker.patch("mem0.memory.main.capture_event", mock_capture_event)
 
         with caplog.at_level(logging.ERROR):
             result = await mock_async_memory._add_to_vector_store(
-                messages=[{"role": "user", "content": "test"}], metadata={}, filters={}, infer=True
+                messages=[{"role": "user", "content": "test"}], metadata={}, effective_filters={}, infer=True
             )
 
         assert result == []
         assert "Invalid JSON response" in caplog.text
+        assert mock_capture_event.call_count == 1
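
The async tests repeat the same pattern under `@pytest.mark.asyncio`. A self-contained sketch of mocking an awaited dependency (requires pytest, pytest-asyncio, and pytest-mock; AsyncStore and AsyncService are hypothetical stand-ins):

    from unittest.mock import AsyncMock

    import pytest

    class AsyncStore:
        async def search(self, query: str) -> list:
            # Stand-in for a network call; must be mocked in tests.
            raise RuntimeError("network call; must be mocked in tests")

    class AsyncService:
        def __init__(self, store: AsyncStore):
            self.store = store

        async def add(self, text: str) -> list:
            return await self.store.search(text)

    @pytest.mark.asyncio
    async def test_add_with_mocked_async_store(mocker):
        store = AsyncStore()
        # AsyncMock so the patched method stays awaitable.
        mocker.patch.object(store, "search", AsyncMock(return_value=[]))
        assert await AsyncService(store).add("test") == []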

[Changed file (path not shown): Memory fixtures and custom prompt tests]

@@ -19,13 +19,14 @@ def mock_openai():
 def memory_instance():
     with (
         patch("mem0.utils.factory.EmbedderFactory") as mock_embedder,
-        patch("mem0.utils.factory.VectorStoreFactory") as mock_vector_store,
+        patch("mem0.memory.main.VectorStoreFactory") as mock_vector_store,
         patch("mem0.utils.factory.LlmFactory") as mock_llm,
         patch("mem0.memory.telemetry.capture_event"),
         patch("mem0.memory.graph_memory.MemoryGraph"),
     ):
         mock_embedder.create.return_value = Mock()
         mock_vector_store.create.return_value = Mock()
+        mock_vector_store.create.return_value.search.return_value = []
         mock_llm.create.return_value = Mock()
 
         config = MemoryConfig(version="v1.1")
@@ -37,13 +38,14 @@ def memory_instance():
 def memory_custom_instance():
     with (
         patch("mem0.utils.factory.EmbedderFactory") as mock_embedder,
-        patch("mem0.utils.factory.VectorStoreFactory") as mock_vector_store,
+        patch("mem0.memory.main.VectorStoreFactory") as mock_vector_store,
         patch("mem0.utils.factory.LlmFactory") as mock_llm,
         patch("mem0.memory.telemetry.capture_event"),
         patch("mem0.memory.graph_memory.MemoryGraph"),
     ):
         mock_embedder.create.return_value = Mock()
         mock_vector_store.create.return_value = Mock()
+        mock_vector_store.create.return_value.search.return_value = []
         mock_llm.create.return_value = Mock()
 
         config = MemoryConfig(
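
Both fixture hunks repoint the patch target from `mem0.utils.factory.VectorStoreFactory` to `mem0.memory.main.VectorStoreFactory` and stub `search` to return a list. The likely reason, assuming `mem0.memory.main` does `from mem0.utils.factory import VectorStoreFactory`, is the standard "patch where it is looked up" rule: once a module has imported a name into its own namespace, patching the defining module no longer affects that module's reference. A sketch across two hypothetical modules:

    # factory.py (hypothetical)
    class VectorStoreFactory:
        @classmethod
        def create(cls, provider: str):
            raise RuntimeError("real store; must be mocked in tests")

    # main.py (hypothetical) -- binds its own reference to the name
    from factory import VectorStoreFactory

    def build_store():
        return VectorStoreFactory.create("default")

    # test_main.py
    from unittest.mock import patch

    import main

    def test_build_store_sees_the_mock():
        # Patching "factory.VectorStoreFactory" would miss main's reference;
        # patch the name in the namespace where it is looked up.
        with patch("main.VectorStoreFactory") as mock_factory:
            mock_factory.create.return_value.search.return_value = []
            assert main.build_store().search("query") == []

Stubbing `search` to return `[]` on the mock, as the fixtures now do, keeps any code that iterates over search results from choking on a bare Mock.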
@@ -250,7 +252,11 @@ def test_get_all(memory_instance, version, enable_graph, expected_result):
 def test_custom_prompts(memory_custom_instance):
     messages = [{"role": "user", "content": "Test message"}]
+    from mem0.embeddings.mock import MockEmbeddings
+    memory_custom_instance.llm.generate_response = Mock()
+    memory_custom_instance.llm.generate_response.return_value = '{"facts": ["fact1", "fact2"]}'
+    memory_custom_instance.embedding_model = MockEmbeddings()
 
     with patch("mem0.memory.main.parse_messages", return_value="Test message") as mock_parse_messages:
         with patch(
@@ -273,7 +279,7 @@ def test_custom_prompts(memory_custom_instance):
         ## custom update memory prompt
         ##
         mock_get_update_memory_messages.assert_called_once_with(
-            [], [], memory_custom_instance.config.custom_update_memory_prompt
+            [], ["fact1", "fact2"], memory_custom_instance.config.custom_update_memory_prompt
         )
 
         memory_custom_instance.llm.generate_response.assert_any_call(
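
The last two hunks are connected: because `generate_response` is stubbed with a concrete JSON payload, the facts reaching `get_update_memory_messages` are `["fact1", "fact2"]` rather than an empty list, so the assertion checks real data flowing through the pipeline. A minimal sketch of the idea (Pipeline is a hypothetical stand-in for the fact-extraction step):

    import json
    from unittest.mock import Mock

    class Pipeline:
        """Hypothetical stand-in for the fact-extraction step."""

        def __init__(self, llm):
            self.llm = llm

        def extract_facts(self, message: str) -> list:
            raw = self.llm.generate_response(message)
            return json.loads(raw)["facts"]

    def test_stubbed_llm_yields_concrete_facts():
        llm = Mock()
        # A real JSON string, not Mock's default return value, so json.loads works.
        llm.generate_response.return_value = '{"facts": ["fact1", "fact2"]}'
        assert Pipeline(llm).extract_facts("Test message") == ["fact1", "fact2"]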