Feat/add python version test envs (#2774)

John Lockwood
2025-06-14 05:13:16 -07:00
committed by GitHub
parent a8ace18607
commit 7c0c4a03c4
6 changed files with 151 additions and 71 deletions

View File

@@ -16,18 +16,19 @@ To make a contribution, follow these steps:
For more details about pull requests, please read [GitHub's guides](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request).
-### 📦 Package manager
+### 📦 Development Environment
-We use `poetry` as our package manager. You can install poetry by following the instructions [here](https://python-poetry.org/docs/#installation).
-Please DO NOT use pip or conda to install the dependencies. Instead, use poetry:
+We use `hatch` for managing development environments. To set up:
```bash
-make install_all
+# Activate environment for specific Python version:
+hatch shell dev_py_3_9 # Python 3.9
+hatch shell dev_py_3_10 # Python 3.10
+hatch shell dev_py_3_11 # Python 3.11
-#activate
-poetry shell
+# The environment will automatically install all dev dependencies
+# Run tests within the activated shell:
make test
```
### 📌 Pre-commit
@@ -40,16 +41,21 @@ pre-commit install
### 🧪 Testing
-We use `pytest` to test our code. You can run the tests by running the following command:
+We use `pytest` to test our code across multiple Python versions. You can run tests using:
```bash
-poetry run pytest tests
-# or
+# Run tests with default Python version
make test
+# Test specific Python versions:
+make test-py-3.9 # Python 3.9 environment
+make test-py-3.10 # Python 3.10 environment
+make test-py-3.11 # Python 3.11 environment
+# When using hatch shells, run tests with:
+make test # After activating a shell with hatch shell test_XX
```
-Several packages have been removed from Poetry to make the package lighter. Therefore, it is recommended to run `make install_all` to install the remaining packages and ensure all tests pass. Make sure that all tests pass before submitting a pull request.
+Make sure that all tests pass across all supported Python versions before submitting a pull request.
We look forward to your pull requests and can't wait to see your contributions!

View File

@@ -41,3 +41,12 @@ clean:
test:
	hatch run test
+test-py-3.9:
+	hatch run dev_py_3_9:test
+test-py-3.10:
+	hatch run dev_py_3_10:test
+test-py-3.11:
+	hatch run dev_py_3_11:test

View File

@@ -28,8 +28,8 @@ from mem0.memory.utils import (
get_fact_retrieval_messages,
parse_messages,
parse_vision_messages,
-remove_code_blocks,
process_telemetry_filters,
+remove_code_blocks,
)
from mem0.utils.factory import EmbedderFactory, LlmFactory, VectorStoreFactory
@@ -341,7 +341,6 @@ class Memory(MemoryBase):
if not new_retrieved_facts:
logger.debug("No new facts retrieved from input. Skipping memory update LLM call.")
-return []
retrieved_old_memory = []
new_message_embeddings = {}
@@ -369,6 +368,7 @@ class Memory(MemoryBase):
temp_uuid_mapping[str(idx)] = item["id"]
retrieved_old_memory[idx]["id"] = str(idx)
+if new_retrieved_facts:
function_calling_prompt = get_update_memory_messages(
retrieved_old_memory, new_retrieved_facts, self.config.custom_update_memory_prompt
)
@@ -388,6 +388,8 @@ class Memory(MemoryBase):
except Exception as e:
logging.error(f"Invalid JSON response: {e}")
new_memories_with_actions = {}
+else:
+new_memories_with_actions = {}
returned_memories = []
try:
@@ -1162,13 +1164,11 @@ class AsyncMemory(MemoryBase):
response = remove_code_blocks(response)
new_retrieved_facts = json.loads(response)["facts"]
except Exception as e:
logging.error(f"Error in new_retrieved_facts: {e}")
new_retrieved_facts = []
if not new_retrieved_facts:
logger.info("No new facts retrieved from input. Skipping memory update LLM call.")
return []
logging.error(f"Error in new_retrieved_facts: {e}")
new_retrieved_facts = []
logger.debug("No new facts retrieved from input. Skipping memory update LLM call.")
retrieved_old_memory = []
new_message_embeddings = {}
@@ -1200,6 +1200,7 @@ class AsyncMemory(MemoryBase):
temp_uuid_mapping[str(idx)] = item["id"]
retrieved_old_memory[idx]["id"] = str(idx)
+if new_retrieved_facts:
function_calling_prompt = get_update_memory_messages(
retrieved_old_memory, new_retrieved_facts, self.config.custom_update_memory_prompt
)
@@ -1210,19 +1211,12 @@ class AsyncMemory(MemoryBase):
response_format={"type": "json_object"},
)
except Exception as e:
response = ""
logging.error(f"Error in new memory actions response: {e}")
response = ""
try:
response = remove_code_blocks(response)
new_memories_with_actions = json.loads(response)
except Exception as e:
-new_memories_with_actions = {}
-if not new_memories_with_actions:
-logger.info("No new facts retrieved from input (async). Skipping memory update LLM call.")
-return []
+logging.error(f"Invalid JSON response: {e}")
+new_memories_with_actions = {}
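Taken together, these hunks make both the sync and async add flows skip the second ("update memory") LLM call when fact extraction yields nothing, instead of returning early. The sketch below is an illustrative reconstruction of that control flow, not the actual mem0 source; only the log messages and the overall shape come from the diff, and the helper names (`_strip_code_fences`, `build_update_prompt`) are stand-ins.

```python
import json
import logging

logger = logging.getLogger(__name__)


def _strip_code_fences(text: str) -> str:
    # Stand-in for mem0's remove_code_blocks helper (illustrative only).
    return text.strip().removeprefix("```json").removeprefix("```").removesuffix("```").strip()


def add_flow_sketch(generate_response, build_update_prompt, old_memories, fact_prompt):
    """Guarded flow: the update-memory LLM call only happens when facts were extracted."""
    try:
        raw = generate_response(fact_prompt)
        new_retrieved_facts = json.loads(_strip_code_fences(raw))["facts"]
    except Exception as e:
        logging.error(f"Error in new_retrieved_facts: {e}")
        new_retrieved_facts = []

    if not new_retrieved_facts:
        # No early return: execution continues, so telemetry at the end of the add path still fires.
        logger.debug("No new facts retrieved from input. Skipping memory update LLM call.")

    new_memories_with_actions = {}
    if new_retrieved_facts:
        prompt = build_update_prompt(old_memories, new_retrieved_facts)
        try:
            raw = generate_response(prompt)
            new_memories_with_actions = json.loads(_strip_code_fences(raw))
        except Exception as e:
            logging.error(f"Invalid JSON response: {e}")
            new_memories_with_actions = {}
    return new_memories_with_actions


# Example: an "LLM" returning an empty string triggers exactly one call and an empty result.
calls = []
result = add_flow_sketch(lambda p: calls.append(p) or "", lambda o, f: f"update {f}", [], "extract facts")
assert result == {} and len(calls) == 1
```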

View File

@@ -26,6 +26,32 @@ graph = [
"neo4j>=5.23.1",
"rank-bm25>=0.2.2",
]
vector_stores = [
"vecs>=0.4.0",
"chromadb>=0.4.24",
"weaviate-client>=4.4.0",
"pinecone<7.0.0",
"pinecone-text>=0.1.1",
"faiss-cpu>=1.7.4",
"upstash-vector>=0.1.0",
"azure-search-documents>=11.4.0b8",
]
llms = [
"groq>=0.3.0",
"together>=0.2.10",
"litellm>=0.1.0",
"ollama>=0.1.0",
"vertexai>=0.1.0",
"google-generativeai>=0.3.0",
]
extras = [
"boto3>=1.34.0",
"langchain-community>=0.0.0",
"sentence-transformers>=2.2.2",
"elasticsearch>=8.0.0",
"opensearch-py>=2.0.0",
"langchain-memgraph>=0.1.0",
]
test = [
"pytest>=8.2.2",
"pytest-mock>=3.14.0",
@@ -53,6 +79,36 @@ only-include = ["mem0"]
[tool.hatch.build.targets.wheel.shared-data]
"README.md" = "README.md"
[tool.hatch.envs.dev_py_3_9]
python = "3.9"
features = [
"test",
"graph",
"vector_stores",
"llms",
"extras",
]
[tool.hatch.envs.dev_py_3_10]
python = "3.10"
features = [
"test",
"graph",
"vector_stores",
"llms",
"extras",
]
[tool.hatch.envs.dev_py_3_11]
python = "3.11"
features = [
"test",
"graph",
"vector_stores",
"llms",
"extras",
]
[tool.hatch.envs.default.scripts]
format = [
"ruff format",

View File

@@ -40,10 +40,12 @@ class TestAddToVectorStoreErrors:
return memory
-def test_empty_llm_response_fact_extraction(self, mock_memory, caplog):
+def test_empty_llm_response_fact_extraction(self, mocker, mock_memory, caplog):
"""Test empty response from LLM during fact extraction"""
# Setup
mock_memory.llm.generate_response.return_value = ""
+mock_capture_event = mocker.MagicMock()
+mocker.patch("mem0.memory.main.capture_event", mock_capture_event)
# Execute
with caplog.at_level(logging.ERROR):
@@ -52,9 +54,10 @@ class TestAddToVectorStoreErrors:
)
# Verify
-assert mock_memory.llm.generate_response.call_count == 2
+assert mock_memory.llm.generate_response.call_count == 1
assert result == [] # Should return empty list when no memories processed
assert "Error in new_retrieved_facts" in caplog.text
+assert mock_capture_event.call_count == 1
def test_empty_llm_response_memory_actions(self, mock_memory, caplog):
"""Test empty response from LLM during memory actions"""
@@ -94,25 +97,31 @@ class TestAsyncAddToVectorStoreErrors:
"""Test empty response in AsyncMemory._add_to_vector_store"""
mocker.patch("mem0.utils.factory.EmbedderFactory.create", return_value=MagicMock())
mock_async_memory.llm.generate_response.return_value = ""
+mock_capture_event = mocker.MagicMock()
+mocker.patch("mem0.memory.main.capture_event", mock_capture_event)
with caplog.at_level(logging.ERROR):
result = await mock_async_memory._add_to_vector_store(
messages=[{"role": "user", "content": "test"}], metadata={}, filters={}, infer=True
messages=[{"role": "user", "content": "test"}], metadata={}, effective_filters={}, infer=True
)
assert mock_async_memory.llm.generate_response.call_count == 1
assert result == []
assert "Error in new_retrieved_facts" in caplog.text
+assert mock_capture_event.call_count == 1
@pytest.mark.asyncio
async def test_async_empty_llm_response_memory_actions(self, mock_async_memory, caplog, mocker):
"""Test empty response in AsyncMemory._add_to_vector_store"""
mocker.patch("mem0.utils.factory.EmbedderFactory.create", return_value=MagicMock())
mock_async_memory.llm.generate_response.side_effect = ['{"facts": ["test fact"]}', ""]
+mock_capture_event = mocker.MagicMock()
+mocker.patch("mem0.memory.main.capture_event", mock_capture_event)
with caplog.at_level(logging.ERROR):
result = await mock_async_memory._add_to_vector_store(
messages=[{"role": "user", "content": "test"}], metadata={}, filters={}, infer=True
messages=[{"role": "user", "content": "test"}], metadata={}, effective_filters={}, infer=True
)
assert result == []
assert "Invalid JSON response" in caplog.text
+assert mock_capture_event.call_count == 1
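A pattern worth noting in these tests: telemetry is stubbed with pytest-mock's `mocker` fixture so the `capture_event` call count can be asserted deterministically. A minimal, self-contained sketch of the same idea; the function under test here is a hypothetical stand-in, not mem0's real `_add_to_vector_store`.

```python
def add_to_store(capture_event, facts):
    # Hypothetical stand-in: emits exactly one telemetry event per call, like the real add path.
    capture_event("mem0.add", {"facts": len(facts)})
    return []


def test_telemetry_called_once(mocker):
    # mocker builds the MagicMock; in the real tests it is then swapped in with
    # mocker.patch("mem0.memory.main.capture_event", mock_capture_event).
    mock_capture_event = mocker.MagicMock()

    result = add_to_store(mock_capture_event, facts=[])

    assert result == []
    assert mock_capture_event.call_count == 1
```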

View File

@@ -19,13 +19,14 @@ def mock_openai():
def memory_instance():
with (
patch("mem0.utils.factory.EmbedderFactory") as mock_embedder,
patch("mem0.utils.factory.VectorStoreFactory") as mock_vector_store,
patch("mem0.memory.main.VectorStoreFactory") as mock_vector_store,
patch("mem0.utils.factory.LlmFactory") as mock_llm,
patch("mem0.memory.telemetry.capture_event"),
patch("mem0.memory.graph_memory.MemoryGraph"),
):
mock_embedder.create.return_value = Mock()
mock_vector_store.create.return_value = Mock()
+mock_vector_store.create.return_value.search.return_value = []
mock_llm.create.return_value = Mock()
config = MemoryConfig(version="v1.1")
@@ -37,13 +38,14 @@ def memory_instance():
def memory_custom_instance():
with (
patch("mem0.utils.factory.EmbedderFactory") as mock_embedder,
patch("mem0.utils.factory.VectorStoreFactory") as mock_vector_store,
patch("mem0.memory.main.VectorStoreFactory") as mock_vector_store,
patch("mem0.utils.factory.LlmFactory") as mock_llm,
patch("mem0.memory.telemetry.capture_event"),
patch("mem0.memory.graph_memory.MemoryGraph"),
):
mock_embedder.create.return_value = Mock()
mock_vector_store.create.return_value = Mock()
+mock_vector_store.create.return_value.search.return_value = []
mock_llm.create.return_value = Mock()
config = MemoryConfig(
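Both fixtures now patch `VectorStoreFactory` where `mem0.memory.main` imports it (so the mock is the one the constructed `Memory` instance actually uses) and give the mocked store an empty `search` result. A small sketch of why the empty result matters, assuming downstream code iterates over whatever `search` returns:

```python
from unittest.mock import Mock

mock_vector_store = Mock()
mock_vector_store.create.return_value = Mock()
mock_vector_store.create.return_value.search.return_value = []  # no pre-existing memories

store = mock_vector_store.create()
# Without the explicit [], iterating over a bare Mock's search() result would raise TypeError.
retrieved_old_memory = [{"id": m.id, "text": m.payload} for m in store.search("query", limit=5)]
assert retrieved_old_memory == []
```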
@@ -250,7 +252,11 @@ def test_get_all(memory_instance, version, enable_graph, expected_result):
def test_custom_prompts(memory_custom_instance):
messages = [{"role": "user", "content": "Test message"}]
+from mem0.embeddings.mock import MockEmbeddings
+memory_custom_instance.llm.generate_response = Mock()
+memory_custom_instance.llm.generate_response.return_value = '{"facts": ["fact1", "fact2"]}'
+memory_custom_instance.embedding_model = MockEmbeddings()
with patch("mem0.memory.main.parse_messages", return_value="Test message") as mock_parse_messages:
with patch(
@@ -273,7 +279,7 @@ def test_custom_prompts(memory_custom_instance):
## custom update memory prompt
##
mock_get_update_memory_messages.assert_called_once_with(
-[], [], memory_custom_instance.config.custom_update_memory_prompt
+[], ["fact1", "fact2"], memory_custom_instance.config.custom_update_memory_prompt
)
memory_custom_instance.llm.generate_response.assert_any_call(
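The expected arguments change from `[], []` to `[], ["fact1", "fact2"]` because the test now mocks the LLM to return a facts payload, so the parsed facts reach `get_update_memory_messages`. A tiny illustration of that data flow; everything other than the mocked JSON payload is illustrative.

```python
import json
from unittest.mock import Mock

llm = Mock()
llm.generate_response.return_value = '{"facts": ["fact1", "fact2"]}'

# What the add path does with the mocked response before building the update prompt:
new_retrieved_facts = json.loads(llm.generate_response())["facts"]
assert new_retrieved_facts == ["fact1", "fact2"]  # these become the second argument asserted above
```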