Fixes: Mem0 Setup, Logging, Docs (#3080)

Author: Jainish
Date: 2025-07-04 03:10:39 +05:30
Committed by: GitHub
Parent: eb7c712aa6
Commit: a20b68fcec
5 changed files with 34 additions and 29 deletions


@@ -31,13 +31,18 @@ For detailed guidance on pull requests, refer to [GitHub's documentation](https:
 We use `hatch` as our package manager. Install it by following the [official instructions](https://hatch.pypa.io/latest/install/).
-⚠️ **Do NOT use `pip` or `conda` for dependency management.** Instead, run:
+⚠️ **Do NOT use `pip` or `conda` for dependency management.** Instead, follow these steps in order:
 ```bash
-make install_all
+# 1. Install base dependencies
+make install
 
-# Activate virtual environment
-hatch shell
+# 2. Activate the virtual environment (this also installs its dependencies)
+hatch shell                 # default env
+hatch -e dev_py_3_11 shell  # dev_py_3_11 env (differences are mentioned in pyproject.toml)
+
+# 3. Install all optional dependencies
+make install_all
 ```
 ---
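As a quick sanity check after setup, the standard hatch CLI can list every environment defined in `pyproject.toml`, which is an easy way to confirm that `dev_py_3_11` exists before entering it:

```bash
# List the environments defined in pyproject.toml (standard hatch command);
# dev_py_3_11 should appear alongside the default env.
hatch env show
```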


@@ -336,7 +336,7 @@ class Memory(MemoryBase):
             response = remove_code_blocks(response)
             new_retrieved_facts = json.loads(response)["facts"]
         except Exception as e:
-            logging.error(f"Error in new_retrieved_facts: {e}")
+            logger.error(f"Error in new_retrieved_facts: {e}")
             new_retrieved_facts = []

         if not new_retrieved_facts:
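Although the hunks only show call sites, the `logging.*` → `logger.*` change implies a module-level logger, presumably created once near the top of each touched file. A minimal sketch of that standard pattern:

```python
import logging

# Module-level logger named after the defining module (e.g. "mem0.memory.main").
# Unlike calling logging.error(...) on the root logger, this lets applications
# filter or silence the library per-module, e.g.:
#   logging.getLogger("mem0").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
```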
@@ -360,7 +360,7 @@ class Memory(MemoryBase):
         for item in retrieved_old_memory:
             unique_data[item["id"]] = item
         retrieved_old_memory = list(unique_data.values())
-        logging.info(f"Total existing memories: {len(retrieved_old_memory)}")
+        logger.info(f"Total existing memories: {len(retrieved_old_memory)}")

         # mapping UUIDs with integers for handling UUID hallucinations
         temp_uuid_mapping = {}
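The "mapping UUIDs with integers" comment refers to a safeguard against the LLM corrupting long UUID strings: existing memories are shown to the model under short integer keys, and keys in its response are translated back. A rough sketch of the idea, with variable names taken from the hunk (the sample data is illustrative):

```python
import uuid

# Illustrative input; in mem0 this comes from the vector store search.
retrieved_old_memory = [
    {"id": str(uuid.uuid4()), "text": "User likes tea"},
    {"id": str(uuid.uuid4()), "text": "User lives in Pune"},
]

# Swap real UUIDs for small integer keys before prompting the LLM, so the
# model can only reference memories by keys it cannot plausibly mangle.
temp_uuid_mapping = {}
for idx, item in enumerate(retrieved_old_memory):
    temp_uuid_mapping[str(idx)] = item["id"]
    item["id"] = str(idx)

# When the LLM answers with "id": "0", map it back to the stored UUID.
real_id = temp_uuid_mapping["0"]
```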
@@ -379,14 +379,14 @@ class Memory(MemoryBase):
                     response_format={"type": "json_object"},
                 )
             except Exception as e:
-                logging.error(f"Error in new memory actions response: {e}")
+                logger.error(f"Error in new memory actions response: {e}")
                 response = ""

             try:
                 response = remove_code_blocks(response)
                 new_memories_with_actions = json.loads(response)
             except Exception as e:
-                logging.error(f"Invalid JSON response: {e}")
+                logger.error(f"Invalid JSON response: {e}")
                 new_memories_with_actions = {}
         else:
             new_memories_with_actions = {}
@@ -394,11 +394,11 @@ class Memory(MemoryBase):
         returned_memories = []
         try:
             for resp in new_memories_with_actions.get("memory", []):
-                logging.info(resp)
+                logger.info(resp)
                 try:
                     action_text = resp.get("text")
                     if not action_text:
-                        logging.info("Skipping memory entry because of empty `text` field.")
+                        logger.info("Skipping memory entry because of empty `text` field.")
                         continue

                     event_type = resp.get("event")
@@ -434,11 +434,11 @@ class Memory(MemoryBase):
                             }
                         )
                     elif event_type == "NONE":
-                        logging.info("NOOP for Memory.")
+                        logger.info("NOOP for Memory.")
                 except Exception as e:
-                    logging.error(f"Error processing memory action: {resp}, Error: {e}")
+                    logger.error(f"Error processing memory action: {resp}, Error: {e}")
         except Exception as e:
-            logging.error(f"Error iterating new_memories_with_actions: {e}")
+            logger.error(f"Error iterating new_memories_with_actions: {e}")

         keys, encoded_ids = process_telemetry_filters(filters)
         capture_event(
@@ -801,7 +801,7 @@ class Memory(MemoryBase):
         return self.db.get_history(memory_id)

     def _create_memory(self, data, existing_embeddings, metadata=None):
-        logging.debug(f"Creating memory with {data=}")
+        logger.debug(f"Creating memory with {data=}")
         if data in existing_embeddings:
             embeddings = existing_embeddings[data]
         else:
@@ -922,7 +922,7 @@ class Memory(MemoryBase):
         return memory_id

     def _delete_memory(self, memory_id):
-        logging.info(f"Deleting memory with {memory_id=}")
+        logger.info(f"Deleting memory with {memory_id=}")
         existing_memory = self.vector_store.get(vector_id=memory_id)
         prev_value = existing_memory.payload["data"]
         self.vector_store.delete(vector_id=memory_id)
@@ -1164,7 +1164,7 @@ class AsyncMemory(MemoryBase):
             response = remove_code_blocks(response)
             new_retrieved_facts = json.loads(response)["facts"]
         except Exception as e:
-            logging.error(f"Error in new_retrieved_facts: {e}")
+            logger.error(f"Error in new_retrieved_facts: {e}")
             new_retrieved_facts = []

         if not new_retrieved_facts:
@@ -1194,7 +1194,7 @@ class AsyncMemory(MemoryBase):
         for item in retrieved_old_memory:
             unique_data[item["id"]] = item
         retrieved_old_memory = list(unique_data.values())
-        logging.info(f"Total existing memories: {len(retrieved_old_memory)}")
+        logger.info(f"Total existing memories: {len(retrieved_old_memory)}")
         temp_uuid_mapping = {}
         for idx, item in enumerate(retrieved_old_memory):
             temp_uuid_mapping[str(idx)] = item["id"]
@@ -1211,20 +1211,20 @@ class AsyncMemory(MemoryBase):
                 response_format={"type": "json_object"},
             )
         except Exception as e:
-            logging.error(f"Error in new memory actions response: {e}")
+            logger.error(f"Error in new memory actions response: {e}")
             response = ""

         try:
             response = remove_code_blocks(response)
             new_memories_with_actions = json.loads(response)
         except Exception as e:
-            logging.error(f"Invalid JSON response: {e}")
+            logger.error(f"Invalid JSON response: {e}")
             new_memories_with_actions = {}

         returned_memories = []
         try:
             memory_tasks = []
             for resp in new_memories_with_actions.get("memory", []):
-                logging.info(resp)
+                logger.info(resp)
                 try:
                     action_text = resp.get("text")
                     if not action_text:
@@ -1254,9 +1254,9 @@ class AsyncMemory(MemoryBase):
                         task = asyncio.create_task(self._delete_memory(memory_id=temp_uuid_mapping[resp.get("id")]))
                         memory_tasks.append((task, resp, "DELETE", temp_uuid_mapping[resp.get("id")]))
                     elif event_type == "NONE":
-                        logging.info("NOOP for Memory (async).")
+                        logger.info("NOOP for Memory (async).")
                 except Exception as e:
-                    logging.error(f"Error processing memory action (async): {resp}, Error: {e}")
+                    logger.error(f"Error processing memory action (async): {resp}, Error: {e}")

             for task, resp, event_type, mem_id in memory_tasks:
                 try:
@@ -1275,9 +1275,9 @@ class AsyncMemory(MemoryBase):
                     elif event_type == "DELETE":
                         returned_memories.append({"id": mem_id, "memory": resp.get("text"), "event": event_type})
                 except Exception as e:
-                    logging.error(f"Error awaiting memory task (async): {e}")
+                    logger.error(f"Error awaiting memory task (async): {e}")
         except Exception as e:
-            logging.error(f"Error in memory processing loop (async): {e}")
+            logger.error(f"Error in memory processing loop (async): {e}")

         keys, encoded_ids = process_telemetry_filters(effective_filters)
         capture_event(
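The async variant schedules each ADD/UPDATE/DELETE with `asyncio.create_task` and only awaits them in a second loop, so the operations overlap and one failure is logged without cancelling the rest. A self-contained sketch of that two-phase pattern (the coroutine body is a stand-in for `_create_memory`/`_update_memory`/`_delete_memory`):

```python
import asyncio
import logging

logger = logging.getLogger(__name__)

async def _apply_action(event: str, text: str) -> dict:
    # Stand-in for the real memory coroutines.
    await asyncio.sleep(0)
    return {"memory": text, "event": event}

async def process(actions: list[dict]) -> list[dict]:
    # Phase 1: fan out -- create_task starts every coroutine immediately.
    tasks = [(asyncio.create_task(_apply_action(a["event"], a["text"])), a)
             for a in actions]
    results = []
    # Phase 2: collect -- a failed task is logged; the others still finish.
    for task, action in tasks:
        try:
            results.append(await task)
        except Exception as e:
            logger.error(f"Error awaiting memory task (async): {action}, {e}")
    return results

print(asyncio.run(process([{"event": "ADD", "text": "likes tea"}])))
```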
@@ -1653,7 +1653,7 @@ class AsyncMemory(MemoryBase):
         return await asyncio.to_thread(self.db.get_history, memory_id)

     async def _create_memory(self, data, existing_embeddings, metadata=None):
-        logging.debug(f"Creating memory with {data=}")
+        logger.debug(f"Creating memory with {data=}")
         if data in existing_embeddings:
             embeddings = existing_embeddings[data]
         else:
@@ -1795,7 +1795,7 @@ class AsyncMemory(MemoryBase):
         return memory_id

     async def _delete_memory(self, memory_id):
-        logging.info(f"Deleting memory with {memory_id=}")
+        logger.info(f"Deleting memory with {memory_id=}")
         existing_memory = await asyncio.to_thread(self.vector_store.get, vector_id=memory_id)
         prev_value = existing_memory.payload["data"]


@@ -98,7 +98,7 @@ class PineconeDB(VectorStoreBase):
         existing_indexes = self.list_cols().names()
         if self.collection_name in existing_indexes:
-            logging.debug(f"Index {self.collection_name} already exists. Skipping creation.")
+            logger.debug(f"Index {self.collection_name} already exists. Skipping creation.")
             self.index = self.client.Index(self.collection_name)
             return


@@ -83,7 +83,7 @@ class Qdrant(VectorStoreBase):
         response = self.list_cols()
         for collection in response.collections:
             if collection.name == self.collection_name:
-                logging.debug(f"Collection {self.collection_name} already exists. Skipping creation.")
+                logger.debug(f"Collection {self.collection_name} already exists. Skipping creation.")
                 return
         self.client.create_collection(


@@ -102,7 +102,7 @@ class Weaviate(VectorStoreBase):
             distance (str, optional): Distance metric for vector similarity. Defaults to "cosine".
         """
         if self.client.collections.exists(self.collection_name):
-            logging.debug(f"Collection {self.collection_name} already exists. Skipping creation.")
+            logger.debug(f"Collection {self.collection_name} already exists. Skipping creation.")
             return

         properties = [
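The Pinecone, Qdrant, and Weaviate hunks all touch the same guard: log at debug level and return early when the target collection/index already exists, create it otherwise. A generic sketch of that idempotent pattern, with a hypothetical `client` adapter standing in for the three real SDKs:

```python
import logging

logger = logging.getLogger(__name__)

def create_col_if_missing(client, collection_name: str, **create_kwargs) -> None:
    """Sketch of the shared create_col guard; `client` is a hypothetical
    adapter exposing exists()/create(), not any vendor's actual SDK."""
    if client.exists(collection_name):
        logger.debug(f"Collection {collection_name} already exists. Skipping creation.")
        return
    client.create(collection_name, **create_kwargs)
```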