Add infer param and version bump (#2389)

Co-authored-by: Deshraj Yadav <deshrajdry@gmail.com>
Author: Dev Khant
Date: 2025-03-18 01:04:58 +05:30
Committed by: GitHub
Parent: e25dc4b504
Commit: d7a26bd0c3
4 changed files with 26 additions and 7 deletions

mem0/memory/main.py

@@ -16,8 +16,12 @@ from mem0.memory.base import MemoryBase
 from mem0.memory.setup import setup_config
 from mem0.memory.storage import SQLiteManager
 from mem0.memory.telemetry import capture_event
-from mem0.memory.utils import (get_fact_retrieval_messages, parse_messages,
-                               parse_vision_messages, remove_code_blocks)
+from mem0.memory.utils import (
+    get_fact_retrieval_messages,
+    parse_messages,
+    parse_vision_messages,
+    remove_code_blocks,
+)
 from mem0.utils.factory import EmbedderFactory, LlmFactory, VectorStoreFactory
 
 # Setup user config
@@ -82,6 +86,7 @@ class Memory(MemoryBase):
         run_id=None,
         metadata=None,
         filters=None,
+        infer=True,
         prompt=None,
     ):
         """
@@ -94,6 +99,7 @@
             run_id (str, optional): ID of the run creating the memory. Defaults to None.
             metadata (dict, optional): Metadata to store with the memory. Defaults to None.
             filters (dict, optional): Filters to apply to the search. Defaults to None.
+            infer (bool, optional): Whether to infer the memories. Defaults to True.
             prompt (str, optional): Prompt to use for memory deduction. Defaults to None.
 
         Returns:
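For reference, a minimal usage sketch of the new parameter; the default Memory() config, the messages, and the user_id here are made up for illustration:

from mem0 import Memory

m = Memory()

messages = [
    {"role": "user", "content": "I prefer window seats on long flights."},
    {"role": "assistant", "content": "Noted, window seats it is."},
]

# Default behavior: the LLM first extracts facts, then stores them.
m.add(messages, user_id="alice")

# infer=False: each non-system message is embedded and stored verbatim,
# skipping fact extraction entirely (see _add_to_vector_store below).
m.add(messages, user_id="alice", infer=False)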
@@ -132,7 +138,7 @@
             messages = parse_vision_messages(messages)
 
         with concurrent.futures.ThreadPoolExecutor() as executor:
-            future1 = executor.submit(self._add_to_vector_store, messages, metadata, filters)
+            future1 = executor.submit(self._add_to_vector_store, messages, metadata, filters, infer)
             future2 = executor.submit(self._add_to_graph, messages, filters)
 
             concurrent.futures.wait([future1, future2])
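The vector-store and graph writes already ran concurrently; this change only threads infer through to the vector-store worker. A stripped-down sketch of that pattern, with placeholder worker functions standing in for the real methods:

import concurrent.futures

def add_to_vector_store(messages, metadata, filters, infer):
    ...  # placeholder: embed and persist

def add_to_graph(messages, filters):
    ...  # placeholder: extract entities/relations

with concurrent.futures.ThreadPoolExecutor() as executor:
    future1 = executor.submit(add_to_vector_store, messages, metadata, filters, infer)
    future2 = executor.submit(add_to_graph, messages, filters)
    concurrent.futures.wait([future1, future2])
    vector_store_result = future1.result()  # .result() re-raises any worker exception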
@@ -158,7 +164,16 @@
         return {"results": vector_store_result}
 
-    def _add_to_vector_store(self, messages, metadata, filters):
+    def _add_to_vector_store(self, messages, metadata, filters, infer):
+        if not infer:
+            returned_memories = []
+            for message in messages:
+                if message["role"] != "system":
+                    message_embeddings = self.embedding_model.embed(message["content"], "add")
+                    memory_id = self._create_memory(message["content"], message_embeddings, metadata)
+                    returned_memories.append({"id": memory_id, "memory": message["content"], "event": "ADD"})
+            return returned_memories
+
         parsed_messages = parse_messages(messages)
         if self.custom_prompt:
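With infer=False, _add_to_vector_store returns before parse_messages or any fact-retrieval prompt runs, so the only event type possible on this path is ADD. A hedged sketch of the shape a caller should get back under this commit's behavior (IDs are illustrative):

result = m.add(messages, user_id="alice", infer=False)
# {
#     "results": [
#         {"id": "a1b2...", "memory": "I prefer window seats on long flights.", "event": "ADD"},
#         {"id": "c3d4...", "memory": "Noted, window seats it is.", "event": "ADD"},
#     ]
# }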