Modified the return statement for ADD call | Added tests to main.py and graph_memory.py (#1812)

Author: Prateek Chhikara
Date: 2024-09-09 10:04:11 -07:00
Committed by: GitHub
Parent: 58f29d8781
Commit: b081e43b8d

5 changed files with 300 additions and 62 deletions


@@ -1,4 +1,3 @@
-from mem0.llms.openai import OpenAILLM

 UPDATE_GRAPH_PROMPT = """
 You are an AI expert specializing in graph memory management and optimization. Your task is to analyze existing graph memories alongside new information, and update the relationships in the memory list to ensure the most accurate, current, and coherent representation of knowledge.
@@ -67,42 +66,3 @@ def get_update_memory_messages(existing_memories, memory):
"content": get_update_memory_prompt(existing_memories, memory, UPDATE_GRAPH_PROMPT),
},
]
def get_search_results(entities, query):
search_graph_prompt = f"""
You are an expert at searching through graph entity memories.
When provided with existing graph entities and a query, your task is to search through the provided graph entities to find the most relevant information from the graph entities related to the query.
The output should be from the graph entities only.
Here are the details of the task:
- Existing Graph Entities (source -> relationship -> target):
{entities}
- Query: {query}
The output should be from the graph entities only.
The output should be in the following JSON format:
{{
"search_results": [
{{
"source_node": "source_node",
"relationship": "relationship",
"target_node": "target_node"
}}
]
}}
"""
messages = [
{
"role": "user",
"content": search_graph_prompt,
}
]
llm = OpenAILLM()
results = llm.generate_response(messages=messages, response_format={"type": "json_object"})
return results
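
For context on what this hunk deletes, here is a minimal usage sketch of the removed get_search_results helper. The entity triples and the query are invented for illustration, and the helper is assumed to be importable from wherever this file lives; its module path is not visible in this commit view.

# Hypothetical usage of the get_search_results helper removed above.
# The entity triples and query are illustrative only, and the helper is
# assumed to be in scope (its module path is not shown in this view).
entities = "\n".join(
    [
        "alice -> works_at -> acme_corp",  # source -> relationship -> target
        "alice -> lives_in -> berlin",
    ]
)

# The helper embeds the entities and query into its prompt and calls
# OpenAILLM.generate_response(..., response_format={"type": "json_object"}),
# so it returns a JSON string shaped like:
# {"search_results": [{"source_node": "alice",
#                      "relationship": "works_at",
#                      "target_node": "acme_corp"}]}
results = get_search_results(entities, "Where does alice work?")
print(results)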