Mem0 fix in embedchain (#1506)

This commit is contained in:
Dev Khant
2024-07-20 03:33:09 +05:30
committed by GitHub
parent e9136c1aa0
commit 51fd7db205
5 changed files with 10 additions and 12 deletions

View File

@@ -51,14 +51,14 @@ Answer:
""" # noqa:E501 """ # noqa:E501
DEFAULT_PROMPT_WITH_MEM0_MEMORY = """ DEFAULT_PROMPT_WITH_MEM0_MEMORY = """
You are a Q&A expert system. Your responses must always be rooted in the context provided for each query. You are also provided with the conversation history and memories with the user. Make sure to use relevant context from conversation history and memories as needed. You are an expert at answering questions based on provided memories. You are also provided with the context and conversation history of the user. Make sure to use relevant context from conversation history and context as needed.
Here are some guidelines to follow: Here are some guidelines to follow:
1. Refrain from explicitly mentioning the context provided in your response. 1. Refrain from explicitly mentioning the context provided in your response.
2. Take into consideration the conversation history and memories provided. 2. Take into consideration the conversation history and context provided.
3. The context should silently guide your answers without being directly acknowledged. 3. Do not use phrases such as 'According to the context provided', 'Based on the context, ...' etc.
4. Do not use phrases such as 'According to the context provided', 'Based on the context, ...' etc.
Strictly return the query exactly as it is if it is not a question or if no relevant information is found.
Context information: Context information:
---------------------- ----------------------

View File

@@ -600,7 +600,7 @@ class EmbedChain(JSONSerializable):
memories = None memories = None
if self.mem0_memory: if self.mem0_memory:
memories = self.mem0_memory.search( memories = self.mem0_memory.search(
query=input_query, agent_id=self.config.id, session_id=session_id, limit=self.memory_config.top_k query=input_query, agent_id=self.config.id, user_id=session_id, limit=self.memory_config.top_k
) )
# Update the history beforehand so that we can handle multiple chat sessions in the same python session # Update the history beforehand so that we can handle multiple chat sessions in the same python session
@@ -639,10 +639,9 @@ class EmbedChain(JSONSerializable):
) )
# Add to Mem0 memory if enabled # Add to Mem0 memory if enabled
# TODO: Might need to prepend with some text like: # Adding answer here because it would be more useful than the input question itself
# "Remember user preferences from following user query: {input_query}"
if self.mem0_memory: if self.mem0_memory:
self.mem0_memory.add(data=input_query, agent_id=self.config.id, session_id=session_id) self.mem0_memory.add(data=answer, agent_id=self.config.id, user_id=session_id)
# add conversation in memory # add conversation in memory
self.llm.add_history(self.config.id, input_query, answer, session_id=session_id) self.llm.add_history(self.config.id, input_query, answer, session_id=session_id)

View File

@@ -60,7 +60,7 @@ class OpenAILlm(BaseLlm):
api_key = config.api_key or os.environ["OPENAI_API_KEY"] api_key = config.api_key or os.environ["OPENAI_API_KEY"]
base_url = config.base_url or os.environ.get("OPENAI_API_BASE", None) base_url = config.base_url or os.environ.get("OPENAI_API_BASE", None)
if config.top_p: if config.top_p:
kwargs["model_kwargs"]["top_p"] = config.top_p kwargs["top_p"] = config.top_p
if config.default_headers: if config.default_headers:
kwargs["default_headers"] = config.default_headers kwargs["default_headers"] = config.default_headers
if config.stream: if config.stream:

View File

@@ -521,7 +521,6 @@ def validate_config(config_data):
}, },
}, },
Optional("memory"): { Optional("memory"): {
"api_key": str,
Optional("top_k"): int, Optional("top_k"): int,
}, },
} }

View File

@@ -1,6 +1,6 @@
[tool.poetry] [tool.poetry]
name = "embedchain" name = "embedchain"
version = "0.1.117" version = "0.1.118"
description = "Simplest open source retrieval (RAG) framework" description = "Simplest open source retrieval (RAG) framework"
authors = [ authors = [
"Taranjeet Singh <taranjeet@embedchain.ai>", "Taranjeet Singh <taranjeet@embedchain.ai>",