From 51fd7db205bf24355d5c4b96341d8af0b6a66985 Mon Sep 17 00:00:00 2001 From: Dev Khant Date: Sat, 20 Jul 2024 03:33:09 +0530 Subject: [PATCH] Mem0 fix in embedchain (#1506) --- embedchain/embedchain/config/llm/base.py | 10 +++++----- embedchain/embedchain/embedchain.py | 7 +++---- embedchain/embedchain/llm/openai.py | 2 +- embedchain/embedchain/utils/misc.py | 1 - embedchain/pyproject.toml | 2 +- 5 files changed, 10 insertions(+), 12 deletions(-) diff --git a/embedchain/embedchain/config/llm/base.py b/embedchain/embedchain/config/llm/base.py index 96f31c22..d62bf791 100644 --- a/embedchain/embedchain/config/llm/base.py +++ b/embedchain/embedchain/config/llm/base.py @@ -51,14 +51,14 @@ Answer: """ # noqa:E501 DEFAULT_PROMPT_WITH_MEM0_MEMORY = """ -You are a Q&A expert system. Your responses must always be rooted in the context provided for each query. You are also provided with the conversation history and memories with the user. Make sure to use relevant context from conversation history and memories as needed. +You are an expert at answering questions based on provided memories. You are also provided with the context and conversation history of the user. Make sure to use relevant context from conversation history and context as needed. Here are some guidelines to follow: - 1. Refrain from explicitly mentioning the context provided in your response. -2. Take into consideration the conversation history and memories provided. -3. The context should silently guide your answers without being directly acknowledged. -4. Do not use phrases such as 'According to the context provided', 'Based on the context, ...' etc. +2. Take into consideration the conversation history and context provided. +3. Do not use phrases such as 'According to the context provided', 'Based on the context, ...' etc. + +Strictly return the query exactly as it is if it is not a question or if no relevant information is found. 
Context information: ---------------------- diff --git a/embedchain/embedchain/embedchain.py b/embedchain/embedchain/embedchain.py index 3ca4ddad..bc02ef05 100644 --- a/embedchain/embedchain/embedchain.py +++ b/embedchain/embedchain/embedchain.py @@ -600,7 +600,7 @@ class EmbedChain(JSONSerializable): memories = None if self.mem0_memory: memories = self.mem0_memory.search( - query=input_query, agent_id=self.config.id, session_id=session_id, limit=self.memory_config.top_k + query=input_query, agent_id=self.config.id, user_id=session_id, limit=self.memory_config.top_k ) # Update the history beforehand so that we can handle multiple chat sessions in the same python session @@ -639,10 +639,9 @@ class EmbedChain(JSONSerializable): ) # Add to Mem0 memory if enabled - # TODO: Might need to prepend with some text like: - # "Remember user preferences from following user query: {input_query}" + # Adding answer here because it would be much more useful than input question itself if self.mem0_memory: - self.mem0_memory.add(data=input_query, agent_id=self.config.id, session_id=session_id) + self.mem0_memory.add(data=answer, agent_id=self.config.id, user_id=session_id) # add conversation in memory self.llm.add_history(self.config.id, input_query, answer, session_id=session_id) diff --git a/embedchain/embedchain/llm/openai.py b/embedchain/embedchain/llm/openai.py index d52d2cfb..316854e7 100644 --- a/embedchain/embedchain/llm/openai.py +++ b/embedchain/embedchain/llm/openai.py @@ -60,7 +60,7 @@ class OpenAILlm(BaseLlm): api_key = config.api_key or os.environ["OPENAI_API_KEY"] base_url = config.base_url or os.environ.get("OPENAI_API_BASE", None) if config.top_p: - kwargs["model_kwargs"]["top_p"] = config.top_p + kwargs["top_p"] = config.top_p if config.default_headers: kwargs["default_headers"] = config.default_headers if config.stream: diff --git a/embedchain/embedchain/utils/misc.py b/embedchain/embedchain/utils/misc.py index fbfb3116..193e02ea 100644 --- 
a/embedchain/embedchain/utils/misc.py +++ b/embedchain/embedchain/utils/misc.py @@ -521,7 +521,6 @@ def validate_config(config_data): }, }, Optional("memory"): { - "api_key": str, Optional("top_k"): int, }, } diff --git a/embedchain/pyproject.toml b/embedchain/pyproject.toml index e8ef2dd4..38d41d28 100644 --- a/embedchain/pyproject.toml +++ b/embedchain/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "embedchain" -version = "0.1.117" +version = "0.1.118" description = "Simplest open source retrieval (RAG) framework" authors = [ "Taranjeet Singh ",