From a546a9f56ad385c9598933747241c823d7ef578a Mon Sep 17 00:00:00 2001
From: Dev Khant
Date: Fri, 19 Jul 2024 01:36:40 +0530
Subject: [PATCH] Update Mem0 LLM docs (#1497)

---
 docs/llms.mdx                              | 62 ++++++++++++++++++++--
 embedchain/docs/api-reference/app/chat.mdx | 10 +++-
 2 files changed, 67 insertions(+), 5 deletions(-)

diff --git a/docs/llms.mdx b/docs/llms.mdx
index ab25e921..b367202e 100644
--- a/docs/llms.mdx
+++ b/docs/llms.mdx
@@ -12,6 +12,8 @@ Mem0 includes built-in support for various popular large language models. Memory
 
+
+
 ## OpenAI
 
@@ -24,7 +26,7 @@ Once you have obtained the key, you can use it like this:
 import os
 from mem0 import Memory
 
-os.environ['OPENAI_API_KEY'] = 'xxx'
+os.environ["OPENAI_API_KEY"] = "your-api-key"
 
 config = {
     "llm": {
@@ -51,7 +53,7 @@ In order to use LLMs from Groq, go to their [platform](https://console.groq.com/
 import os
 from mem0 import Memory
 
-os.environ['GROQ_API_KEY'] = 'xxx'
+os.environ["GROQ_API_KEY"] = "your-api-key"
 
 config = {
     "llm": {
@@ -78,7 +80,7 @@ Once you have obtained the key, you can use it like this:
 import os
 from mem0 import Memory
 
-os.environ['TOGETHER_API_KEY'] = 'xxx'
+os.environ["TOGETHER_API_KEY"] = "your-api-key"
 
 config = {
     "llm": {
@@ -133,6 +135,8 @@ m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category"
 import os
 from mem0 import Memory
 
+os.environ["OPENAI_API_KEY"] = "your-api-key"
+
 config = {
     "llm": {
         "provider": "litellm",
@@ -148,3 +152,55 @@ m = Memory.from_config(config)
 m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
 ```
 
+## Google AI
+
+To use Google AI models, set the `GEMINI_API_KEY` environment variable (the key LiteLLM reads for Gemini models, as the code below shows). You can obtain the API key from [Google MakerSuite](https://makersuite.google.com/app/apikey).
+
+Once you have obtained the key, you can use it like this:
+
+```python
+import os
+from mem0 import Memory
+
+os.environ["GEMINI_API_KEY"] = "your-api-key"
+
+config = {
+    "llm": {
+        "provider": "litellm",
+        "config": {
+            "model": "gemini/gemini-pro",
+            "temperature": 0.2,
+            "max_tokens": 1500,
+        }
+    }
+}
+
+m = Memory.from_config(config)
+m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
+```
+
+## Anthropic
+
+To use Anthropic's models, set the `ANTHROPIC_API_KEY` environment variable, which you can find on their [Account Settings page](https://console.anthropic.com/account/keys).
+
+```python
+import os
+from mem0 import Memory
+
+os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
+
+config = {
+    "llm": {
+        "provider": "litellm",
+        "config": {
+            "model": "claude-3-opus-20240229",
+            "temperature": 0.1,
+            "max_tokens": 2000,
+        }
+    }
+}
+
+m = Memory.from_config(config)
+m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
+```
+
diff --git a/embedchain/docs/api-reference/app/chat.mdx b/embedchain/docs/api-reference/app/chat.mdx
index 37405475..f12b0979 100644
--- a/embedchain/docs/api-reference/app/chat.mdx
+++ b/embedchain/docs/api-reference/app/chat.mdx
@@ -147,7 +147,7 @@ app.chat("What is the net worth of Elon Musk?", config=query_config)
 
 ### With Mem0 to store chat history
 
-Mem0 is a cutting-edge long-term memory for LLMs to enable personalization for the GenAI stack. It enables LLMs to remember past interactions and provide more personalized responses.
+Mem0 is cutting-edge long-term memory for LLMs that enables personalization across the GenAI stack, letting LLMs remember past interactions and provide more personalized responses.
 
In order to use Mem0 to enable memory for personalization in your apps:
- Install the [`mem0`](https://docs.mem0.ai/) package using `pip install mem0ai`.
@@ -166,4 +166,10 @@ app = App.from_config(config=config)
 
 app.add("https://www.forbes.com/profile/elon-musk")
 app.chat("What is the net worth of Elon Musk?")
-```
\ No newline at end of file
+```
+
+## How Mem0 works
+- Mem0 saves context derived from each user question into its memory.
+- When a user poses a new question, Mem0 retrieves relevant previous memories.
+- The `top_k` parameter in the memory configuration specifies the number of top memories to consider during retrieval (see the sketch below).
+- Mem0 generates the final response by integrating the user's question, context from the data source, and the relevant memories.
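+
+As a minimal sketch of how these pieces fit together (assuming the memory settings live in a `memory` block of the app config, with an illustrative `top_k` value; refer to the configuration docs for the exact schema):
+
+```python
+from embedchain import App
+
+# Hypothetical config: "top_k" caps how many stored memories are
+# retrieved for each new question.
+config = {
+    "memory": {
+        "top_k": 5,
+    }
+}
+
+app = App.from_config(config=config)
+app.add("https://www.forbes.com/profile/elon-musk")
+
+# First question: answered from the data source; Mem0 saves the derived context.
+app.chat("What is the net worth of Elon Musk?")
+
+# Follow-up: the top_k most relevant memories are retrieved and combined with
+# fresh context from the data source to produce a personalized answer.
+app.chat("Remind me what I asked about Elon Musk before.")
+```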