From 38965ab6bf41b2d62e3bd4ecbb65dceebd633c17 Mon Sep 17 00:00:00 2001 From: Dev Khant Date: Fri, 9 Aug 2024 02:40:39 +0530 Subject: [PATCH] Docs for using Ollama locally (#1668) --- docs/open-source/quickstart.mdx | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/docs/open-source/quickstart.mdx b/docs/open-source/quickstart.mdx index e2a447d9..7a484264 100644 --- a/docs/open-source/quickstart.mdx +++ b/docs/open-source/quickstart.mdx @@ -195,6 +195,36 @@ m.delete_all(user_id="alice") # Delete all memories m.reset() # Reset all memories ``` +## Run Mem0 Locally + +Mem0 can be used entirely locally with Ollama, where both the embedding model and the language model (LLM) utilize Ollama. + +Here's an example of how it can be used: + +```python +import os +from mem0 import Memory + +config = { + "vector_store":{ + "provider": "qdrant", + "config": { + "embedding_model_dims": 768 # change according to embedding model + } + }, + "llm": { + "provider": "ollama" + }, + "embedder": { + "provider": "ollama" + } +} + +m = Memory.from_config(config) +m.add("I'm visiting Paris", user_id="john") +``` + + ## Chat Completion Mem0 can be easily integrate into chat applications to enhance conversational agents with structured memory. Mem0's APIs are designed to be compatible with OpenAI's, with the goal of making it easy to leverage Mem0 in applications you may have already built.