Updated docs for typescript package (#2269)

This commit is contained in:
Saket Aryan
2025-02-28 07:52:28 +05:30
committed by GitHub
parent f8071a753b
commit 434b555a29
22 changed files with 1867 additions and 517 deletions

View File

@@ -71,6 +71,12 @@ Install the Mem0 package via pip:
pip install mem0ai
```
Install the Mem0 package via npm:
```bash
npm install mem0ai
```
### Basic Usage
Mem0 requires an LLM to function, with `gpt-4o` from OpenAI as the default. However, it supports a variety of LLMs; for details, refer to our [Supported LLMs documentation](https://docs.mem0.ai/llms).
@@ -114,6 +120,8 @@ if __name__ == "__main__":
main()
```
See the example for [Node.js](https://docs.mem0.ai/examples/ai_companion_js).
For more advanced usage and API documentation, visit our [documentation](https://docs.mem0.ai).
> [!TIP]

View File

@@ -64,6 +64,7 @@
"icon": "code-branch",
"pages": [
"open-source/quickstart",
"open-source/python-quickstart",
{
"group": "Features",
"icon": "wrench",
@@ -150,6 +151,67 @@
]
}
]
},
{
"group": "Node.js",
"icon": "js",
"pages": [
"open-source-typescript/quickstart",
{
"group": "Features",
"icon": "wrench",
"pages": [
"open-source-typescript/features/custom-prompts"
]
},
{
"group": "LLMs",
"icon": "brain",
"pages": [
"open-source-typescript/components/llms/overview",
"open-source-typescript/components/llms/config",
{
"group": "Supported LLMs",
"icon": "list",
"pages": [
"open-source-typescript/components/llms/models/openai",
"open-source-typescript/components/llms/models/anthropic",
"open-source-typescript/components/llms/models/groq"
]
}
]
},
{
"group": "Vector Databases",
"icon": "database",
"pages": [
"open-source-typescript/components/vectordbs/overview",
"open-source-typescript/components/vectordbs/config",
{
"group": "Supported Vector Databases",
"icon": "server",
"pages": [
"open-source-typescript/components/vectordbs/dbs/qdrant",
"open-source-typescript/components/vectordbs/dbs/redis"
]
}
]
},
{
"group": "Embedding Models",
"icon": "layer-group",
"pages": [
"open-source-typescript/components/embedders/overview",
"open-source-typescript/components/embedders/config",
{
"group": "Supported Embedding Models",
"icon": "list",
"pages": [
"open-source-typescript/components/embedders/models/openai"
]
}
]
}
]
}
]
}
@@ -163,6 +225,7 @@
"icon": "lightbulb",
"pages": [
"examples/overview",
"examples/ai_companion_js",
"examples/mem0-with-ollama",
"examples/personal-ai-tutor",
"examples/customer-support-agent",

View File

@@ -0,0 +1,126 @@
---
title: AI Companion in Node.js
---
You can create a personalized AI Companion using Mem0. This guide will walk you through the necessary steps and provide the complete code to get you started.
## Overview
The Personalized AI Companion leverages Mem0 to retain information across interactions, enabling a tailored learning experience. It creates memories for each user interaction and integrates with OpenAI's GPT models to provide detailed and context-aware responses to user queries.
## Setup
Before you begin, ensure you have Node.js installed and create a new project. Install the required dependencies using npm:
```bash
npm install openai mem0ai
```
## Full Code Example
Below is the complete code to create and interact with an AI Companion using Mem0:
```javascript
import { OpenAI } from 'openai';
import { Memory } from 'mem0ai/oss';
import * as readline from 'readline';
const openaiClient = new OpenAI();
const memory = new Memory();
async function chatWithMemories(message, userId = "default_user") {
const relevantMemories = await memory.search(message, { userId });
const memoriesStr = relevantMemories.results
.map(entry => `- ${entry.memory}`)
.join('\n');
const systemPrompt = `You are a helpful AI. Answer the question based on the user's query and their memories.
User Memories:
${memoriesStr}`;
const messages = [
{ role: "system", content: systemPrompt },
{ role: "user", content: message }
];
const response = await openaiClient.chat.completions.create({
model: "gpt-4o-mini",
messages: messages
});
const assistantResponse = response.choices[0].message.content || "";
messages.push({ role: "assistant", content: assistantResponse });
await memory.add(messages, { userId });
return assistantResponse;
}
async function main() {
const rl = readline.createInterface({
input: process.stdin,
output: process.stdout
});
console.log("Chat with AI (type 'exit' to quit)");
const askQuestion = () => {
return new Promise((resolve) => {
rl.question("You: ", (input) => {
resolve(input.trim());
});
});
};
try {
while (true) {
const userInput = await askQuestion();
if (userInput.toLowerCase() === 'exit') {
console.log("Goodbye!");
rl.close();
break;
}
const response = await chatWithMemories(userInput, "sample_user");
console.log(`AI: ${response}`);
}
} catch (error) {
console.error("An error occurred:", error);
rl.close();
}
}
main().catch(console.error);
```
### Key Components
1. **Initialization**
- The code initializes both OpenAI and Mem0 Memory clients
- Uses Node.js's built-in readline module for command-line interaction
2. **Memory Management (chatWithMemories function)**
- Retrieves relevant memories using Mem0's search functionality
- Constructs a system prompt that includes past memories
- Makes API calls to OpenAI for generating responses
- Stores new interactions in memory
3. **Interactive Chat Interface (main function)**
- Creates a command-line interface for user interaction
- Handles user input and displays AI responses
- Includes graceful exit functionality
### Environment Setup
Make sure to set up your environment variables:
```bash
export OPENAI_API_KEY=your_api_key
```
### Conclusion
This implementation demonstrates how to create an AI Companion that maintains context across conversations using Mem0's memory capabilities. The system automatically stores and retrieves relevant information, creating a more personalized and context-aware interaction experience.
As users interact with the system, Mem0's memory system continuously learns and adapts, making future responses more relevant and personalized. This setup is ideal for creating long-term learning AI assistants that can maintain context and provide increasingly personalized responses over time.

View File

@@ -17,6 +17,9 @@ Here are some examples of how Mem0 can be integrated into various applications:
## Examples
<CardGroup cols={2}>
<Card title="AI Companion in Node.js" icon="square-6" href="/examples/ai_companion_js">
Create a Personalized AI Companion using Mem0 in Node.js.
</Card>
<Card title="Mem0 with Ollama" icon="square-1" href="/examples/mem0-with-ollama">
Run Mem0 locally with Ollama.
</Card>

View File

@@ -0,0 +1,56 @@
---
title: Configurations
icon: "gear"
iconType: "solid"
---
Config in Mem0 is an object that specifies the settings for your embedding models. It allows you to customize the behavior and connection details of your chosen embedder.
## How to define configurations?
The config is defined as a TypeScript object with two main keys:
- `embedder`: Specifies the embedder provider and its configuration
- `provider`: The name of the embedder (e.g., "openai", "ollama")
- `config`: A nested object containing provider-specific settings
## How to use configurations?
Here's a general example of how to use the config with Mem0:
```typescript
import { Memory } from 'mem0ai/oss';
const config = {
embedder: {
provider: 'openai',
config: {
apiKey: 'your-openai-api-key',
model: 'text-embedding-3-small',
},
},
};
const memory = new Memory(config);
await memory.add("Your text here", { userId: "user", metadata: { category: "example" } });
```
## Why is Config Needed?
Config is essential for:
1. Specifying which embedding model to use.
2. Providing necessary connection details (e.g., `model`, `apiKey`, `embeddingDims`).
3. Ensuring proper initialization and connection to your chosen embedder.
## Master List of All Params in Config
Here's a comprehensive list of all parameters that can be used across different embedders:
| Parameter | Description |
|------------------------|--------------------------------------------------|
| `model` | Embedding model to use |
| `apiKey` | API key of the provider |
| `embeddingDims` | Dimensions of the embedding model |
## Supported Embedding Models
For detailed information on configuring specific embedders, please visit the [Embedding Models](./models) section. There you'll find information for each supported embedder with provider-specific usage examples and configuration details.

View File

@@ -0,0 +1,36 @@
---
title: OpenAI
---
To use OpenAI embedding models, you need to provide the API key directly in your configuration. You can obtain the OpenAI API key from the [OpenAI Platform](https://platform.openai.com/account/api-keys).
### Usage
Here's how to configure OpenAI embedding models in your application:
```typescript
import { Memory } from 'mem0ai/oss';
const config = {
embedder: {
provider: 'openai',
config: {
apiKey: 'your-openai-api-key',
model: 'text-embedding-3-large',
},
},
};
const memory = new Memory(config);
await memory.add("I'm visiting Paris", { userId: "john" });
```
### Config
Here are the parameters available for configuring the OpenAI embedder:
| Parameter | Description | Default Value |
|------------------------|--------------------------------------------------|---------------|
| `model`                | The name of the embedding model to use           | `text-embedding-3-small` |
| `embeddingDims`        | Dimensions of the embedding model                | `1536` |
| `apiKey`               | The OpenAI API key                               | `undefined` |

View File

@@ -0,0 +1,21 @@
---
title: Overview
icon: "info"
iconType: "solid"
---
Mem0 offers support for various embedding models, allowing users to choose the one that best suits their needs.
## Supported Embedders
See the list of supported embedders below.
<CardGroup cols={1}>
<Card title="OpenAI" href="/components/embedders/models/openai"></Card>
</CardGroup>
## Usage
To utilize an embedder, you must provide a configuration to customize its usage. If no configuration is supplied, a default configuration will be applied, and `OpenAI` will be used as the embedder.
For a comprehensive list of available parameters for embedder configuration, please refer to [Config](./config).
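As a quick illustration, here is a minimal sketch of relying on the default embedder (assuming `OPENAI_API_KEY` is available in the environment):
```typescript
import { Memory } from 'mem0ai/oss';

// No embedder config is supplied, so the default configuration applies
// and OpenAI is used as the embedder, per the note above.
const memory = new Memory();
await memory.add("Your text here", { userId: "user" });
```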

View File

@@ -0,0 +1,85 @@
---
title: Configurations
icon: "gear"
iconType: "solid"
---
## How to define configurations?
The `config` is defined as a TypeScript object with two main keys:
- `llm`: Specifies the LLM provider and its configuration
- `provider`: The name of the LLM (e.g., "openai", "groq")
- `config`: A nested object containing provider-specific settings
### Config Values Precedence
Config values are applied in the following order of precedence (from highest to lowest):
1. Values explicitly set in the `config` object
2. Environment variables (e.g., `OPENAI_API_KEY`, `OPENAI_API_BASE`)
3. Default values defined in the LLM implementation
This means that values specified in the `config` object will override corresponding environment variables, which in turn override default values.
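For instance, here is a hedged sketch of how this precedence plays out (the key names mirror the example below; the exact resolution logic is internal to the SDK):
```typescript
// Assume the environment provides: OPENAI_API_KEY=sk-env-key
const config = {
  llm: {
    provider: 'openai',
    config: {
      apiKey: 'sk-config-key', // explicitly set, so it overrides OPENAI_API_KEY
      // `model` is omitted, so the LLM implementation's default model applies
    },
  },
};
```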
## How to Use Config
Here's a general example of how to use the config with Mem0:
```typescript
import { Memory } from 'mem0ai/oss';
const config = {
llm: {
provider: 'openai',
config: {
apiKey: process.env.OPENAI_API_KEY || '',
model: 'gpt-4-turbo-preview',
temperature: 0.2,
maxTokens: 1500,
},
},
embedder: {
provider: 'openai',
config: {
apiKey: process.env.OPENAI_API_KEY || '',
model: 'text-embedding-3-small',
},
},
vectorStore: {
provider: 'memory',
config: {
collectionName: 'memories',
dimension: 1536,
},
},
historyDbPath: 'memory.db',
};
const memory = new Memory(config);
memory.add("Your text here", { userId: "user123", metadata: { category: "example" } });
```
## Why is Config Needed?
Config is essential for:
1. Specifying which LLM to use.
2. Providing necessary connection details (e.g., `model`, `apiKey`, `temperature`).
3. Ensuring proper initialization and connection to your chosen LLM.
## Master List of All Params in Config
Here's a comprehensive list of all parameters that can be used across different LLMs:
| Parameter | Description | Provider |
|----------------------|-----------------------------------------------|-------------------|
| `model`              | Model to use                                  | All               |
| `temperature`        | Temperature of the model                      | All               |
| `apiKey`             | API key to use                                | All               |
| `maxTokens`          | Maximum tokens to generate                    | All               |
| `topP` | Probability threshold for nucleus sampling | All |
| `topK` | Number of highest probability tokens to keep | All |
| `openaiBaseUrl` | Base URL for OpenAI API | OpenAI |
## Supported LLMs
For detailed information on configuring specific LLMs, please visit the [LLMs](./models) section. There you'll find information for each supported LLM with provider-specific usage examples and configuration details.

View File

@@ -0,0 +1,30 @@
---
title: Anthropic
---
To use Anthropic's models, please set the `ANTHROPIC_API_KEY`, which you can find on their [Account Settings Page](https://console.anthropic.com/account/keys).
## Usage
```typescript
import { Memory } from 'mem0ai/oss';
const config = {
llm: {
provider: 'anthropic',
config: {
apiKey: process.env.ANTHROPIC_API_KEY || '',
model: 'claude-3-7-sonnet-latest',
temperature: 0.1,
maxTokens: 2000,
},
},
};
const memory = new Memory(config);
await memory.add("Likes to play cricket on weekends", { userId: "alice", metadata: { category: "hobbies" } });
```
## Config
All available parameters for the `anthropic` config are present in the [Master List of All Params in Config](../config).

View File

@@ -0,0 +1,32 @@
---
title: Groq
---
[Groq](https://groq.com/) is the creator of the world's first Language Processing Unit (LPU), providing exceptional speed performance for AI workloads running on their LPU Inference Engine.
In order to use LLMs from Groq, go to their [platform](https://console.groq.com/keys) and get the API key. Set it as the `GROQ_API_KEY` environment variable to use the model as shown in the example below.
## Usage
```typescript
import { Memory } from 'mem0ai/oss';
const config = {
llm: {
provider: 'groq',
config: {
apiKey: process.env.GROQ_API_KEY || '',
model: 'mixtral-8x7b-32768',
temperature: 0.1,
maxTokens: 1000,
},
},
};
const memory = new Memory(config);
await memory.add("Likes to play cricket on weekends", { userId: "alice", metadata: { category: "hobbies" } });
```
## Config
All available parameters for the `groq` config are present in the [Master List of All Params in Config](../config).

View File

@@ -0,0 +1,30 @@
---
title: OpenAI
---
To use OpenAI LLM models, you need to set the `OPENAI_API_KEY` environment variable. You can obtain the OpenAI API key from the [OpenAI Platform](https://platform.openai.com/account/api-keys).
## Usage
```typescript
import { Memory } from 'mem0ai/oss';
const config = {
llm: {
provider: 'openai',
config: {
apiKey: process.env.OPENAI_API_KEY || '',
model: 'gpt-4-turbo-preview',
temperature: 0.2,
maxTokens: 1500,
},
},
};
const memory = new Memory(config);
await memory.add("Likes to play cricket on weekends", { userId: "alice", metadata: { category: "hobbies" } });
```
## Config
All available parameters for the `openai` config are present in the [Master List of All Params in Config](../config).

View File

@@ -0,0 +1,45 @@
---
title: Overview
icon: "info"
iconType: "solid"
---
Mem0 includes built-in support for various popular large language models. Memory can use the LLM you provide, tailoring responses to your specific needs.
## Usage
To use an LLM, you must provide a configuration to customize its usage. If no configuration is supplied, a default configuration will be applied, and `OpenAI` will be used as the LLM.
For a comprehensive list of available parameters for LLM configuration, please refer to [Config](./config).
To view all supported LLMs, visit [Supported LLMs](./models).
<CardGroup cols={3}>
<Card title="OpenAI" href="/open-source-typescript/components/llms/models/openai"></Card>
<Card title="Anthropic" href="/open-source-typescript/components/llms/models/anthropic"></Card>
<Card title="Groq" href="/open-source-typescript/components/llms/models/groq"></Card>
</CardGroup>
## Structured vs Unstructured Outputs
Mem0 supports two types of OpenAI LLM formats, each with its own strengths and use cases:
### Structured Outputs
Structured outputs are LLMs that align with OpenAI's structured outputs model:
- **Optimized for:** Returning structured responses (e.g., JSON objects)
- **Benefits:** Precise, easily parseable data
- **Ideal for:** Data extraction, form filling, API responses
- **Learn more:** [OpenAI Structured Outputs Guide](https://platform.openai.com/docs/guides/structured-outputs/introduction)
### Unstructured Outputs
Unstructured outputs correspond to OpenAI's standard, free-form text model:
- **Flexibility:** Returns open-ended, natural language responses
- **Customization:** Use the `response_format` parameter to guide output
- **Trade-off:** Less efficient than structured outputs for specific data needs
- **Best for:** Creative writing, explanations, general conversation
Choose the format that best suits your application's requirements for optimal performance and usability.

View File

@@ -0,0 +1,100 @@
---
title: Configurations
icon: "gear"
iconType: "solid"
---
## How to define configurations?
The `config` is defined as a TypeScript object with two main keys:
- `vectorStore`: Specifies the vector database provider and its configuration
- `provider`: The name of the vector database (e.g., "memory", "qdrant", "redis", "pgvector")
- `config`: A nested object containing provider-specific settings
## In-Memory Storage Option
We also support an in-memory storage option for the vector store, which is useful for reduced overhead and faster access times. Here's how to configure it:
### Example for In-Memory Storage
```typescript
import { Memory } from 'mem0ai/oss';
const configMemory = {
vectorStore: {
provider: 'memory',
config: {
collectionName: 'memories',
dimension: 1536,
},
},
};
const memory = new Memory(configMemory);
await memory.add("Your text here", { userId: "user", metadata: { category: "example" } });
```
## How to Use Config
Here's a general example of how to use the config with Mem0:
### Example for Qdrant
```typescript
import { Memory } from 'mem0ai/oss';
const config = {
vectorStore: {
provider: 'qdrant',
config: {
collectionName: 'memories',
embeddingModelDims: 1536,
host: 'localhost',
port: 6333,
url: 'https://your-qdrant-url.com',
apiKey: 'your-qdrant-api-key',
onDisk: true,
},
},
};
const memory = new Memory(config);
await memory.add("Your text here", { userId: "user", metadata: { category: "example" } });
```
## Why is Config Needed?
Config is essential for:
1. Specifying which vector database to use.
2. Providing necessary connection details (e.g., host, port, credentials).
3. Customizing database-specific settings (e.g., collection name, path).
4. Ensuring proper initialization and connection to your chosen vector store.
## Master List of All Params in Config
Here's a comprehensive list of all parameters that can be used across different vector databases:
| Parameter | Description |
|------------------------|--------------------------------------|
| `collectionName` | Name of the collection |
| `dimension` | Dimensions of the embedding model |
| `host` | Host where the server is running |
| `port` | Port where the server is running |
| `embeddingModelDims` | Dimensions of the embedding model |
| `url` | URL for the Qdrant server |
| `apiKey` | API key for the Qdrant server |
| `path` | Path for the Qdrant server |
| `onDisk` | Enable persistent storage (for Qdrant) |
| `redisUrl` | URL for the Redis server |
| `username` | Username for Redis connection |
| `password` | Password for Redis connection |
## Customizing Config
Each vector database has its own specific configuration requirements. To customize the config for your chosen vector store:
1. Identify the vector database you want to use from [supported vector databases](./dbs).
2. Refer to the `Config` section in the respective vector database's documentation.
3. Include only the relevant parameters for your chosen database in the `config` object.
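For instance, here is a minimal sketch for Redis that includes only the parameters that store needs (the connection details are placeholder assumptions):
```typescript
import { Memory } from 'mem0ai/oss';

const config = {
  vectorStore: {
    provider: 'redis',
    config: {
      collectionName: 'memories',
      embeddingModelDims: 1536,
      redisUrl: 'redis://localhost:6379', // assumed local Redis instance
    },
  },
};
const memory = new Memory(config);
```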
## Supported Vector Databases
For detailed information on configuring specific vector databases, please visit the [Supported Vector Databases](./dbs) section. There you'll find individual pages for each supported vector store with provider-specific usage examples and configuration details.

View File

@@ -0,0 +1,44 @@
[pgvector](https://github.com/pgvector/pgvector) is open-source vector similarity search for Postgres. After connecting with Postgres, run `CREATE EXTENSION IF NOT EXISTS vector;` to create the vector extension.
### Usage
Here's how to configure pgvector in your application:
```typescript
import { Memory } from 'mem0ai/oss';
const config = {
vectorStore: {
provider: 'pgvector',
config: {
collectionName: 'memories',
dbname: 'vectordb',
user: 'postgres',
password: 'postgres',
host: 'localhost',
port: 5432,
embeddingModelDims: 1536,
hnsw: true,
},
},
};
const memory = new Memory(config);
await memory.add("Likes to play cricket on weekends", { userId: "alice", metadata: { category: "hobbies" } });
```
### Config
Here are the parameters available for configuring pgvector:
| Parameter | Description | Default Value |
|------------------------|--------------------------------------------------|---------------|
| `dbname` | The name of the database | `postgres` |
| `collectionName` | The name of the collection | `mem0` |
| `embeddingModelDims` | Dimensions of the embedding model | `1536` |
| `user`                 | Username to connect to the database              | `undefined` |
| `password`             | Password to connect to the database              | `undefined` |
| `host`                 | The host where the Postgres server is running    | `undefined` |
| `port`                 | The port where the Postgres server is running    | `undefined` |
| `hnsw`                 | Enable HNSW indexing                             | `false` |

View File

@@ -0,0 +1,42 @@
[Qdrant](https://qdrant.tech/) is an open-source vector search engine. It is designed to work with large-scale datasets and provides a high-performance search engine for vector data.
### Usage
Here's how to configure Qdrant in your application:
```typescript
import { Memory } from 'mem0ai/oss';
const config = {
vectorStore: {
provider: 'qdrant',
config: {
collectionName: 'memories',
embeddingModelDims: 1536,
host: 'localhost',
port: 6333,
url: 'https://your-qdrant-url.com',
apiKey: 'your-qdrant-api-key',
onDisk: true,
},
},
};
const memory = new Memory(config);
await memory.add("Likes to play cricket on weekends", { userId: "alice", metadata: { category: "hobbies" } });
```
### Config
Let's see the available parameters for the `qdrant` config:
| Parameter | Description | Default Value |
|------------------------|--------------------------------------------------|---------------|
| `collectionName` | The name of the collection to store the vectors | `mem0` |
| `embeddingModelDims` | Dimensions of the embedding model | `1536` |
| `host`                 | The host where the Qdrant server is running      | `undefined` |
| `port`                 | The port where the Qdrant server is running      | `undefined` |
| `path`                 | Path for the Qdrant database                     | `/tmp/qdrant` |
| `url`                  | Full URL for the Qdrant server                   | `undefined` |
| `apiKey`               | API key for the Qdrant server                    | `undefined` |
| `onDisk`               | For enabling persistent storage                  | `false` |

View File

@@ -0,0 +1,47 @@
[Redis](https://redis.io/) is a scalable, real-time database that can store, search, and analyze vector data.
### Installation
```bash
npm install redis
```
Redis Stack using Docker:
```bash
docker run -d --name redis-stack -p 6379:6379 -p 8001:8001 redis/redis-stack:latest
```
### Usage
Here's how to configure Redis in your application:
```typescript
import { Memory } from 'mem0ai/oss';
const config = {
vectorStore: {
provider: 'redis',
config: {
collectionName: 'memories',
embeddingModelDims: 1536,
redisUrl: 'redis://localhost:6379',
username: 'your-redis-username',
password: 'your-redis-password',
},
},
};
const memoryRedis = new Memory(config);
await memoryRedis.add("Likes to play cricket on weekends", { userId: "alice", metadata: { category: "hobbies" } });
```
### Config
Let's see the available parameters for the `redis` config:
| Parameter | Description | Default Value |
|------------------------|--------------------------------------------------|---------------|
| `collectionName` | The name of the collection to store the vectors | `mem0` |
| `embeddingModelDims` | Dimensions of the embedding model | `1536` |
| `redisUrl`             | The URL of the Redis server                      | `undefined` |
| `username`             | Username for Redis connection                    | `undefined` |
| `password`             | Password for Redis connection                    | `undefined` |

View File

@@ -0,0 +1,36 @@
---
title: Overview
icon: "info"
iconType: "solid"
---
Mem0 includes built-in support for various popular vector databases. Memory can use the vector database you provide, tailoring storage to your specific needs.
## Supported Vector Databases
See the list of supported vector databases below.
<CardGroup cols={2}>
<Card title="Memory" href="/components/vectordbs/dbs/memory"></Card>
<Card title="Qdrant" href="/components/vectordbs/dbs/qdrant"></Card>
<Card title="Redis" href="/components/vectordbs/dbs/redis"></Card>
<Card title="Pgvector" href="/components/vectordbs/dbs/pgvector"></Card>
</CardGroup>
## Usage
To utilize a vector database, you must provide a configuration to customize its usage. If no configuration is supplied, a default configuration will be applied, and the in-memory store (`memory`) will be used as the vector database.
For a comprehensive list of available parameters for vector database configuration, please refer to [Config](./config).
## Common issues
### Using a model with different dimensions
If you are using a custom model with dimensions other than 1536 (for example, 768), you may encounter the following error:
`ValueError: shapes (0,1536) and (768,) not aligned: 1536 (dim 1) != 768 (dim 0)`
Add `embeddingModelDims: 768` to the `vectorStore` config to overcome this issue.
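For example, here is a minimal sketch assuming a Qdrant store and a hypothetical 768-dimension embedding model:
```typescript
import { Memory } from 'mem0ai/oss';

const config = {
  embedder: {
    provider: 'openai',
    config: {
      model: 'your-768-dim-model', // hypothetical custom embedding model
      embeddingDims: 768,
    },
  },
  vectorStore: {
    provider: 'qdrant',
    config: {
      collectionName: 'memories',
      embeddingModelDims: 768, // must match the embedder's output dimensions
      host: 'localhost',
      port: 6333,
    },
  },
};
const memory = new Memory(config);
```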

View File

@@ -0,0 +1,141 @@
---
title: Custom Prompts
description: 'Enhance your product experience by adding custom prompts tailored to your needs'
icon: "pencil"
iconType: "solid"
---
## Introduction to Custom Prompts
Custom prompts allow you to tailor the behavior of your Mem0 instance to specific use cases or domains.
By defining a custom prompt, you can control how information is extracted, processed, and stored in your memory system.
To create an effective custom prompt:
1. Be specific about the information to extract.
2. Provide few-shot examples to guide the LLM.
3. Ensure examples follow the format shown below.
Example of a custom prompt:
```typescript
const customPrompt = `
Please only extract entities containing customer support information, order details, and user information.
Here are some few-shot examples:
Input: Hi.
Output: {"facts" : []}
Input: The weather is nice today.
Output: {"facts" : []}
Input: My order #12345 hasn't arrived yet.
Output: {"facts" : ["Order #12345 not received"]}
Input: I'm John Doe, and I'd like to return the shoes I bought last week.
Output: {"facts" : ["Customer name: John Doe", "Wants to return shoes", "Purchase made last week"]}
Input: I ordered a red shirt, size medium, but received a blue one instead.
Output: {"facts" : ["Ordered red shirt, size medium", "Received blue shirt instead"]}
Return the facts and customer information in a json format as shown above.
`;
```
Here we initialize the custom prompt in the config:
```typescript
import { Memory } from 'mem0ai/oss';
const config = {
version: 'v1.1',
embedder: {
provider: 'openai',
config: {
apiKey: process.env.OPENAI_API_KEY || '',
model: 'text-embedding-3-small',
},
},
vectorStore: {
provider: 'memory',
config: {
collectionName: 'memories',
dimension: 1536,
},
},
llm: {
provider: 'openai',
config: {
apiKey: process.env.OPENAI_API_KEY || '',
model: 'gpt-4-turbo-preview',
temperature: 0.2,
maxTokens: 1500,
},
},
customPrompt: customPrompt,
historyDbPath: 'memory.db',
};
const memory = new Memory(config);
```
### Example 1
In this example, we are adding a memory of a user ordering a laptop. As seen in the output, the custom prompt is used to extract the relevant information from the user's message.
<CodeGroup>
```typescript Code
await memory.add('Yesterday, I ordered a laptop, the order id is 12345', { userId: 'user123' });
```
```json Output
{
"results": [
{
"id": "c03c9045-df76-4949-bbc5-d5dc1932aa5c",
"memory": "Ordered a laptop",
"metadata": {}
},
{
"id": "cbb1fe73-0bf1-4067-8c1f-63aa53e7b1a4",
"memory": "Order ID: 12345",
"metadata": {}
},
{
"id": "e5f2a012-3b45-4c67-9d8e-123456789abc",
"memory": "Order placed yesterday",
"metadata": {}
}
]
}
```
</CodeGroup>
### Example 2
In this example, we are adding a memory of a user liking to go on hikes. Since this message is not relevant to the use case defined in the custom prompt,
the memory is not added.
<CodeGroup>
```typescript Code
await memory.add('I like going on hikes', { userId: 'user123' });
```
```json Output
{
"results": []
}
```
</CodeGroup>
You can also use custom prompts with chat messages:
```typescript
const messages = [
{ role: 'user', content: 'Hi, I ordered item #54321 last week but haven\'t received it yet.' },
{ role: 'assistant', content: 'I understand you\'re concerned about your order #54321. Let me help track that for you.' }
];
await memory.add(messages, { userId: 'user123' });
```
The custom prompt will process both the user and assistant messages to extract relevant information according to the defined format.

View File

@@ -0,0 +1,338 @@
---
title: Node.js Guide
description: 'Get started with Mem0 quickly!'
icon: "node"
iconType: "solid"
---
> Welcome to the Mem0 quickstart guide. This guide will help you get up and running with Mem0 in no time.
## Installation
To install Mem0, you can use npm. Run the following command in your terminal:
```bash
npm install mem0ai
```
## Basic Usage
### Initialize Mem0
<Tabs>
<Tab title="Basic">
```typescript
import { Memory } from 'mem0ai/oss';
const memory = new Memory();
```
</Tab>
<Tab title="Advanced">
If you want to run Mem0 in production, initialize using the following method:
```typescript
import { Memory } from 'mem0ai/oss';
const memory = new Memory({
version: 'v1.1',
embedder: {
provider: 'openai',
config: {
apiKey: process.env.OPENAI_API_KEY || '',
model: 'text-embedding-3-small',
},
},
vectorStore: {
provider: 'memory',
config: {
collectionName: 'memories',
dimension: 1536,
},
},
llm: {
provider: 'openai',
config: {
apiKey: process.env.OPENAI_API_KEY || '',
model: 'gpt-4-turbo-preview',
},
},
historyDbPath: 'memory.db',
});
```
</Tab>
</Tabs>
### Store a Memory
<CodeGroup>
```typescript Code
// For a user
const result = await memory.add('Hi, my name is John and I am a software engineer', { userId: 'user123' });
console.log(result);
// const messages = [
// {"role": "user", "content": "Hi, I'm Alex. I like to play cricket on weekends."},
// {"role": "assistant", "content": "Hello Alex! It's great to know that you enjoy playing cricket on weekends. I'll remember that for future reference."}
// ]
// await memory.add(messages, { userId: 'user123' });
```
```json Output
{
"results": [
{
"id": "c03c9045-df76-4949-bbc5-d5dc1932aa5c",
"memory": "Name is John",
"metadata": [Object]
},
{
"id": "cbb1fe73-0bf1-4067-8c1f-63aa53e7b1a4",
"memory": "Is a software",
"metadata": [Object]
}
]
}
```
</CodeGroup>
### Retrieve Memories
<CodeGroup>
```typescript Code
// Get all memories
const allMemories = await memory.getAll({ userId: 'user123' });
console.log(allMemories)
```
```json Output
{
"results": [
{
"id": "892db2ae-06d9-49e5-8b3e-585ef9b85b8e",
"memory": "Name is Alex Jones",
"hash": "1a271c007316c94377175ee80e746a19",
"createdAt": "2025-02-27T16:33:20.557Z",
"updatedAt": "2025-02-27T16:33:27.051Z",
"metadata": {},
"userId": "user123"
},
{
"id": "475bde34-21e6-42ab-8bef-0ab84474f156",
"memory": "Likes to play cricket on weekends",
"hash": "285d07801ae42054732314853e9eadd7",
"createdAt": "2025-02-27T16:33:20.560Z",
"updatedAt": undefined,
"metadata": {},
"userId": "user123"
}
]
}
```
</CodeGroup>
<br />
<CodeGroup>
```typescript Code
// Get a single memory by ID
const singleMemory = await memory.get('6c1c11a2-4fbc-4a2b-8e8a-d60e67e57aaa');
console.log(singleMemory);
```
```json Output
{
"id": "6c1c11a2-4fbc-4a2b-8e8a-d60e67e57aaa",
"memory": "Name is Alex",
"hash": "d0fccc8fa47f7a149ee95750c37bb0ca",
"createdAt": "2025-02-27T16:37:04.378Z",
"updatedAt": undefined,
"metadata": {},
"userId": "user123"
}
```
</CodeGroup>
### Search Memories
<CodeGroup>
```typescript Code
const result = await memory.search('What do you know about me?', { userId: 'user123' });
console.log(result);
```
```json Output
{
"results": [
{
"id": "28c3eee7-186e-4644-8c5d-13b306233d4e",
"memory": "Name is Alex",
"hash": "d0fccc8fa47f7a149ee95750c37bb0ca",
"createdAt": "2025-02-27T16:43:56.310Z",
"updatedAt": undefined,
"score": 0.08920719231944799,
"metadata": {},
"userId": "user123"
},
{
"id": "f3433da0-45f4-444f-a4bc-59a170890a1f",
"memory": "Likes to play cricket on weekends",
"hash": "285d07801ae42054732314853e9eadd7",
"createdAt": "2025-02-27T16:43:56.314Z",
"updatedAt": undefined,
"score": 0.06869761478135689,
"metadata": {},
"userId": "user123"
}
]
}
```
</CodeGroup>
### Update a Memory
<CodeGroup>
```typescript Code
const result = await memory.update(
'6c1c11a2-4fbc-4a2b-8e8a-d60e67e57aaa',
'I love India, it is my favorite country.'
);
console.log(result);
```
```json Output
{
"message": "Memory updated successfully!"
}
```
</CodeGroup>
### Memory History
<CodeGroup>
```typescript Code
const history = await memory.history('d2cc4cef-e0c1-47dd-948a-677030482e9e');
console.log(history);
```
```json Output
[
{
"id": 39,
"memory_id": "d2cc4cef-e0c1-47dd-948a-677030482e9e",
"previous_value": "Name is Alex",
"new_value": "Name is Alex Jones",
"action": "UPDATE",
"created_at": "2025-02-27T16:46:15.853Z",
"updated_at": "2025-02-27T16:46:20.909Z",
"is_deleted": 0
},
{
"id": 37,
"memory_id": "d2cc4cef-e0c1-47dd-948a-677030482e9e",
"previous_value": null,
"new_value": "Name is Alex",
"action": "ADD",
"created_at": "2025-02-27T16:46:15.853Z",
"updated_at": null,
"is_deleted": 0
}
]
```
</CodeGroup>
### Delete Memory
```typescript
// Delete a memory by id
await memory.delete('bf4d4092-cf91-4181-bfeb-b6fa2ed3061b');
// Delete all memories for a user
await memory.deleteAll({ userId: 'alice' });
```
### Reset Memory
```typescript
await memory.reset(); // Reset all memories
```
## Configuration Parameters
Mem0 offers extensive configuration options to customize its behavior according to your needs. These configurations span across different components like vector stores, language models, embedders, and graph stores.
<AccordionGroup>
<Accordion title="Vector Store Configuration">
| Parameter | Description | Default |
|-------------|---------------------------------|-------------|
| `provider` | Vector store provider (e.g., "memory") | "memory" |
| `host` | Host address | "localhost" |
| `port` | Port number | undefined |
</Accordion>
<Accordion title="LLM Configuration">
| Parameter | Description | Provider |
|-----------------------|-----------------------------------------------|-------------------|
| `provider` | LLM provider (e.g., "openai", "anthropic") | All |
| `model` | Model to use | All |
| `temperature` | Temperature of the model | All |
| `apiKey` | API key to use | All |
| `maxTokens` | Tokens to generate | All |
| `topP` | Probability threshold for nucleus sampling | All |
| `topK` | Number of highest probability tokens to keep | All |
| `openaiBaseUrl` | Base URL for OpenAI API | OpenAI |
</Accordion>
<Accordion title="Embedder Configuration">
| Parameter | Description | Default |
|-------------|---------------------------------|------------------------------|
| `provider` | Embedding provider | "openai" |
| `model` | Embedding model to use | "text-embedding-3-small" |
| `apiKey`    | API key for embedding service   | undefined                    |
</Accordion>
<Accordion title="General Configuration">
| Parameter | Description | Default |
|------------------|--------------------------------------|----------------------------|
| `historyDbPath` | Path to the history database | "{mem0_dir}/history.db" |
| `version` | API version | "v1.0" |
| `customPrompt`   | Custom prompt for memory processing  | undefined                  |
</Accordion>
<Accordion title="Complete Configuration Example">
```typescript
const config = {
version: 'v1.1',
embedder: {
provider: 'openai',
config: {
apiKey: process.env.OPENAI_API_KEY || '',
model: 'text-embedding-3-small',
},
},
vectorStore: {
provider: 'memory',
config: {
collectionName: 'memories',
dimension: 1536,
},
},
llm: {
provider: 'openai',
config: {
apiKey: process.env.OPENAI_API_KEY || '',
model: 'gpt-4-turbo-preview',
},
},
historyDbPath: 'memory.db',
customPrompt: "I'm a virtual assistant. I'm here to help you with your queries.",
}
```
</Accordion>
</AccordionGroup>
If you have any questions, please feel free to reach out to us using one of the following methods:
<Snippet file="get-help.mdx" />

View File

@@ -0,0 +1,493 @@
---
title: Python Guide
description: 'Get started with Mem0 quickly!'
icon: "python"
iconType: "solid"
---
> Welcome to the Mem0 quickstart guide. This guide will help you get up and running with Mem0 in no time.
## Installation
To install Mem0, you can use pip. Run the following command in your terminal:
```bash
pip install mem0ai
```
## Basic Usage
### Initialize Mem0
<Tabs>
<Tab title="Basic">
```python
from mem0 import Memory
m = Memory()
```
</Tab>
<Tab title="Advanced">
If you want to run Mem0 in production, initialize using the following method:
Run Qdrant first:
```bash
docker pull qdrant/qdrant
docker run -p 6333:6333 -p 6334:6334 \
-v $(pwd)/qdrant_storage:/qdrant/storage:z \
qdrant/qdrant
```
Then, instantiate memory with qdrant server:
```python
from mem0 import Memory
config = {
"vector_store": {
"provider": "qdrant",
"config": {
"host": "localhost",
"port": 6333,
}
},
}
m = Memory.from_config(config)
```
</Tab>
<Tab title="Advanced (Graph Memory)">
```python
from mem0 import Memory
config = {
"graph_store": {
"provider": "neo4j",
"config": {
"url": "neo4j+s://---",
"username": "neo4j",
"password": "---"
}
},
"version": "v1.1"
}
m = Memory.from_config(config_dict=config)
```
</Tab>
</Tabs>
### Store a Memory
<CodeGroup>
```python Code
# For a user
result = m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
# messages = [
# {"role": "user", "content": "Hi, I'm Alex. I like to play cricket on weekends."},
# {"role": "assistant", "content": "Hello Alex! It's great to know that you enjoy playing cricket on weekends. I'll remember that for future reference."}
# ]
# m.add(messages, user_id="alice")
```
```json Output
{
"results": [
{"id": "bf4d4092-cf91-4181-bfeb-b6fa2ed3061b", "memory": "Likes to play cricket on weekends", "event": "ADD"}
],
"relations": [
{"source": "alice", "relationship": "likes_to_play", "target": "cricket"},
{"source": "alice", "relationship": "plays_on", "target": "weekends"}
]
}
```
</CodeGroup>
### Retrieve Memories
<CodeGroup>
```python Code
# Get all memories
all_memories = m.get_all(user_id="alice")
```
```json Output
{
"results": [
{
"id": "bf4d4092-cf91-4181-bfeb-b6fa2ed3061b",
"memory": "Likes to play cricket on weekends",
"hash": "285d07801ae42054732314853e9eadd7",
"metadata": {"category": "hobbies"},
"created_at": "2024-10-28T12:32:07.744891-07:00",
"updated_at": None,
"user_id": "alice"
}
],
"relations": [
{"source": "alice", "relationship": "likes_to_play", "target": "cricket"},
{"source": "alice", "relationship": "plays_on", "target": "weekends"}
]
}
```
</CodeGroup>
<br />
<CodeGroup>
```python Code
# Get a single memory by ID
specific_memory = m.get("bf4d4092-cf91-4181-bfeb-b6fa2ed3061b")
```
```json Output
{
"id": "bf4d4092-cf91-4181-bfeb-b6fa2ed3061b",
"memory": "Likes to play cricket on weekends",
"hash": "285d07801ae42054732314853e9eadd7",
"metadata": {"category": "hobbies"},
"created_at": "2024-10-28T12:32:07.744891-07:00",
"updated_at": None,
"user_id": "alice"
}
```
</CodeGroup>
### Search Memories
<CodeGroup>
```python Code
related_memories = m.search(query="What are Alice's hobbies?", user_id="alice")
```
```json Output
{
"results": [
{
"id": "bf4d4092-cf91-4181-bfeb-b6fa2ed3061b",
"memory": "Likes to play cricket on weekends",
"hash": "285d07801ae42054732314853e9eadd7",
"metadata": {"category": "hobbies"},
"score": 0.30808347,
"created_at": "2024-10-28T12:32:07.744891-07:00",
"updated_at": None,
"user_id": "alice"
}
],
"relations": [
{"source": "alice", "relationship": "plays_on", "target": "weekends"},
{"source": "alice", "relationship": "likes_to_play", "target": "cricket"}
]
}
```
</CodeGroup>
### Update a Memory
<CodeGroup>
```python Code
result = m.update(memory_id="bf4d4092-cf91-4181-bfeb-b6fa2ed3061b", data="Likes to play tennis on weekends")
```
```json Output
{'message': 'Memory updated successfully!'}
```
</CodeGroup>
### Memory History
<CodeGroup>
```python Code
history = m.history(memory_id="bf4d4092-cf91-4181-bfeb-b6fa2ed3061b")
```
```json Output
[
{
"id": "96d2821d-e551-4089-aa57-9398c421d450",
"memory_id": "bf4d4092-cf91-4181-bfeb-b6fa2ed3061b",
"old_memory": None,
"new_memory": "Likes to play cricket on weekends",
"event": "ADD",
"created_at": "2024-10-28T12:32:07.744891-07:00",
"updated_at": None
},
{
"id": "3db4cb58-c0f1-4dd0-b62a-8123068ebfe7",
"memory_id": "bf4d4092-cf91-4181-bfeb-b6fa2ed3061b",
"old_memory": "Likes to play cricket on weekends",
"new_memory": "Likes to play tennis on weekends",
"event": "UPDATE",
"created_at": "2024-10-28T12:32:07.744891-07:00",
"updated_at": "2024-10-28T13:05:46.987978-07:00"
}
]
```
</CodeGroup>
### Delete Memory
```python
# Delete a memory by id
m.delete(memory_id="bf4d4092-cf91-4181-bfeb-b6fa2ed3061b")
# Delete all memories for a user
m.delete_all(user_id="alice")
```
### Reset Memory
```python
m.reset() # Reset all memories
```
## Configuration Parameters
Mem0 offers extensive configuration options to customize its behavior according to your needs. These configurations span across different components like vector stores, language models, embedders, and graph stores.
<AccordionGroup>
<Accordion title="Vector Store Configuration">
| Parameter | Description | Default |
|-------------|---------------------------------|-------------|
| `provider` | Vector store provider (e.g., "qdrant") | "qdrant" |
| `host` | Host address | "localhost" |
| `port` | Port number | 6333 |
</Accordion>
<Accordion title="LLM Configuration">
| Parameter | Description | Provider |
|-----------------------|-----------------------------------------------|-------------------|
| `provider` | LLM provider (e.g., "openai", "anthropic") | All |
| `model` | Model to use | All |
| `temperature` | Temperature of the model | All |
| `api_key` | API key to use | All |
| `max_tokens` | Tokens to generate | All |
| `top_p` | Probability threshold for nucleus sampling | All |
| `top_k` | Number of highest probability tokens to keep | All |
| `http_client_proxies` | Allow proxy server settings | AzureOpenAI |
| `models` | List of models | Openrouter |
| `route` | Routing strategy | Openrouter |
| `openrouter_base_url` | Base URL for Openrouter API | Openrouter |
| `site_url` | Site URL | Openrouter |
| `app_name` | Application name | Openrouter |
| `ollama_base_url` | Base URL for Ollama API | Ollama |
| `openai_base_url` | Base URL for OpenAI API | OpenAI |
| `azure_kwargs` | Azure LLM args for initialization | AzureOpenAI |
| `deepseek_base_url` | Base URL for DeepSeek API | DeepSeek |
</Accordion>
<Accordion title="Embedder Configuration">
| Parameter | Description | Default |
|-------------|---------------------------------|------------------------------|
| `provider` | Embedding provider | "openai" |
| `model` | Embedding model to use | "text-embedding-3-small" |
| `api_key` | API key for embedding service | None |
</Accordion>
<Accordion title="Graph Store Configuration">
| Parameter | Description | Default |
|-------------|---------------------------------|-------------|
| `provider` | Graph store provider (e.g., "neo4j") | "neo4j" |
| `url` | Connection URL | None |
| `username` | Authentication username | None |
| `password` | Authentication password | None |
</Accordion>
<Accordion title="General Configuration">
| Parameter | Description | Default |
|------------------|--------------------------------------|----------------------------|
| `history_db_path` | Path to the history database | "{mem0_dir}/history.db" |
| `version` | API version | "v1.0" |
| `custom_prompt` | Custom prompt for memory processing | None |
</Accordion>
<Accordion title="Complete Configuration Example">
```python
config = {
"vector_store": {
"provider": "qdrant",
"config": {
"host": "localhost",
"port": 6333
}
},
"llm": {
"provider": "openai",
"config": {
"api_key": "your-api-key",
"model": "gpt-4"
}
},
"embedder": {
"provider": "openai",
"config": {
"api_key": "your-api-key",
"model": "text-embedding-3-small"
}
},
"graph_store": {
"provider": "neo4j",
"config": {
"url": "neo4j+s://your-instance",
"username": "neo4j",
"password": "password"
}
},
"history_db_path": "/path/to/history.db",
"version": "v1.1",
"custom_prompt": "Optional custom prompt for memory processing"
}
```
</Accordion>
</AccordionGroup>
## Run Mem0 Locally
Please refer to the example [Mem0 with Ollama](../examples/mem0-with-ollama) to run Mem0 locally.
## Chat Completion
Mem0 can be easily integrated into chat applications to enhance conversational agents with structured memory. Mem0's APIs are designed to be compatible with OpenAI's, with the goal of making it easy to leverage Mem0 in applications you may have already built.
If you have a `Mem0 API key`, you can use it to initialize the client. Alternatively, you can initialize Mem0 without an API key if you're using it locally.
Mem0 supports several language models (LLMs) through integration with various [providers](https://litellm.vercel.app/docs/providers).
## Use Mem0 Platform
```python
from mem0.proxy.main import Mem0
client = Mem0(api_key="m0-xxx")
# First interaction: Storing user preferences
messages = [
{
"role": "user",
"content": "I love indian food but I cannot eat pizza since allergic to cheese."
},
]
user_id = "alice"
chat_completion = client.chat.completions.create(messages=messages, model="gpt-4o-mini", user_id=user_id)
# Memory saved after this will look like: "Loves Indian food. Allergic to cheese and cannot eat pizza."
# Second interaction: Leveraging stored memory
messages = [
{
"role": "user",
"content": "Suggest restaurants in San Francisco to eat.",
}
]
chat_completion = client.chat.completions.create(messages=messages, model="gpt-4o-mini", user_id=user_id)
print(chat_completion.choices[0].message.content)
# Answer: You might enjoy Indian restaurants in San Francisco, such as Amber India, Dosa, or Curry Up Now, which offer delicious options without cheese.
```
In this example, you can see how the second response is tailored based on the information provided in the first interaction. Mem0 remembers the user's preference for Indian food and their cheese allergy, using this information to provide more relevant and personalized restaurant suggestions in San Francisco.
### Use Mem0 OSS
```python
config = {
"vector_store": {
"provider": "qdrant",
"config": {
"host": "localhost",
"port": 6333,
}
},
}
client = Mem0(config=config)
chat_completion = client.chat.completions.create(
messages=[
{
"role": "user",
"content": "What's the capital of France?",
}
],
model="gpt-4o",
)
```
## APIs
Get started with using Mem0 APIs in your applications. For more details, refer to the [Platform](/platform/quickstart).
Here is an example of how to use Mem0 APIs:
```python
import os
from mem0 import MemoryClient
os.environ["MEM0_API_KEY"] = "your-api-key"
client = MemoryClient() # get api_key from https://app.mem0.ai/
# Store messages
messages = [
{"role": "user", "content": "Hi, I'm Alex. I'm a vegetarian and I'm allergic to nuts."},
{"role": "assistant", "content": "Hello Alex! I've noted that you're a vegetarian and have a nut allergy. I'll keep this in mind for any food-related recommendations or discussions."}
]
result = client.add(messages, user_id="alex")
print(result)
# Retrieve memories
all_memories = client.get_all(user_id="alex")
print(all_memories)
# Search memories
query = "What do you know about me?"
related_memories = client.search(query, user_id="alex")
# Get memory history
history = client.history(memory_id="m1")
print(history)
```
## Contributing
We welcome contributions to Mem0! Here's how you can contribute:
1. Fork the repository and create your branch from `main`.
2. Clone the forked repository to your local machine.
3. Install the project dependencies:
```bash
poetry install
```
4. Install pre-commit hooks:
```bash
pip install pre-commit # If pre-commit is not already installed
pre-commit install
```
5. Make your changes and ensure they adhere to the project's coding standards.
6. Run the tests locally:
```bash
poetry run pytest
```
7. If all tests pass, commit your changes and push to your fork.
8. Open a pull request with a clear title and description.
Please make sure your code follows our coding conventions and is well-documented. We appreciate your contributions to make Mem0 better!
If you have any questions, please feel free to reach out to us using one of the following methods:
<Snippet file="get-help.mdx" />

View File

@@ -1,493 +1,29 @@
---
title: Overview
icon: "info"
iconType: "solid"
---
Welcome to Mem0 Open Source - a powerful, self-hosted memory management solution for AI agents and assistants. With Mem0 OSS, you get full control over your infrastructure while maintaining complete customization flexibility.
We offer two SDKs for Python and Node.js.
Check out our [GitHub repository](https://mem0.dev/gd) to explore the source code.
<CardGroup cols={2}>
<Card title="Python SDK Guide" icon="python" href="/open-source/python-quickstart">
Learn more about Mem0 OSS Python SDK
</Card>
<Card title="Node.js SDK Guide" icon="node" href="/open-source-typescript/quickstart">
Learn more about Mem0 OSS Node.js SDK
</Card>
</CardGroup>
## Key Features
- **Full Infrastructure Control**: Host Mem0 on your own servers
- **Customizable Implementation**: Modify and extend functionality as needed
- **Local Development**: Perfect for development and testing
- **No Vendor Lock-in**: Own your data and infrastructure
- **Community Driven**: Benefit from and contribute to community improvements
<Tabs>
<Tab title="Basic">
```python
from mem0 import Memory
m = Memory()
```
</Tab>
<Tab title="Advanced">
If you want to run Mem0 in production, initialize using the following method:
Run Qdrant first:
```bash
docker pull qdrant/qdrant
docker run -p 6333:6333 -p 6334:6334 \
-v $(pwd)/qdrant_storage:/qdrant/storage:z \
qdrant/qdrant
```
Then, instantiate memory with qdrant server:
```python
from mem0 import Memory
config = {
"vector_store": {
"provider": "qdrant",
"config": {
"host": "localhost",
"port": 6333,
}
},
}
m = Memory.from_config(config)
```
</Tab>
<Tab title="Advanced (Graph Memory)">
```python
from mem0 import Memory
config = {
"graph_store": {
"provider": "neo4j",
"config": {
"url": "neo4j+s://---",
"username": "neo4j",
"password": "---"
}
},
"version": "v1.1"
}
m = Memory.from_config(config_dict=config)
```
</Tab>
</Tabs>
### Store a Memory
<CodeGroup>
```python Code
# For a user
result = m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
# messages = [
# {"role": "user", "content": "Hi, I'm Alex. I like to play cricket on weekends."},
# {"role": "assistant", "content": "Hello Alex! It's great to know that you enjoy playing cricket on weekends. I'll remember that for future reference."}
# ]
# client.add(messages, user_id="alice")
```
```json Output
{
"results": [
{"id": "bf4d4092-cf91-4181-bfeb-b6fa2ed3061b", "memory": "Likes to play cricket on weekends", "event": "ADD"}
],
"relations": [
{"source": "alice", "relationship": "likes_to_play", "target": "cricket"},
{"source": "alice", "relationship": "plays_on", "target": "weekends"}
]
}
```
</CodeGroup>
### Retrieve Memories
<CodeGroup>
```python Code
# Get all memories
all_memories = m.get_all(user_id="alice")
```
```json Output
{
"results": [
{
"id": "bf4d4092-cf91-4181-bfeb-b6fa2ed3061b",
"memory": "Likes to play cricket on weekends",
"hash": "285d07801ae42054732314853e9eadd7",
"metadata": {"category": "hobbies"},
"created_at": "2024-10-28T12:32:07.744891-07:00",
"updated_at": None,
"user_id": "alice"
}
],
"relations": [
{"source": "alice", "relationship": "likes_to_play", "target": "cricket"},
{"source": "alice", "relationship": "plays_on", "target": "weekends"}
]
}
```
</CodeGroup>
<br />
<CodeGroup>
```python Code
# Get a single memory by ID
specific_memory = m.get("bf4d4092-cf91-4181-bfeb-b6fa2ed3061b")
```
```json Output
{
"id": "bf4d4092-cf91-4181-bfeb-b6fa2ed3061b",
"memory": "Likes to play cricket on weekends",
"hash": "285d07801ae42054732314853e9eadd7",
"metadata": {"category": "hobbies"},
"created_at": "2024-10-28T12:32:07.744891-07:00",
"updated_at": None,
"user_id": "alice"
}
```
</CodeGroup>
### Search Memories
<CodeGroup>
```python Code
related_memories = m.search(query="What are Alice's hobbies?", user_id="alice")
```
```json Output
{
"results": [
{
"id": "bf4d4092-cf91-4181-bfeb-b6fa2ed3061b",
"memory": "Likes to play cricket on weekends",
"hash": "285d07801ae42054732314853e9eadd7",
"metadata": {"category": "hobbies"},
"score": 0.30808347,
"created_at": "2024-10-28T12:32:07.744891-07:00",
"updated_at": None,
"user_id": "alice"
}
],
"relations": [
{"source": "alice", "relationship": "plays_on", "target": "weekends"},
{"source": "alice", "relationship": "likes_to_play", "target": "cricket"}
]
}
```
</CodeGroup>
### Update a Memory
<CodeGroup>
```python Code
result = m.update(memory_id="bf4d4092-cf91-4181-bfeb-b6fa2ed3061b", data="Likes to play tennis on weekends")
```
```json Output
{'message': 'Memory updated successfully!'}
```
</CodeGroup>
### Memory History
<CodeGroup>
```python Code
history = m.history(memory_id="bf4d4092-cf91-4181-bfeb-b6fa2ed3061b")
```
```json Output
[
{
"id": "96d2821d-e551-4089-aa57-9398c421d450",
"memory_id": "bf4d4092-cf91-4181-bfeb-b6fa2ed3061b",
"old_memory": None,
"new_memory": "Likes to play cricket on weekends",
"event": "ADD",
"created_at": "2024-10-28T12:32:07.744891-07:00",
"updated_at": None
},
{
"id": "3db4cb58-c0f1-4dd0-b62a-8123068ebfe7",
"memory_id": "bf4d4092-cf91-4181-bfeb-b6fa2ed3061b",
"old_memory": "Likes to play cricket on weekends",
"new_memory": "Likes to play tennis on weekends",
"event": "UPDATE",
"created_at": "2024-10-28T12:32:07.744891-07:00",
"updated_at": "2024-10-28T13:05:46.987978-07:00"
}
]
```
</CodeGroup>
### Delete Memory
```python
# Delete a memory by id
m.delete(memory_id="bf4d4092-cf91-4181-bfeb-b6fa2ed3061b")
# Delete all memories for a user
m.delete_all(user_id="alice")
```
### Reset Memory
```python
m.reset() # Reset all memories
```
## Configuration Parameters
Mem0 offers extensive configuration options to customize its behavior according to your needs. These configurations span across different components like vector stores, language models, embedders, and graph stores.
<AccordionGroup>
<Accordion title="Vector Store Configuration">
| Parameter | Description | Default |
|-------------|---------------------------------|-------------|
| `provider` | Vector store provider (e.g., "qdrant") | "qdrant" |
| `host` | Host address | "localhost" |
| `port` | Port number | 6333 |
</Accordion>
<Accordion title="LLM Configuration">
| Parameter | Description | Provider |
|-----------------------|-----------------------------------------------|-------------------|
| `provider` | LLM provider (e.g., "openai", "anthropic") | All |
| `model` | Model to use | All |
| `temperature` | Temperature of the model | All |
| `api_key` | API key to use | All |
| `max_tokens` | Tokens to generate | All |
| `top_p` | Probability threshold for nucleus sampling | All |
| `top_k` | Number of highest probability tokens to keep | All |
| `http_client_proxies` | Allow proxy server settings | AzureOpenAI |
| `models` | List of models | Openrouter |
| `route` | Routing strategy | Openrouter |
| `openrouter_base_url` | Base URL for Openrouter API | Openrouter |
| `site_url` | Site URL | Openrouter |
| `app_name` | Application name | Openrouter |
| `ollama_base_url` | Base URL for Ollama API | Ollama |
| `openai_base_url` | Base URL for OpenAI API | OpenAI |
| `azure_kwargs` | Azure LLM args for initialization | AzureOpenAI |
| `deepseek_base_url` | Base URL for DeepSeek API | DeepSeek |
</Accordion>
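For instance, an LLM configuration built from these parameters for Anthropic might look like the sketch below (the model name is an assumption; substitute whichever Claude model you use):
```python
config = {
    "llm": {
        "provider": "anthropic",
        "config": {
            "model": "claude-3-5-sonnet-20240620",  # assumed model name
            "temperature": 0.1,
            "max_tokens": 2000,
            "api_key": "your-api-key",
        }
    }
}
```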
<Accordion title="Embedder Configuration">
| Parameter | Description | Default |
|-------------|---------------------------------|------------------------------|
| `provider` | Embedding provider | "openai" |
| `model` | Embedding model to use | "text-embedding-3-small" |
| `api_key` | API key for embedding service | None |
</Accordion>
<Accordion title="Graph Store Configuration">
| Parameter | Description | Default |
|-------------|---------------------------------|-------------|
| `provider` | Graph store provider (e.g., "neo4j") | "neo4j" |
| `url` | Connection URL | None |
| `username` | Authentication username | None |
| `password` | Authentication password | None |
</Accordion>
<Accordion title="General Configuration">
| Parameter | Description | Default |
|------------------|--------------------------------------|----------------------------|
| `history_db_path` | Path to the history database | "{mem0_dir}/history.db" |
| `version` | API version | "v1.0" |
| `custom_prompt` | Custom prompt for memory processing | None |
</Accordion>
<Accordion title="Complete Configuration Example">
```python
config = {
"vector_store": {
"provider": "qdrant",
"config": {
"host": "localhost",
"port": 6333
}
},
"llm": {
"provider": "openai",
"config": {
"api_key": "your-api-key",
"model": "gpt-4"
}
},
"embedder": {
"provider": "openai",
"config": {
"api_key": "your-api-key",
"model": "text-embedding-3-small"
}
},
"graph_store": {
"provider": "neo4j",
"config": {
"url": "neo4j+s://your-instance",
"username": "neo4j",
"password": "password"
}
},
"history_db_path": "/path/to/history.db",
"version": "v1.1",
"custom_prompt": "Optional custom prompt for memory processing"
}
```
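To apply this configuration, pass it to `Memory.from_config`:
```python
from mem0 import Memory

# Build the Memory instance from the config dictionary above
m = Memory.from_config(config)
m.add("I like to drink coffee in the morning.", user_id="alice")
```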
</Accordion>
</AccordionGroup>
## Run Mem0 Locally
Please refer to the example [Mem0 with Ollama](../examples/mem0-with-ollama) to run Mem0 locally.
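As a rough sketch of what a local configuration can look like (the model name below is an assumption — use whatever model you have pulled into your Ollama instance; see the linked example for a complete walkthrough):
```python
from mem0 import Memory

config = {
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama3.1:latest",  # assumed; any locally pulled Ollama model works
            "ollama_base_url": "http://localhost:11434",
        }
    },
}

m = Memory.from_config(config)
```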
## Chat Completion
Mem0 can be easily integrated into chat applications to enhance conversational agents with structured memory. Mem0's APIs are designed to be compatible with OpenAI's, with the goal of making it easy to leverage Mem0 in applications you may have already built.
If you have a Mem0 API key, you can use it to initialize the client. Alternatively, you can initialize Mem0 without an API key if you're running it locally.
Mem0 supports several language models (LLMs) through integration with various [providers](https://litellm.vercel.app/docs/providers).
## Use Mem0 Platform
```python
from mem0.proxy.main import Mem0
client = Mem0(api_key="m0-xxx")
# First interaction: Storing user preferences
messages = [
{
"role": "user",
"content": "I love indian food but I cannot eat pizza since allergic to cheese."
},
]
user_id = "alice"
chat_completion = client.chat.completions.create(messages=messages, model="gpt-4o-mini", user_id=user_id)
# Memory saved after this will look like: "Loves Indian food. Allergic to cheese and cannot eat pizza."
# Second interaction: Leveraging stored memory
messages = [
{
"role": "user",
"content": "Suggest restaurants in San Francisco to eat.",
}
]
chat_completion = client.chat.completions.create(messages=messages, model="gpt-4o-mini", user_id=user_id)
print(chat_completion.choices[0].message.content)
# Answer: You might enjoy Indian restaurants in San Francisco, such as Amber India, Dosa, or Curry Up Now, which offer delicious options without cheese.
```
In this example, you can see how the second response is tailored based on the information provided in the first interaction. Mem0 remembers the user's preference for Indian food and their cheese allergy, using this information to provide more relevant and personalized restaurant suggestions in San Francisco.
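Because the proxy routes requests through litellm, you are not limited to OpenAI models. A hedged sketch, assuming the matching provider key (here `ANTHROPIC_API_KEY`) is set in your environment:
```python
# Same client as above; only the model string changes.
chat_completion = client.chat.completions.create(
    messages=messages,
    model="claude-3-5-sonnet-20240620",  # assumed model name
    user_id=user_id,
)
```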
### Use Mem0 OSS
```python
config = {
"vector_store": {
"provider": "qdrant",
"config": {
"host": "localhost",
"port": 6333,
}
},
}
client = Mem0(config=config)
chat_completion = client.chat.completions.create(
messages=[
{
"role": "user",
"content": "What's the capital of France?",
}
],
model="gpt-4o",
)
```
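The returned object follows the OpenAI response shape, so reading the answer works the same way as in the platform example. Passing `user_id` should likewise capture the exchange as memories for that user — an assumption carried over from the platform example above:
```python
print(chat_completion.choices[0].message.content)

# Assumed to mirror the platform example: pass user_id so the exchange
# is stored as memories for that user.
chat_completion = client.chat.completions.create(
    messages=[{"role": "user", "content": "I prefer window seats on trains."}],
    model="gpt-4o",
    user_id="alice",
)
```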
## APIs
Get started with using Mem0 APIs in your applications. For more details, refer to the [Platform](/platform/quickstart.mdx).
Here is an example of how to use Mem0 APIs:
```python
import os
from mem0 import MemoryClient
os.environ["MEM0_API_KEY"] = "your-api-key"
client = MemoryClient() # get api_key from https://app.mem0.ai/
# Store messages
messages = [
{"role": "user", "content": "Hi, I'm Alex. I'm a vegetarian and I'm allergic to nuts."},
{"role": "assistant", "content": "Hello Alex! I've noted that you're a vegetarian and have a nut allergy. I'll keep this in mind for any food-related recommendations or discussions."}
]
result = client.add(messages, user_id="alex")
print(result)
# Retrieve memories
all_memories = client.get_all(user_id="alex")
print(all_memories)
# Search memories
query = "What do you know about me?"
related_memories = client.search(query, user_id="alex")
# Get memory history
history = client.history(memory_id="m1")
print(history)
```
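The client also exposes deletion methods that mirror the OSS API shown earlier. A short sketch, reusing the placeholder id `m1` from the history example:
```python
# Delete a single memory by id ("m1" is the placeholder used above)
client.delete(memory_id="m1")

# Delete all memories stored for a user
client.delete_all(user_id="alex")
```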
## Contributing
We welcome contributions to Mem0! Here's how you can contribute:
1. Fork the repository and create your branch from `main`.
2. Clone the forked repository to your local machine.
3. Install the project dependencies:
```bash
poetry install
```
4. Install pre-commit hooks:
```bash
pip install pre-commit # If pre-commit is not already installed
pre-commit install
```
5. Make your changes and ensure they adhere to the project's coding standards.
6. Run the tests locally:
```bash
poetry run pytest
```
7. If all tests pass, commit your changes and push to your fork.
8. Open a pull request with a clear title and description.
Please make sure your code follows our coding conventions and is well-documented. We appreciate your contributions to make Mem0 better!
If you have any questions, please feel free to reach out to us using one of the following methods:
<Snippet file="get-help.mdx" />

View File

@@ -114,12 +114,18 @@ curl -X POST "https://api.mem0.ai/v1/memories/" \
```
```json Output
[
{
"id": "24e466b5-e1c6-4bde-8a92-f09a327ffa60",
"memory": "Does not like cheese",
"event": "ADD"
},
{
"id": "e8d78459-fadd-4c5a-bece-abb8c3dc7ed7",
"memory": "Lives in San Francisco",
"event": "ADD"
}
]
```
</CodeGroup>
</Accordion>
@@ -288,9 +294,15 @@ Follow the steps below to get started with Mem0 Open Source:
<AccordionGroup>
<Accordion title="Install package">
<CodeGroup>
```bash pip
pip install mem0ai
```
```bash npm
npm install mem0ai
```
</CodeGroup>
</Accordion>
</AccordionGroup>
@@ -298,10 +310,17 @@ pip install mem0ai
<AccordionGroup>
<Accordion title="Instantiate client">
<CodeGroup>
```python Python
from mem0 import Memory
m = Memory()
```
```typescript TypeScript
import { Memory } from 'mem0ai/oss';
const memory = new Memory();
```
</CodeGroup>
</Accordion>
<Accordion title="Add memories">
<CodeGroup>
@@ -310,13 +329,23 @@ m = Memory()
result = m.add("I like to drink coffee in the morning and go for a walk.", user_id="alice", metadata={"category": "preferences"})
```
```typescript TypeScript
const result = await memory.add("I like to drink coffee in the morning and go for a walk.", { userId: "alice", metadata: { category: "preferences" } });
```
```json Output
[
{
"id": "3dc6f65f-fb3f-4e91-89a8-ed1a22f8898a",
"data": {"memory": "Likes to drink coffee in the morning"},
"event": "ADD"
},
{
"id": "f1673706-e3d6-4f12-a767-0384c7697d53",
"data": {"memory": "Likes to go for a walk"},
"event": "ADD"
}
]
```
</CodeGroup>
</Accordion>
@@ -327,33 +356,37 @@ result = m.add("I like to drink coffee in the morning and go for a walk.", user_
<AccordionGroup>
<Accordion title="Search for relevant memories">
<CodeGroup>
```python Python
related_memories = m.search("Should I drink coffee or tea?", user_id="alice")
```
```typescript TypeScript
const relatedMemories = await memory.search("Should I drink coffee or tea?", { userId: "alice" });
```
```json Output
[
{
"id": "3dc6f65f-fb3f-4e91-89a8-ed1a22f8898a",
"memory": "Likes to drink coffee in the morning",
"user_id": "alice",
"metadata": {"category": "preferences"},
"categories": ["user_preferences", "food"],
"immutable": false,
"created_at": "2025-02-24T20:11:39.010261-08:00",
"updated_at": "2025-02-24T20:11:39.010274-08:00",
"score": 0.5915589089130715
},
{
"id": "e8d78459-fadd-4c5a-bece-abb8c3dc7ed7",
"memory": "Likes to go for a walk",
"user_id": "alice",
"metadata": {"category": "preferences"},
"categories": ["hobby", "food"],
"immutable": false,
"created_at": "2025-02-24T11:47:52.893038-08:00",
"updated_at": "2025-02-24T11:47:52.893048-08:00",
"score": 0.43263634637810866
}
]
```
@@ -362,6 +395,11 @@ related_memories = m.search("Should I drink coffee or tea?", user_id="alice")
</Accordion>
</AccordionGroup>
<CardGroup cols={2}>
<Card title="Mem0 OSS Python SDK" icon="python" href="/open-source/python-quickstart">
Learn more about Mem0 OSS Python SDK
</Card>
<Card title="Mem0 OSS Node.js SDK" icon="node" href="/open-source-typescript/quickstart">
Learn more about Mem0 OSS Node.js SDK
</Card>
</CardGroup>