(Update) Vercel AI SDK Memory Saving Algo (#2082)
@@ -64,10 +64,13 @@ npm install @mem0/vercel-ai-provider
 ### Standalone Features:

 ```typescript
-await addMemories(messages, { user_id: "borat", mem0ApiKey: "m0-xxx" });
-await retrieveMemories(prompt, { user_id: "borat", mem0ApiKey: "m0-xxx" });
+await addMemories(messages, { user_id: "borat", mem0ApiKey: "m0-xxx", org_id: "org_xx", project_id: "proj_xx" });
+await retrieveMemories(prompt, { user_id: "borat", mem0ApiKey: "m0-xxx", org_id: "org_xx", project_id: "proj_xx" });
+await getMemories(prompt, { user_id: "borat", mem0ApiKey: "m0-xxx", org_id: "org_xx", project_id: "proj_xx" });
 ```

-> **Note**: For standalone features, such as `addMemories` and `retrieveMemories`, you must either set `MEM0_API_KEY` as an environment variable or pass it directly in the function call.
+> For standalone features, such as `addMemories`, `retrieveMemories`, and `getMemories`, you must either set `MEM0_API_KEY` as an environment variable or pass it directly in the function call.
+
+> `getMemories` will return raw memories in the form of an array of objects, while `retrieveMemories` will return a response in string format with a system prompt ingested with the retrieved memories.

 ### 1. Basic Text Generation with Memory Context
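The new README note distinguishes the two retrieval helpers. As a rough illustration (not code from the diff itself): a minimal sketch of using them side by side, assuming `MEM0_API_KEY` and `OPENAI_API_KEY` are set in the environment; the model choice is arbitrary.

```typescript
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";
import { retrieveMemories, getMemories } from "@mem0/vercel-ai-provider";

const prompt = "Suggest a weekend trip I would enjoy.";

// retrieveMemories() resolves to a system-prompt string
// ("" when the user has no stored memories, per the change later in this commit).
const system = await retrieveMemories(prompt, { user_id: "borat" });

const { text } = await generateText({
  model: openai("gpt-4o"), // illustrative model choice
  system,
  prompt,
});

// getMemories() resolves to the raw memory objects for custom handling.
const memories = await getMemories(prompt, { user_id: "borat" });
console.log(text, memories.length);
```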
@@ -146,6 +149,7 @@ npm install @mem0/vercel-ai-provider
 
 - `createMem0()`: Initializes a new Mem0 provider instance.
 - `retrieveMemories()`: Retrieves memory context for prompts.
+- `getMemories()`: Get memories from your profile in array format.
 - `addMemories()`: Adds user memories to enhance contextual responses.
 
 ## Best Practices
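For the provider-based API listed above, a hedged sketch of basic text generation with memory context; the exact `createMem0()` settings and model-id handling are assumptions, since they are not shown in this diff.

```typescript
import { generateText } from "ai";
import { createMem0 } from "@mem0/vercel-ai-provider";

// Assumed usage: createMem0() returns a model factory, and per-call settings
// (user_id plus the new org_id / project_id) ride along as Mem0ChatSettings.
const mem0 = createMem0();

const { text } = await generateText({
  model: mem0("gpt-4o", {
    user_id: "borat",
    org_id: "org_xx",      // new in this release
    project_id: "proj_xx", // new in this release
  }),
  prompt: "Suggest restaurants I might like tonight.",
});
```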
@@ -81,10 +81,15 @@ you must either set `MEM0_API_KEY` as an environment variable or pass it directl
 Example:

 ```typescript
-await addMemories(messages, { user_id: "borat", mem0ApiKey: "m0-xxx" });
-await retrieveMemories(prompt, { user_id: "borat", mem0ApiKey: "m0-xxx" });
+await addMemories(messages, { user_id: "borat", mem0ApiKey: "m0-xxx", org_id: "org_xx", project_id: "proj_xx" });
+await retrieveMemories(prompt, { user_id: "borat", mem0ApiKey: "m0-xxx", org_id: "org_xx", project_id: "proj_xx" });
+await getMemories(prompt, { user_id: "borat", mem0ApiKey: "m0-xxx", org_id: "org_xx", project_id: "proj_xx" });
 ```

+### Note:
+
+`retrieveMemories` enriches the prompt with relevant memories from your profile, while `getMemories` returns the memories in array format which can be used for further processing.
+
 ## Usage Examples

 ### 1. Basic Text Generation with Memory Context
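The added note says the `getMemories` array "can be used for further processing". One possible sketch of that, with the caveat that the per-memory object shape (a `memory` text field) is an assumption not shown in this diff:

```typescript
import { getMemories } from "@mem0/vercel-ai-provider";

const memories = await getMemories("What do I usually order for dinner?", {
  user_id: "borat",
  org_id: "org_xx",
  project_id: "proj_xx",
});

// Build a custom context block from the raw results; the `memory` field
// name is an assumption about the v2 search response shape.
const context = memories
  .map((m: any, i: number) => `${i + 1}. ${m.memory ?? JSON.stringify(m)}`)
  .join("\n");

console.log(`Known about this user:\n${context}`);
```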
@@ -199,6 +204,7 @@ for await (const textPart of textStream) {
 - `createMem0()`: Initializes a new mem0 provider instance with optional configuration
 - `retrieveMemories()`: Enriches prompts with relevant memories
 - `addMemories()`: Add memories to your profile
+- `getMemories()`: Get memories from your profile in array format

 ## Configuration Options

@@ -225,4 +231,4 @@ We also have support for `agent_id`, `app_id`, and `run_id`. Refer [Docs](https:
 - Requires proper API key configuration for underlying providers (e.g., OpenAI)
 - Memory features depend on proper user identification via `user_id`
 - Supports both streaming and non-streaming responses
 - Compatible with all Vercel AI SDK features and patterns
@@ -1,6 +1,6 @@
 {
   "name": "@mem0/vercel-ai-provider",
-  "version": "0.0.7",
+  "version": "0.0.9",
   "description": "Vercel AI Provider for providing memory to LLMs",
   "main": "./dist/index.js",
   "module": "./dist/index.mjs",
@@ -1,4 +1,4 @@
 export * from './mem0-facade'
 export type { Mem0Provider, Mem0ProviderSettings } from './mem0-provider'
 export { createMem0, mem0 } from './mem0-provider'
-export {addMemories, retrieveMemories, searchMemories } from './mem0-utils'
+export {addMemories, retrieveMemories, searchMemories, getMemories } from './mem0-utils'
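With this re-export, `getMemories` becomes available from the package root alongside the existing helpers (assuming this `index.ts` builds into the `dist/index.js` entry point declared in package.json above):

```typescript
import { addMemories, retrieveMemories, searchMemories, getMemories } from "@mem0/vercel-ai-provider";
```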
@@ -31,6 +31,8 @@ export interface Mem0ChatSettings extends OpenAIChatSettings {
   project_name?: string;
   mem0ApiKey?: string;
   structuredOutputs?: boolean;
+  org_id?: string;
+  project_id?: string;
 }

 export interface Mem0Config extends Mem0ChatSettings {}
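Since `Mem0Config` is an alias of `Mem0ChatSettings`, the new identifiers can be passed anywhere a config object is accepted. A minimal sketch limited to the fields visible in this hunk:

```typescript
import { Mem0Config } from "./mem0-chat-settings";

// Only fields shown in this hunk; inherited OpenAI chat settings and the
// user/agent identifiers are omitted here.
const config: Mem0Config = {
  mem0ApiKey: "m0-xxx",
  org_id: "org_xx",      // preferred over the deprecated org_name
  project_id: "proj_xx", // preferred over the deprecated project_name
};
```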
@@ -28,6 +28,34 @@ const flattenPrompt = (prompt: LanguageModelV1Prompt) => {
   }).join(" ");
 }
 
+function convertMessagesToMem0Format(messages: LanguageModelV1Prompt) {
+  return messages.map((message) => {
+    // If the content is a string, return it as is
+    if (typeof message.content === "string") {
+      return message;
+    }
+
+    // Flatten the content array into a single string
+    if (Array.isArray(message.content)) {
+      message.content = message.content
+        .map((contentItem) => {
+          if ("text" in contentItem) {
+            return contentItem.text;
+          }
+          return "";
+        })
+        .join(" ");
+    }
+
+    const contentText = message.content;
+
+    return {
+      role: message.role,
+      content: contentText,
+    };
+  });
+}
+
 const searchInternalMemories = async (query: string, config?: Mem0Config, top_k: number = 5)=> {
   tokenIsPresent(config);
   const filters = {
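A quick illustration of what the new helper does to a mixed prompt (the sample messages are hypothetical, and the import assumes this runs next to `mem0-utils.ts`):

```typescript
import { convertMessagesToMem0Format } from "./mem0-utils";

const prompt: any = [
  { role: "user", content: [{ type: "text", text: "I am" }, { type: "text", text: "vegetarian." }] },
  { role: "assistant", content: "Noted!" },
];

const converted = convertMessagesToMem0Format(prompt);
// -> [ { role: "user", content: "I am vegetarian." },
//      { role: "assistant", content: "Noted!" } ]
console.log(converted);
```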
@@ -46,10 +74,16 @@ const searchInternalMemories = async (query: string, config?: Mem0Config, top_k:
       },
     ],
   };
+  const org_project_filters = {
+    org_id: config&&config.org_id,
+    project_id: config&&config.project_id,
+    org_name: !config?.org_id ? config&&config.org_name : undefined, // deprecated
+    project_name: !config?.org_id ? config&&config.project_name : undefined, // deprecated
+  }
   const options = {
     method: 'POST',
     headers: {Authorization: `Token ${(config&&config.mem0ApiKey) || (typeof process !== 'undefined' && process.env && process.env.MEM0_API_KEY) || ""}`, 'Content-Type': 'application/json'},
-    body: JSON.stringify({query, filters, top_k, version: "v2", org_name: config&&config.org_name, project_name: config&&config.project_name}),
+    body: JSON.stringify({query, filters, top_k, version: "v2", ...org_project_filters}),
   };
   const response = await fetch('https://api.mem0.ai/v2/memories/search/', options);
   const data = await response.json();
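The net effect of spreading `org_project_filters` is that the search request now carries ids, with the name fields kept only as a deprecated fallback when `org_id` is absent. Roughly, for a config with both ids set, the body sent to `/v2/memories/search/` looks like this (a conceptual sketch, not code from the diff):

```typescript
const body = {
  query: "What do I usually order?",
  filters: { /* user/agent/app/run filters, unchanged by this commit */ },
  top_k: 5,
  version: "v2",
  org_id: "org_xx",
  project_id: "proj_xx",
  // org_name / project_name are only included when org_id is missing
  // (deprecated path); undefined values are dropped by JSON.stringify.
};
```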
@@ -94,9 +128,26 @@ const retrieveMemories = async (prompt: LanguageModelV1Prompt | string, config?:
     console.error("Error while parsing memories");
     // console.log(e);
   }
+  if(memories.length === 0){
+    return "";
+  }
   return `System Message: ${systemPrompt} ${memoriesText}`;
 }
 
+const getMemories = async (prompt: LanguageModelV1Prompt | string, config?: Mem0Config)=>{
+  tokenIsPresent(config);
+  const message = typeof prompt === 'string' ? prompt : flattenPrompt(prompt);
+  let memories = [];
+  try{
+    // @ts-ignore
+    memories = await searchInternalMemories(message, config);
+  }
+  catch(e){
+    console.error("Error while searching memories");
+  }
+  return memories;
+}
+
 const searchMemories = async (prompt: LanguageModelV1Prompt | string, config?: Mem0Config)=>{
   tokenIsPresent(config);
   const message = typeof prompt === 'string' ? prompt : flattenPrompt(prompt);
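One behavioural consequence of the new guard in `retrieveMemories`: when nothing matches, callers now receive an empty string instead of a system prompt with an empty memory list. A small sketch:

```typescript
import { retrieveMemories } from "@mem0/vercel-ai-provider";

const system = await retrieveMemories("First ever message", { user_id: "brand_new_user" });

if (system === "") {
  // New in this commit: no stored memories -> "".
  console.log("No memory context available yet.");
}
```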
@@ -111,4 +162,4 @@ const searchMemories = async (prompt: LanguageModelV1Prompt | string, config?: M
   return memories;
 }
 
-export {addMemories, updateMemories, retrieveMemories, flattenPrompt, searchMemories};
+export {addMemories, updateMemories, retrieveMemories, flattenPrompt, searchMemories, convertMessagesToMem0Format, getMemories};
@@ -1,6 +1,6 @@
 import { createOpenAI, OpenAIProviderSettings } from "@ai-sdk/openai";
 import { generateText as aiGenerateText, streamText as aiStreamText, LanguageModelV1Prompt } from "ai";
-import { updateMemories, retrieveMemories, flattenPrompt } from "./mem0-utils";
+import { updateMemories, retrieveMemories, flattenPrompt, convertMessagesToMem0Format } from "./mem0-utils";
 import { Mem0Config } from "./mem0-chat-settings";
 import { Mem0ProviderSettings } from "./mem0-provider";
 import { CohereProviderSettings, createCohere } from "@ai-sdk/cohere";
@@ -73,10 +73,9 @@ class Mem0AITextGenerator {
         system: newPrompt
       });
 
-      await updateMemories([
-        { role: "user", content: flattenPromptResponse },
-        { role: "assistant", content: response.text },
-      ], config);
+      const mem0Prompts = convertMessagesToMem0Format(prompt);
+
+      await updateMemories(mem0Prompts as any, config);
 
       return response;
     } catch (error) {