(Update) Vercel AI SDK Memory Saving Algo (#2082)

Saket Aryan
2024-12-15 11:14:16 +05:30
committed by GitHub
parent 400b1f4eac
commit 763f804277
7 changed files with 78 additions and 16 deletions

View File

@@ -1,4 +1,4 @@
 export * from './mem0-facade'
 export type { Mem0Provider, Mem0ProviderSettings } from './mem0-provider'
 export { createMem0, mem0 } from './mem0-provider'
-export { addMemories, retrieveMemories, searchMemories } from './mem0-utils'
+export { addMemories, retrieveMemories, searchMemories, getMemories } from './mem0-utils'

View File

@@ -31,6 +31,8 @@ export interface Mem0ChatSettings extends OpenAIChatSettings {
   project_name?: string;
   mem0ApiKey?: string;
   structuredOutputs?: boolean;
+  org_id?: string;
+  project_id?: string;
 }
 
 export interface Mem0Config extends Mem0ChatSettings {}
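
With these settings, memory calls can be scoped to an organization and project by ID. A minimal sketch of a config using the new fields (the import path and ID values here are illustrative assumptions, not part of the diff):

import { retrieveMemories } from "@mem0/vercel-ai-provider";

const config = {
  mem0ApiKey: "m0-...",    // falls back to the MEM0_API_KEY env var if omitted
  org_id: "org_123",       // hypothetical IDs; these take precedence over the
  project_id: "proj_456",  // deprecated org_name/project_name fields
};

const systemMessage = await retrieveMemories("What do I like to eat?", config);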

View File

@@ -28,6 +28,34 @@ const flattenPrompt = (prompt: LanguageModelV1Prompt) => {
   }).join(" ");
 }
 
+function convertMessagesToMem0Format(messages: LanguageModelV1Prompt) {
+  return messages.map((message) => {
+    // If the content is a string, return it as is
+    if (typeof message.content === "string") {
+      return message;
+    }
+
+    // Flatten the content array into a single string
+    if (Array.isArray(message.content)) {
+      message.content = message.content
+        .map((contentItem) => {
+          if ("text" in contentItem) {
+            return contentItem.text;
+          }
+          return "";
+        })
+        .join(" ");
+    }
+
+    const contentText = message.content;
+
+    return {
+      role: message.role,
+      content: contentText,
+    };
+  });
+}
+
 const searchInternalMemories = async (query: string, config?: Mem0Config, top_k: number = 5) => {
   tokenIsPresent(config);
   const filters = {
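
The helper above collapses multi-part AI SDK message content into the plain-string form mem0 expects. Roughly, for a prompt like this (an illustrative input, not from the diff):

const prompt = [
  {
    role: "user",
    content: [
      { type: "text", text: "I am a vegetarian." },
      { type: "text", text: "Suggest a dinner recipe." },
    ],
  },
];

// Non-text parts (e.g. images) become empty strings, and the parts are
// joined with spaces:
// [{ role: "user", content: "I am a vegetarian. Suggest a dinner recipe." }]
const converted = convertMessagesToMem0Format(prompt as any);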
@@ -46,10 +74,16 @@ const searchInternalMemories = async (query: string, config?: Mem0Config, top_k:
       },
     ],
   };
+  const org_project_filters = {
+    org_id: config && config.org_id,
+    project_id: config && config.project_id,
+    org_name: !config?.org_id ? config && config.org_name : undefined, // deprecated
+    project_name: !config?.project_id ? config && config.project_name : undefined, // deprecated
+  };
   const options = {
     method: 'POST',
     headers: { Authorization: `Token ${(config && config.mem0ApiKey) || (typeof process !== 'undefined' && process.env && process.env.MEM0_API_KEY) || ""}`, 'Content-Type': 'application/json' },
-    body: JSON.stringify({ query, filters, top_k, version: "v2", org_name: config && config.org_name, project_name: config && config.project_name }),
+    body: JSON.stringify({ query, filters, top_k, version: "v2", ...org_project_filters }),
   };
   const response = await fetch('https://api.mem0.ai/v2/memories/search/', options);
   const data = await response.json();
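
One detail worth noting: JSON.stringify drops properties whose value is undefined, so only one scoping scheme ever reaches the API. A quick illustration with hypothetical values:

const body = JSON.stringify({
  query: "What do I like to eat?",
  top_k: 5,
  version: "v2",
  org_id: "org_123",    // ID-based scheme present...
  org_name: undefined,  // ...so the deprecated name field is omitted
});
// '{"query":"What do I like to eat?","top_k":5,"version":"v2","org_id":"org_123"}'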
@@ -94,9 +128,26 @@ const retrieveMemories = async (prompt: LanguageModelV1Prompt | string, config?:
     console.error("Error while parsing memories");
     // console.log(e);
   }
+  if (memories.length === 0) {
+    return "";
+  }
   return `System Message: ${systemPrompt} ${memoriesText}`;
 }
 
+const getMemories = async (prompt: LanguageModelV1Prompt | string, config?: Mem0Config) => {
+  tokenIsPresent(config);
+  const message = typeof prompt === 'string' ? prompt : flattenPrompt(prompt);
+  let memories = [];
+  try {
+    // @ts-ignore
+    memories = await searchInternalMemories(message, config);
+  }
+  catch (e) {
+    console.error("Error while searching memories");
+  }
+  return memories;
+}
+
 const searchMemories = async (prompt: LanguageModelV1Prompt | string, config?: Mem0Config) => {
   tokenIsPresent(config);
   const message = typeof prompt === 'string' ? prompt : flattenPrompt(prompt);
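
Unlike retrieveMemories, which folds results into a system-message string (and now returns "" when nothing matches), the new getMemories returns the raw search results for callers to post-process. A usage sketch; the import path and result field name are assumptions based on mem0's v2 search response:

import { getMemories } from "@mem0/vercel-ai-provider";

// Returns the raw memory array (or [] if the search fails).
const memories = await getMemories("What do I like to eat?", {
  org_id: "org_123",      // hypothetical scoping IDs
  project_id: "proj_456",
});

for (const item of memories) {
  console.log(item.memory); // assumed field name in the search response
}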
@@ -111,4 +162,4 @@ const searchMemories = async (prompt: LanguageModelV1Prompt | string, config?: M
   return memories;
 }
 
-export { addMemories, updateMemories, retrieveMemories, flattenPrompt, searchMemories };
+export { addMemories, updateMemories, retrieveMemories, flattenPrompt, searchMemories, convertMessagesToMem0Format, getMemories };

View File

@@ -1,6 +1,6 @@
 import { createOpenAI, OpenAIProviderSettings } from "@ai-sdk/openai";
 import { generateText as aiGenerateText, streamText as aiStreamText, LanguageModelV1Prompt } from "ai";
-import { updateMemories, retrieveMemories, flattenPrompt } from "./mem0-utils";
+import { updateMemories, retrieveMemories, flattenPrompt, convertMessagesToMem0Format } from "./mem0-utils";
 import { Mem0Config } from "./mem0-chat-settings";
 import { Mem0ProviderSettings } from "./mem0-provider";
 import { CohereProviderSettings, createCohere } from "@ai-sdk/cohere";
@@ -73,10 +73,9 @@ class Mem0AITextGenerator {
         system: newPrompt
       });
 
-      await updateMemories([
-        { role: "user", content: flattenPromptResponse },
-        { role: "assistant", content: response.text },
-      ], config);
+      const mem0Prompts = convertMessagesToMem0Format(prompt);
+      await updateMemories(mem0Prompts as any, config);
 
       return response;
     } catch (error) {
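
Net effect of this last change: instead of persisting a synthesized user/assistant pair built from the flattened prompt and the model's reply, the generator now converts the full prompt and saves that. A simplified sketch of the new save path (variable names follow the diff; surrounding code is elided):

const newPrompt = await retrieveMemories(prompt, config);  // inject memories into the system prompt
const response = await aiGenerateText({ /* ... */ system: newPrompt });
// Save the whole converted conversation; note that the assistant reply
// (response.text) is no longer written back to memory.
const mem0Prompts = convertMessagesToMem0Format(prompt);
await updateMemories(mem0Prompts as any, config);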