vercel-ai-sdk: release 0.0.10

Adds memory-scoping options (metadata, filters, infer, page, page_size) to
Mem0ChatSettings, introduces convertToMem0Format for flattening an AI SDK
prompt into mem0's {role, content} message list, and reworks addMemories to
send the full structured conversation instead of a flattened string.

Review fixes applied to the lines this patch ADDS (context/removed lines are
untouched so the patch still applies to the 4fe1733a / 4b49d43d pre-images):
  - `Record` and `Array` require type arguments under strict TypeScript
    (TS2314); annotated as `Record<string, any>` and
    `Array<{ role: string; content: string }>`.
  - The `.filter` callback parameter was typed `null` (only `null` itself
    would be assignable); retyped `any` so the non-null check is meaningful.
  - NOTE(review): addMemories branches on `typeof messages === "string"`,
    but its declared parameter type is `LanguageModelV1Prompt` (a context
    line, so it cannot be changed here). If string input is intended,
    a follow-up should widen the signature to `LanguageModelV1Prompt | string`.
  - NOTE(review): the final hunk's header was adjusted from `-92,11 +114,13`
    to match the 9 old / 11 new lines actually present in this patch.

diff --git a/vercel-ai-sdk/package.json b/vercel-ai-sdk/package.json
index c7312a85..fc3bb50b 100644
--- a/vercel-ai-sdk/package.json
+++ b/vercel-ai-sdk/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@mem0/vercel-ai-provider",
-  "version": "0.0.9",
+  "version": "0.0.10",
   "description": "Vercel AI Provider for providing memory to LLMs",
   "main": "./dist/index.js",
   "module": "./dist/index.mjs",
diff --git a/vercel-ai-sdk/src/mem0-chat-settings.ts b/vercel-ai-sdk/src/mem0-chat-settings.ts
index 4fe1733a..0cfbe226 100644
--- a/vercel-ai-sdk/src/mem0-chat-settings.ts
+++ b/vercel-ai-sdk/src/mem0-chat-settings.ts
@@ -33,6 +33,11 @@ export interface Mem0ChatSettings extends OpenAIChatSettings {
   structuredOutputs?: boolean;
   org_id?: string;
   project_id?: string;
+  metadata?: Record<string, any>;
+  filters?: Record<string, any>;
+  infer?: boolean;
+  page?: number;
+  page_size?: number;
 }
 
 export interface Mem0Config extends Mem0ChatSettings {}
\ No newline at end of file
diff --git a/vercel-ai-sdk/src/mem0-utils.ts b/vercel-ai-sdk/src/mem0-utils.ts
index 4b49d43d..36f1e4ab 100644
--- a/vercel-ai-sdk/src/mem0-utils.ts
+++ b/vercel-ai-sdk/src/mem0-utils.ts
@@ -28,6 +28,28 @@ const flattenPrompt = (prompt: LanguageModelV1Prompt) => {
   }).join(" ");
 }
 
+const convertToMem0Format = (messages: LanguageModelV1Prompt) => {
+  return messages.flatMap((message: any) => {
+    if (typeof message.content === 'string') {
+      return {
+        role: message.role,
+        content: message.content,
+      };
+    }
+    else {
+      return message.content.map((obj: any) => {
+        if (obj.type === "text") {
+          return {
+            role: message.role,
+            content: obj.text,
+          };
+        } else {
+          return null; // Non-text parts (images, tool calls) have no mem0 form.
+        }
+      }).filter((item: any) => item !== null); // Drop the null placeholders.
+    }
+})};
+
 function convertMessagesToMem0Format(messages: LanguageModelV1Prompt) {
   return messages.map((message) => {
     // If the content is a string, return it as is
@@ -92,9 +114,11 @@ const searchInternalMemories = async (query: string, config?: Mem0Config, top_k:
 const addMemories = async (messages: LanguageModelV1Prompt, config?: Mem0Config)=>{
   tokenIsPresent(config);
-  const message = flattenPrompt(messages);
-  const response = await updateMemories([
-    { role: "user", content: message },
-    { role: "assistant", content: "Thank You!" },
-  ], config);
+  let finalMessages: Array<{ role: string; content: string }> = [];
+  if (typeof messages === "string") {
+    finalMessages = [{ role: "user", content: messages }];
+  } else {
+    finalMessages = convertToMem0Format(messages);
+  }
+  const response = await updateMemories(finalMessages, config);
   return response;
 }