diff --git a/docs/features/multimodal-support.mdx b/docs/features/multimodal-support.mdx
index a93a7591..056a732e 100644
--- a/docs/features/multimodal-support.mdx
+++ b/docs/features/multimodal-support.mdx
@@ -12,7 +12,7 @@ Mem0 extends its capabilities beyond text by supporting multimodal data, includi
 
 When a user submits an image, Mem0 processes it to extract textual information and other pertinent details. These details are then added to the user's memory, enhancing the system's ability to understand and recall visual inputs.
 
-```python Code
+```python Python
 import os
 from mem0 import MemoryClient
 
@@ -44,6 +44,34 @@ messages = [
 client.add(messages, user_id="alice")
 ```
 
+```typescript TypeScript
+import MemoryClient from "mem0ai";
+
+const client = new MemoryClient();
+
+const messages = [
+  {
+    role: "user",
+    content: "Hi, my name is Alice."
+  },
+  {
+    role: "assistant",
+    content: "Nice to meet you, Alice! What do you like to eat?"
+  },
+  {
+    role: "user",
+    content: {
+      type: "image_url",
+      image_url: {
+        url: "https://www.superhealthykids.com/wp-content/uploads/2021/10/best-veggie-pizza-featured-image-square-2.jpg"
+      }
+    }
+  },
+]
+
+await client.add(messages, { user_id: "alice" })
+```
+
 ```json Output
 {
   "results": [
@@ -90,7 +118,9 @@ client.add([image_message], user_id="alice")
 
 ## 2. Using Base64 Image Encoding for Local Files
 For local images—or when embedding the image directly is preferable—you can use a Base64-encoded string.
-```python
+
+
+```python Python
 import base64
 
 # Path to the image file
@@ -113,6 +143,29 @@ image_message = {
 client.add([image_message], user_id="alice")
 ```
 
+```typescript TypeScript
+import MemoryClient from "mem0ai";
+import fs from 'fs';
+
+const client = new MemoryClient();
+
+const imagePath = 'path/to/your/image.jpg';
+
+const base64Image = fs.readFileSync(imagePath, { encoding: 'base64' });
+
+const imageMessage = {
+  role: "user",
+  content: {
+    type: "image_url",
+    image_url: {
+      url: `data:image/jpeg;base64,${base64Image}`
+    }
+  }
+};
+
+await client.add([imageMessage], { user_id: "alice" })
+```
+
 Using these methods, you can seamlessly incorporate images into your interactions, further enhancing Mem0's multimodal capabilities.
 
 If you have any questions, please feel free to reach out to us using one of the following methods:
diff --git a/docs/open-source/multimodal-support.mdx b/docs/open-source/multimodal-support.mdx
index b2cd343e..7cff75df 100644
--- a/docs/open-source/multimodal-support.mdx
+++ b/docs/open-source/multimodal-support.mdx
@@ -40,6 +40,34 @@ messages = [
 client.add(messages, user_id="alice")
 ```
 
+```typescript TypeScript
+import { Memory, Message } from "mem0ai/oss";
+
+const client = new Memory();
+
+const messages: Message[] = [
+  {
+    role: "user",
+    content: "Hi, my name is Alice."
+  },
+  {
+    role: "assistant",
+    content: "Nice to meet you, Alice! What do you like to eat?"
+  },
+  {
+    role: "user",
+    content: {
+      type: "image_url",
+      image_url: {
+        url: "https://www.superhealthykids.com/wp-content/uploads/2021/10/best-veggie-pizza-featured-image-square-2.jpg"
+      }
+    }
+  },
+]
+
+await client.add(messages, { userId: "alice" })
+```
+
 ```json Output
 {
   "results": [
@@ -66,6 +94,7 @@ Mem0 allows you to add images to user interactions through two primary methods:
 
 You can include an image by passing its direct URL. This method is simple and efficient for online images.
 
+
 ```python
 # Define the image URL
 image_url = "https://www.superhealthykids.com/wp-content/uploads/2021/10/best-veggie-pizza-featured-image-square-2.jpg"
@@ -82,11 +111,33 @@ image_message = {
 }
 ```
 
+```typescript TypeScript
+import { Memory, Message } from "mem0ai/oss";
+
+const client = new Memory();
+
+const imageUrl = "https://www.superhealthykids.com/wp-content/uploads/2021/10/best-veggie-pizza-featured-image-square-2.jpg";
+
+const imageMessage: Message = {
+  role: "user",
+  content: {
+    type: "image_url",
+    image_url: {
+      url: imageUrl
+    }
+  }
+}
+
+await client.add([imageMessage], { userId: "alice" })
+```
+
+
 ## 2. Using Base64 Image Encoding for Local Files
 For local images or scenarios where embedding the image directly is preferable, you can use a Base64-encoded string.
-```python
+
+```python Python
 import base64
 
 # Path to the image file
@@ -108,6 +159,30 @@ image_message = {
 }
 ```
 
+```typescript TypeScript
+import { Memory, Message } from "mem0ai/oss";
+import fs from 'fs';
+
+const client = new Memory();
+
+const imagePath = "path/to/your/image.jpg";
+
+const base64Image = fs.readFileSync(imagePath, { encoding: 'base64' });
+
+const imageMessage: Message = {
+  role: "user",
+  content: {
+    type: "image_url",
+    image_url: {
+      url: `data:image/jpeg;base64,${base64Image}`
+    }
+  }
+}
+
+await client.add([imageMessage], { userId: "alice" })
+```
+
+
 By utilizing these methods, you can effectively incorporate images into user interactions, enhancing the multimodal capabilities of your Mem0 instance.
diff --git a/mem0-ts/package.json b/mem0-ts/package.json
index 895abf53..5340f204 100644
--- a/mem0-ts/package.json
+++ b/mem0-ts/package.json
@@ -1,6 +1,6 @@
 {
   "name": "mem0ai",
-  "version": "2.1.0",
+  "version": "2.1.1",
   "description": "The Memory Layer For Your AI Apps",
   "main": "./dist/index.js",
   "module": "./dist/index.mjs",
@@ -114,5 +114,6 @@
   },
   "publishConfig": {
     "access": "public"
-  }
+  },
+  "packageManager": "pnpm@10.5.2+sha512.da9dc28cd3ff40d0592188235ab25d3202add8a207afbedc682220e4a0029ffbff4562102b9e6e46b4e3f9e8bd53e6d05de48544b0c57d4b0179e22c76d1199b"
 }
diff --git a/mem0-ts/src/client/mem0.ts b/mem0-ts/src/client/mem0.ts
index 802e18b1..8cab84da 100644
--- a/mem0-ts/src/client/mem0.ts
+++ b/mem0-ts/src/client/mem0.ts
@@ -11,6 +11,7 @@ import {
   SearchOptions,
   Webhook,
   WebhookPayload,
+  Message,
 } from "./mem0.types";
 import { captureClientEvent, generateHash } from "./telemetry";
 
@@ -168,7 +169,7 @@ export default class MemoryClient {
   }
 
   _preparePayload(
-    messages: string | Array<{ role: string; content: string }>,
+    messages: string | Array<Message>,
     options: MemoryOptions,
   ): object {
     const payload: any = {};
@@ -187,7 +188,7 @@
   }
 
   async add(
-    messages: string | Array<{ role: string; content: string }>,
+    messages: string | Array<Message>,
     options: MemoryOptions = {},
   ): Promise<Array<Memory>> {
     this._validateOrgProject();
diff --git a/mem0-ts/src/client/mem0.types.ts b/mem0-ts/src/client/mem0.types.ts
index 2fe293ec..8fa4b71e 100644
--- a/mem0-ts/src/client/mem0.types.ts
+++ b/mem0-ts/src/client/mem0.types.ts
@@ -28,9 +28,16 @@ export enum API_VERSION {
   V2 = "v2",
 }
 
+export interface MultiModalMessages {
+  type: "image_url";
+  image_url: {
+    url: string;
+  };
+}
+
 export interface Messages {
   role: string;
-  content: string;
+  content: string | MultiModalMessages;
 }
 
 export interface Message extends Messages {}
diff --git a/mem0-ts/src/oss/src/llms/anthropic.ts b/mem0-ts/src/oss/src/llms/anthropic.ts
index 534a67d3..b2cc695e 100644
--- a/mem0-ts/src/oss/src/llms/anthropic.ts
+++ b/mem0-ts/src/oss/src/llms/anthropic.ts
@@ -27,9 +27,15 @@
       model: this.model,
       messages: otherMessages.map((msg) => ({
         role: msg.role as "user" | "assistant",
-        content: msg.content,
+        content:
+          typeof msg.content === "string"
+            ? msg.content
+            : msg.content.image_url.url,
       })),
-      system: systemMessage?.content,
+      system:
+        typeof systemMessage?.content === "string"
+          ? systemMessage.content
+          : undefined,
       max_tokens: 4096,
     });
diff --git a/mem0-ts/src/oss/src/llms/groq.ts b/mem0-ts/src/oss/src/llms/groq.ts
index 8c891812..61626210 100644
--- a/mem0-ts/src/oss/src/llms/groq.ts
+++ b/mem0-ts/src/oss/src/llms/groq.ts
@@ -23,7 +23,10 @@
       model: this.model,
       messages: messages.map((msg) => ({
         role: msg.role as "system" | "user" | "assistant",
-        content: msg.content,
+        content:
+          typeof msg.content === "string"
+            ? msg.content
+            : JSON.stringify(msg.content),
       })),
       response_format: responseFormat as { type: "text" | "json_object" },
     });
@@ -36,7 +39,10 @@
       model: this.model,
       messages: messages.map((msg) => ({
         role: msg.role as "system" | "user" | "assistant",
-        content: msg.content,
+        content:
+          typeof msg.content === "string"
+            ? msg.content
+            : JSON.stringify(msg.content),
       })),
     });
diff --git a/mem0-ts/src/oss/src/llms/openai.ts b/mem0-ts/src/oss/src/llms/openai.ts
index a321ef13..d1e148f5 100644
--- a/mem0-ts/src/oss/src/llms/openai.ts
+++ b/mem0-ts/src/oss/src/llms/openai.ts
@@ -8,7 +8,7 @@
   constructor(config: LLMConfig) {
     this.openai = new OpenAI({ apiKey: config.apiKey });
-    this.model = config.model || "gpt-4-turbo-preview";
+    this.model = config.model || "gpt-4o-mini";
   }
 
   async generateResponse(
@@ -17,10 +17,16 @@
     tools?: any[],
   ): Promise {
     const completion = await this.openai.chat.completions.create({
-      messages: messages.map((msg) => ({
-        role: msg.role as "system" | "user" | "assistant",
-        content: msg.content,
-      })),
+      messages: messages.map((msg) => {
+        const role = msg.role as "system" | "user" | "assistant";
+        return {
+          role,
+          content:
+            typeof msg.content === "string"
+              ? msg.content
+              : JSON.stringify(msg.content),
+        };
+      }),
       model: this.model,
       response_format: responseFormat as { type: "text" | "json_object" },
       ...(tools && { tools, tool_choice: "auto" }),
@@ -44,10 +50,16 @@
   async generateChat(messages: Message[]): Promise {
     const completion = await this.openai.chat.completions.create({
-      messages: messages.map((msg) => ({
-        role: msg.role as "system" | "user" | "assistant",
-        content: msg.content,
-      })),
+      messages: messages.map((msg) => {
+        const role = msg.role as "system" | "user" | "assistant";
+        return {
+          role,
+          content:
+            typeof msg.content === "string"
+              ? msg.content
+              : JSON.stringify(msg.content),
+        };
+      }),
       model: this.model,
     });
     const response = completion.choices[0].message;
diff --git a/mem0-ts/src/oss/src/llms/openai_structured.ts b/mem0-ts/src/oss/src/llms/openai_structured.ts
index 38b16e1b..9144345d 100644
--- a/mem0-ts/src/oss/src/llms/openai_structured.ts
+++ b/mem0-ts/src/oss/src/llms/openai_structured.ts
@@ -19,7 +19,10 @@
     const completion = await this.openai.chat.completions.create({
       messages: messages.map((msg) => ({
         role: msg.role as "system" | "user" | "assistant",
-        content: msg.content,
+        content:
+          typeof msg.content === "string"
+            ? msg.content
+            : JSON.stringify(msg.content),
       })),
       model: this.model,
       ...(tools
@@ -63,7 +66,10 @@
     const completion = await this.openai.chat.completions.create({
       messages: messages.map((msg) => ({
         role: msg.role as "system" | "user" | "assistant",
-        content: msg.content,
+        content:
+          typeof msg.content === "string"
+            ? msg.content
+            : JSON.stringify(msg.content),
       })),
       model: this.model,
     });
diff --git a/mem0-ts/src/oss/src/memory/index.ts b/mem0-ts/src/oss/src/memory/index.ts
index 21d3ed44..a164798d 100644
--- a/mem0-ts/src/oss/src/memory/index.ts
+++ b/mem0-ts/src/oss/src/memory/index.ts
@@ -31,6 +31,7 @@ import {
   DeleteAllMemoryOptions,
   GetAllMemoryOptions,
 } from "./memory.types";
+import { parse_vision_messages } from "../utils/memory";
 
 export class Memory {
   private config: MemoryConfig;
@@ -109,9 +110,11 @@
       ? (messages as Message[])
       : [{ role: "user", content: messages }];
 
+    const final_parsedMessages = await parse_vision_messages(parsedMessages);
+
     // Add to vector store
     const vectorStoreResult = await this.addToVectorStore(
-      parsedMessages,
+      final_parsedMessages,
      metadata,
       filters,
     );
@@ -121,7 +124,7 @@
     if (this.graphMemory) {
       try {
         graphResult = await this.graphMemory.add(
-          parsedMessages.map((m) => m.content).join("\n"),
+          final_parsedMessages.map((m) => m.content).join("\n"),
           filters,
         );
       } catch (error) {
diff --git a/mem0-ts/src/oss/src/types/index.ts b/mem0-ts/src/oss/src/types/index.ts
index ca15f17d..2dcd5cab 100644
--- a/mem0-ts/src/oss/src/types/index.ts
+++ b/mem0-ts/src/oss/src/types/index.ts
@@ -1,8 +1,15 @@
 import { z } from "zod";
 
+export interface MultiModalMessages {
+  type: "image_url";
+  image_url: {
+    url: string;
+  };
+}
+
 export interface Message {
   role: string;
-  content: string;
+  content: string | MultiModalMessages;
 }
 
 export interface EmbeddingConfig {
diff --git a/mem0-ts/src/oss/src/utils/memory.ts b/mem0-ts/src/oss/src/utils/memory.ts
new file mode 100644
index 00000000..83280936
--- /dev/null
+++ b/mem0-ts/src/oss/src/utils/memory.ts
@@ -0,0 +1,48 @@
+import { OpenAILLM } from "../llms/openai";
+import { Message } from "../types";
+
+const get_image_description = async (image_url: string) => {
+  const llm = new OpenAILLM({
+    apiKey: process.env.OPENAI_API_KEY,
+  });
+  const response = await llm.generateResponse([
+    {
+      role: "user",
+      content:
+        "Provide a description of the image and do not include any additional text.",
+    },
+    {
+      role: "user",
+      content: { type: "image_url", image_url: { url: image_url } },
+    },
+  ]);
+  return response;
+};
+
+const parse_vision_messages = async (messages: Message[]) => {
+  const parsed_messages = [];
+  for (const message of messages) {
+    let new_message = {
+      role: message.role,
+      content: "",
+    };
+    if (message.role !== "system") {
+      if (
+        typeof message.content === "object" &&
+        message.content.type === "image_url"
+      ) {
+        const description = await get_image_description(
+          message.content.image_url.url,
+        );
+        new_message.content =
+          typeof description === "string"
+            ? description
+            : JSON.stringify(description);
+        parsed_messages.push(new_message);
+      } else parsed_messages.push(message);
+    }
+  }
+  return parsed_messages;
+};
+
+export { parse_vision_messages };
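
For anyone reviewing the new helper, the sketch below (not part of this diff) shows how `parse_vision_messages` is expected to behave. It assumes the file is placed under `mem0-ts/src/oss/src/` so the relative imports resolve, that `OPENAI_API_KEY` is set (since `get_image_description` goes through `OpenAILLM`), and it reuses the pizza image URL from the docs above.

```typescript
// Hypothetical sanity check, not included in this PR.
// Run from mem0-ts/src/oss/src/ so the relative imports resolve;
// requires OPENAI_API_KEY because get_image_description calls OpenAI.
import { parse_vision_messages } from "./utils/memory";
import { Message } from "./types";

const messages: Message[] = [
  { role: "user", content: "Hi, my name is Alice." },
  {
    role: "user",
    content: {
      type: "image_url",
      image_url: {
        url: "https://www.superhealthykids.com/wp-content/uploads/2021/10/best-veggie-pizza-featured-image-square-2.jpg",
      },
    },
  },
];

async function main() {
  // Plain text messages pass through unchanged; image messages come back
  // with their content replaced by a generated text description, which is
  // what Memory.add() now hands to the vector store and graph memory.
  const parsed = await parse_vision_messages(messages);
  for (const m of parsed) {
    console.log(m.role, "->", m.content);
  }
}

main().catch(console.error);
```

With this flow, the raw image never reaches the vector store; only the description text gets embedded.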