Multimodal Support NodeSDK (#2320)

Saket Aryan
2025-03-06 17:50:41 +05:30
committed by GitHub
parent 2c31a930a3
commit 6d7ef3ae45
12 changed files with 248 additions and 26 deletions


@@ -12,7 +12,7 @@ Mem0 extends its capabilities beyond text by supporting multimodal data, includi
When a user submits an image, Mem0 processes it to extract textual information and other pertinent details. These details are then added to the user's memory, enhancing the system's ability to understand and recall visual inputs.
<CodeGroup>
```python Python
import os
from mem0 import MemoryClient
@@ -44,6 +44,34 @@ messages = [
client.add(messages, user_id="alice")
```
```typescript TypeScript
import MemoryClient from "mem0ai";
const client = new MemoryClient();
const messages = [
  {
    role: "user",
    content: "Hi, my name is Alice."
  },
  {
    role: "assistant",
    content: "Nice to meet you, Alice! What do you like to eat?"
  },
  {
    role: "user",
    content: {
      type: "image_url",
      image_url: {
        url: "https://www.superhealthykids.com/wp-content/uploads/2021/10/best-veggie-pizza-featured-image-square-2.jpg"
      }
    }
  },
]
await client.add(messages, { user_id: "alice" })
```
```json Output
{
  "results": [
@@ -90,7 +118,9 @@ client.add([image_message], user_id="alice")
## 2. Using Base64 Image Encoding for Local Files
For local images—or when embedding the image directly is preferable—you can use a Base64-encoded string.
<CodeGroup>
```python Python
import base64
# Path to the image file
@@ -113,6 +143,27 @@ image_message = {
client.add([image_message], user_id="alice")
```
```typescript TypeScript
import MemoryClient from "mem0ai";
import fs from 'fs';
const client = new MemoryClient();
const imagePath = 'path/to/your/image.jpg';
const base64Image = fs.readFileSync(imagePath, { encoding: 'base64' });
const imageMessage = {
  role: "user",
  content: {
    type: "image_url",
    image_url: {
      url: `data:image/jpeg;base64,${base64Image}`
    }
  }
};
await client.add([imageMessage], { user_id: "alice" })
```
</CodeGroup>
Using these methods, you can seamlessly incorporate images into your interactions, further enhancing Mem0's multimodal capabilities.
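Once an image has been added, the details Mem0 extracts from it can be recalled like any other memory. As a rough follow-up sketch, not part of this change, assuming the hosted client's `search` method with the same `user_id` used above:
```typescript
import MemoryClient from "mem0ai";

const client = new MemoryClient();

// Ask about details that were extracted from the pizza image
const results = await client.search("What does Alice like to eat?", {
  user_id: "alice",
});
console.log(results);
```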
If you have any questions, please feel free to reach out to us using one of the following methods:


@@ -40,6 +40,34 @@ messages = [
client.add(messages, user_id="alice")
```
```typescript TypeScript
import { Memory, Message } from "mem0ai/oss";
const client = new Memory();
const messages: Message[] = [
  {
    role: "user",
    content: "Hi, my name is Alice."
  },
  {
    role: "assistant",
    content: "Nice to meet you, Alice! What do you like to eat?"
  },
  {
    role: "user",
    content: {
      type: "image_url",
      image_url: {
        url: "https://www.superhealthykids.com/wp-content/uploads/2021/10/best-veggie-pizza-featured-image-square-2.jpg"
      }
    }
  },
]
await client.add(messages, { userId: "alice" })
```
```json Output
{
  "results": [
@@ -66,6 +94,7 @@ Mem0 allows you to add images to user interactions through two primary methods:
You can include an image by passing its direct URL. This method is simple and efficient for online images.
<CodeGroup>
```python
# Define the image URL
image_url = "https://www.superhealthykids.com/wp-content/uploads/2021/10/best-veggie-pizza-featured-image-square-2.jpg"
@@ -82,11 +111,33 @@ image_message = {
}
```
```typescript TypeScript
import { Memory, Message } from "mem0ai/oss";
const client = new Memory();
const imageUrl = "https://www.superhealthykids.com/wp-content/uploads/2021/10/best-veggie-pizza-featured-image-square-2.jpg";
const imageMessage: Message = {
  role: "user",
  content: {
    type: "image_url",
    image_url: {
      url: imageUrl
    }
  }
}
await client.add([imageMessage], { userId: "alice" })
```
</CodeGroup>
## 2. Using Base64 Image Encoding for Local Files
For local images or scenarios where embedding the image directly is preferable, you can use a Base64-encoded string.
<CodeGroup>
```python Python
import base64
# Path to the image file
@@ -108,6 +159,29 @@ image_message = {
}
```
```typescript TypeScript
import { Memory, Message } from "mem0ai/oss";
import fs from 'fs';
const client = new Memory();
const imagePath = "path/to/your/image.jpg";
const base64Image = fs.readFileSync(imagePath, { encoding: 'base64' });
const imageMessage: Message = {
  role: "user",
  content: {
    type: "image_url",
    image_url: {
      url: `data:image/jpeg;base64,${base64Image}`
    }
  }
}
await client.add([imageMessage], { userId: "alice" })
```
</CodeGroup>
By utilizing these methods, you can effectively incorporate images into user interactions, enhancing the multimodal capabilities of your Mem0 instance.
<Note>


@@ -1,6 +1,6 @@
{
  "name": "mem0ai",
  "version": "2.1.1",
  "description": "The Memory Layer For Your AI Apps",
  "main": "./dist/index.js",
  "module": "./dist/index.mjs",
@@ -114,5 +114,6 @@
  },
  "publishConfig": {
    "access": "public"
  },
  "packageManager": "pnpm@10.5.2+sha512.da9dc28cd3ff40d0592188235ab25d3202add8a207afbedc682220e4a0029ffbff4562102b9e6e46b4e3f9e8bd53e6d05de48544b0c57d4b0179e22c76d1199b"
}


@@ -11,6 +11,7 @@ import {
  SearchOptions,
  Webhook,
  WebhookPayload,
  Message,
} from "./mem0.types";
import { captureClientEvent, generateHash } from "./telemetry";
@@ -168,7 +169,7 @@ export default class MemoryClient {
  }
  _preparePayload(
    messages: string | Array<Message>,
    options: MemoryOptions,
  ): object {
    const payload: any = {};
@@ -187,7 +188,7 @@ export default class MemoryClient {
  }
  async add(
    messages: string | Array<Message>,
    options: MemoryOptions = {},
  ): Promise<Array<Memory>> {
    this._validateOrgProject();


@@ -28,9 +28,16 @@ export enum API_VERSION {
  V2 = "v2",
}
export interface MultiModalMessages {
  type: "image_url";
  image_url: {
    url: string;
  };
}
export interface Messages {
  role: string;
  content: string | MultiModalMessages;
}
export interface Message extends Messages {}
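With `content` widened to `string | MultiModalMessages`, a message passed to the client can now carry either plain text or an image payload. A minimal sketch of both shapes (the import path is an assumption; use wherever `Message` is exposed in your setup):
```typescript
// Assumption: adjust the import to wherever the Message type is exported in your project
import { Message } from "mem0ai";

// Plain text content, as before
const textMessage: Message = {
  role: "user",
  content: "Hi, my name is Alice.",
};

// Multimodal content: an image_url payload instead of a string
const imageMessage: Message = {
  role: "user",
  content: {
    type: "image_url",
    image_url: {
      url: "https://example.com/pizza.jpg", // hypothetical URL
    },
  },
};
```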


@@ -27,9 +27,15 @@ export class AnthropicLLM implements LLM {
      model: this.model,
      messages: otherMessages.map((msg) => ({
        role: msg.role as "user" | "assistant",
        content:
          typeof msg.content === "string"
            ? msg.content
            : msg.content.image_url.url,
      })),
      system:
        typeof systemMessage?.content === "string"
          ? systemMessage.content
          : undefined,
      max_tokens: 4096,
    });


@@ -23,7 +23,10 @@ export class GroqLLM implements LLM {
      model: this.model,
      messages: messages.map((msg) => ({
        role: msg.role as "system" | "user" | "assistant",
        content:
          typeof msg.content === "string"
            ? msg.content
            : JSON.stringify(msg.content),
      })),
      response_format: responseFormat as { type: "text" | "json_object" },
    });
@@ -36,7 +39,10 @@ export class GroqLLM implements LLM {
      model: this.model,
      messages: messages.map((msg) => ({
        role: msg.role as "system" | "user" | "assistant",
        content:
          typeof msg.content === "string"
            ? msg.content
            : JSON.stringify(msg.content),
      })),
    });


@@ -8,7 +8,7 @@ export class OpenAILLM implements LLM {
  constructor(config: LLMConfig) {
    this.openai = new OpenAI({ apiKey: config.apiKey });
    this.model = config.model || "gpt-4o-mini";
  }
  async generateResponse(
@@ -17,10 +17,16 @@ export class OpenAILLM implements LLM {
    tools?: any[],
  ): Promise<string | LLMResponse> {
    const completion = await this.openai.chat.completions.create({
      messages: messages.map((msg) => {
        const role = msg.role as "system" | "user" | "assistant";
        return {
          role,
          content:
            typeof msg.content === "string"
              ? msg.content
              : JSON.stringify(msg.content),
        };
      }),
      model: this.model,
      response_format: responseFormat as { type: "text" | "json_object" },
      ...(tools && { tools, tool_choice: "auto" }),
@@ -44,10 +50,16 @@ export class OpenAILLM implements LLM {
  async generateChat(messages: Message[]): Promise<LLMResponse> {
    const completion = await this.openai.chat.completions.create({
      messages: messages.map((msg) => {
        const role = msg.role as "system" | "user" | "assistant";
        return {
          role,
          content:
            typeof msg.content === "string"
              ? msg.content
              : JSON.stringify(msg.content),
        };
      }),
      model: this.model,
    });
    const response = completion.choices[0].message;


@@ -19,7 +19,10 @@ export class OpenAIStructuredLLM implements LLM {
    const completion = await this.openai.chat.completions.create({
      messages: messages.map((msg) => ({
        role: msg.role as "system" | "user" | "assistant",
        content:
          typeof msg.content === "string"
            ? msg.content
            : JSON.stringify(msg.content),
      })),
      model: this.model,
      ...(tools
@@ -63,7 +66,10 @@ export class OpenAIStructuredLLM implements LLM {
    const completion = await this.openai.chat.completions.create({
      messages: messages.map((msg) => ({
        role: msg.role as "system" | "user" | "assistant",
        content:
          typeof msg.content === "string"
            ? msg.content
            : JSON.stringify(msg.content),
      })),
      model: this.model,
    });


@@ -31,6 +31,7 @@ import {
  DeleteAllMemoryOptions,
  GetAllMemoryOptions,
} from "./memory.types";
import { parse_vision_messages } from "../utils/memory";
export class Memory {
  private config: MemoryConfig;
@@ -109,9 +110,11 @@ export class Memory {
      ? (messages as Message[])
      : [{ role: "user", content: messages }];
    const final_parsedMessages = await parse_vision_messages(parsedMessages);
    // Add to vector store
    const vectorStoreResult = await this.addToVectorStore(
      final_parsedMessages,
      metadata,
      filters,
    );
@@ -121,7 +124,7 @@ export class Memory {
    if (this.graphMemory) {
      try {
        graphResult = await this.graphMemory.add(
          final_parsedMessages.map((m) => m.content).join("\n"),
          filters,
        );
      } catch (error) {
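In effect, `Memory.add` now routes every incoming message through `parse_vision_messages`, so image content reaches the vector and graph stores as a text description rather than a raw URL. A minimal sketch of the flow from the caller's side, reusing the OSS usage shown in the docs above (the stored text depends on the LLM's description of the image):
```typescript
import { Memory, Message } from "mem0ai/oss";

const memory = new Memory();

const messages: Message[] = [
  { role: "user", content: "Hi, my name is Alice." },
  {
    role: "user",
    content: {
      type: "image_url",
      image_url: { url: "https://example.com/pizza.jpg" }, // hypothetical URL
    },
  },
];

// The image message is replaced by an LLM-generated description
// (via parse_vision_messages) before being embedded and stored.
const result = await memory.add(messages, { userId: "alice" });
console.log(result);
```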


@@ -1,8 +1,15 @@
import { z } from "zod";
export interface MultiModalMessages {
  type: "image_url";
  image_url: {
    url: string;
  };
}
export interface Message {
  role: string;
  content: string | MultiModalMessages;
}
export interface EmbeddingConfig {


@@ -0,0 +1,48 @@
import { OpenAILLM } from "../llms/openai";
import { Message } from "../types";
const get_image_description = async (image_url: string) => {
  const llm = new OpenAILLM({
    apiKey: process.env.OPENAI_API_KEY,
  });
  const response = await llm.generateResponse([
    {
      role: "user",
      content:
        "Provide a description of the image and do not include any additional text.",
    },
    {
      role: "user",
      content: { type: "image_url", image_url: { url: image_url } },
    },
  ]);
  return response;
};
const parse_vision_messages = async (messages: Message[]) => {
  const parsed_messages = [];
  for (const message of messages) {
    let new_message = {
      role: message.role,
      content: "",
    };
    if (message.role !== "system") {
      if (
        typeof message.content === "object" &&
        message.content.type === "image_url"
      ) {
        const description = await get_image_description(
          message.content.image_url.url,
        );
        new_message.content =
          typeof description === "string"
            ? description
            : JSON.stringify(description);
        parsed_messages.push(new_message);
      } else parsed_messages.push(message);
    }
  }
  return parsed_messages;
};
export { parse_vision_messages };
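A rough usage sketch of the new helper on its own (it assumes `OPENAI_API_KEY` is set, since `get_image_description` reads it from the environment; the import paths mirror the ones used inside the package and may need adjusting for your setup):
```typescript
// Assumed paths: these mirror the package-internal imports shown above
import { parse_vision_messages } from "../utils/memory";
import { Message } from "../types";

const messages: Message[] = [
  { role: "user", content: "Here is a photo of my lunch." },
  {
    role: "user",
    content: {
      type: "image_url",
      image_url: { url: "https://example.com/pizza.jpg" }, // hypothetical URL
    },
  },
];

// Plain text messages pass through unchanged; image_url messages are
// replaced with an LLM-generated description of the image.
const parsed = await parse_vision_messages(messages);
console.log(parsed);
```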