Multimodal Support NodeSDK (#2320)
@@ -12,7 +12,7 @@ Mem0 extends its capabilities beyond text by supporting multimodal data, includi

When a user submits an image, Mem0 processes it to extract textual information and other pertinent details. These details are then added to the user's memory, enhancing the system's ability to understand and recall visual inputs.

<CodeGroup>

-```python Code
+```python Python
import os
from mem0 import MemoryClient

@@ -44,6 +44,34 @@ messages = [

client.add(messages, user_id="alice")
```

+```typescript TypeScript
+import MemoryClient from "mem0ai";
+
+const client = new MemoryClient();
+
+const messages = [
+  {
+    role: "user",
+    content: "Hi, my name is Alice."
+  },
+  {
+    role: "assistant",
+    content: "Nice to meet you, Alice! What do you like to eat?"
+  },
+  {
+    role: "user",
+    content: {
+      type: "image_url",
+      image_url: {
+        url: "https://www.superhealthykids.com/wp-content/uploads/2021/10/best-veggie-pizza-featured-image-square-2.jpg"
+      }
+    }
+  },
+]
+
+await client.add(messages, { user_id: "alice" })
+```

```json Output
{
  "results": [

@@ -90,7 +118,9 @@ client.add([image_message], user_id="alice")

## 2. Using Base64 Image Encoding for Local Files

For local images, or when embedding the image directly is preferable, you can use a Base64-encoded string.

-```python
+<CodeGroup>
+```python Python
import base64

# Path to the image file

@@ -113,6 +143,27 @@ image_message = {

client.add([image_message], user_id="alice")
```

+```typescript TypeScript
+import MemoryClient from "mem0ai";
+import fs from 'fs';
+
+const client = new MemoryClient();
+
+const imagePath = 'path/to/your/image.jpg';
+
+const base64Image = fs.readFileSync(imagePath, { encoding: 'base64' });
+
+const imageMessage = {
+  role: "user",
+  content: {
+    type: "image_url",
+    image_url: {
+      url: `data:image/jpeg;base64,${base64Image}`
+    }
+  }
+};
+
+await client.add([imageMessage], { user_id: "alice" })
+```
</CodeGroup>

Using these methods, you can seamlessly incorporate images into your interactions, further enhancing Mem0's multimodal capabilities.

If you have any questions, please feel free to reach out to us using one of the following methods:

@@ -40,6 +40,34 @@ messages = [

client.add(messages, user_id="alice")
```

+```typescript TypeScript
+import { Memory, Message } from "mem0ai/oss";
+
+const client = new Memory();
+
+const messages: Message[] = [
+  {
+    role: "user",
+    content: "Hi, my name is Alice."
+  },
+  {
+    role: "assistant",
+    content: "Nice to meet you, Alice! What do you like to eat?"
+  },
+  {
+    role: "user",
+    content: {
+      type: "image_url",
+      image_url: {
+        url: "https://www.superhealthykids.com/wp-content/uploads/2021/10/best-veggie-pizza-featured-image-square-2.jpg"
+      }
+    }
+  },
+]
+
+await client.add(messages, { userId: "alice" })
+```

```json Output
{
  "results": [

@@ -66,6 +94,7 @@ Mem0 allows you to add images to user interactions through two primary methods:

You can include an image by passing its direct URL. This method is simple and efficient for online images.

+<CodeGroup>
```python
# Define the image URL
image_url = "https://www.superhealthykids.com/wp-content/uploads/2021/10/best-veggie-pizza-featured-image-square-2.jpg"

@@ -82,11 +111,33 @@ image_message = {
}
```

+```typescript TypeScript
+import { Memory, Message } from "mem0ai/oss";
+
+const client = new Memory();
+
+const imageUrl = "https://www.superhealthykids.com/wp-content/uploads/2021/10/best-veggie-pizza-featured-image-square-2.jpg";
+
+const imageMessage: Message = {
+  role: "user",
+  content: {
+    type: "image_url",
+    image_url: {
+      url: imageUrl
+    }
+  }
+}
+
+await client.add([imageMessage], { userId: "alice" })
+```
+</CodeGroup>

## 2. Using Base64 Image Encoding for Local Files

For local images or scenarios where embedding the image directly is preferable, you can use a Base64-encoded string.

-```python
+<CodeGroup>
+```python Python
import base64

# Path to the image file

@@ -108,6 +159,29 @@ image_message = {
}
```

+```typescript TypeScript
+import { Memory, Message } from "mem0ai/oss";
+import fs from "fs"; // fs import was missing from the original snippet; required for readFileSync below
+
+const client = new Memory();
+
+const imagePath = "path/to/your/image.jpg";
+
+const base64Image = fs.readFileSync(imagePath, { encoding: 'base64' });
+
+const imageMessage: Message = {
+  role: "user",
+  content: {
+    type: "image_url",
+    image_url: {
+      url: `data:image/jpeg;base64,${base64Image}`
+    }
+  }
+}
+
+await client.add([imageMessage], { userId: "alice" })
+```
+</CodeGroup>

By utilizing these methods, you can effectively incorporate images into user interactions, enhancing the multimodal capabilities of your Mem0 instance.

<Note>

@@ -1,6 +1,6 @@
{
  "name": "mem0ai",
-  "version": "2.1.0",
+  "version": "2.1.1",
  "description": "The Memory Layer For Your AI Apps",
  "main": "./dist/index.js",
  "module": "./dist/index.mjs",
@@ -114,5 +114,6 @@
  },
  "publishConfig": {
    "access": "public"
-  }
+  },
+  "packageManager": "pnpm@10.5.2+sha512.da9dc28cd3ff40d0592188235ab25d3202add8a207afbedc682220e4a0029ffbff4562102b9e6e46b4e3f9e8bd53e6d05de48544b0c57d4b0179e22c76d1199b"
}

@@ -11,6 +11,7 @@ import {
  SearchOptions,
  Webhook,
  WebhookPayload,
+  Message,
} from "./mem0.types";
import { captureClientEvent, generateHash } from "./telemetry";

@@ -168,7 +169,7 @@ export default class MemoryClient {
  }

  _preparePayload(
-    messages: string | Array<{ role: string; content: string }>,
+    messages: string | Array<Message>,
    options: MemoryOptions,
  ): object {
    const payload: any = {};

@@ -187,7 +188,7 @@ export default class MemoryClient {
  }

  async add(
-    messages: string | Array<{ role: string; content: string }>,
+    messages: string | Array<Message>,
    options: MemoryOptions = {},
  ): Promise<Array<Memory>> {
    this._validateOrgProject();

@@ -28,9 +28,16 @@ export enum API_VERSION {
  V2 = "v2",
}

+export interface MultiModalMessages {
+  type: "image_url";
+  image_url: {
+    url: string;
+  };
+}
+
export interface Messages {
  role: string;
-  content: string;
+  content: string | MultiModalMessages;
}

export interface Message extends Messages {}

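This widened `content` union is the heart of the change: string-only callers keep compiling, while image payloads become valid (the OSS types later in the diff mirror the same union). A minimal self-contained sketch of what now type-checks, with the interfaces inlined for illustration and a placeholder URL:

```typescript
// Inlined from mem0.types.ts so the sketch stands alone.
interface MultiModalMessages {
  type: "image_url";
  image_url: { url: string };
}

interface Messages {
  role: string;
  content: string | MultiModalMessages;
}

// Both shapes now satisfy Messages.
const textMessage: Messages = {
  role: "user",
  content: "Hi, my name is Alice.",
};

const imageMessage: Messages = {
  role: "user",
  content: {
    type: "image_url",
    image_url: { url: "https://example.com/pizza.jpg" }, // placeholder URL
  },
};

console.log(textMessage, imageMessage); // both compile against the widened union
```
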
@@ -27,9 +27,15 @@ export class AnthropicLLM implements LLM {
      model: this.model,
      messages: otherMessages.map((msg) => ({
        role: msg.role as "user" | "assistant",
-        content: msg.content,
+        content:
+          typeof msg.content === "string"
+            ? msg.content
+            : msg.content.image_url.url,
      })),
-      system: systemMessage?.content,
+      system:
+        typeof systemMessage?.content === "string"
+          ? systemMessage.content
+          : undefined,
      max_tokens: 4096,
    });

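This `typeof` narrowing is the recurring pattern across the provider adapters: Anthropic extracts the raw image URL, while the Groq and OpenAI adapters below fall back to `JSON.stringify`. A distilled standalone version of the fallback form (the names here are illustrative, not SDK API):

```typescript
type MessageContent =
  | string
  | { type: "image_url"; image_url: { url: string } };

// Strings pass through untouched; structured content is flattened to a
// string so providers without a native image type still receive valid text.
function flattenContent(content: MessageContent): string {
  return typeof content === "string" ? content : JSON.stringify(content);
}

console.log(flattenContent("hello")); // "hello"
console.log(
  flattenContent({ type: "image_url", image_url: { url: "https://example.com/a.jpg" } }),
); // '{"type":"image_url","image_url":{"url":"https://example.com/a.jpg"}}'
```
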
@@ -23,7 +23,10 @@ export class GroqLLM implements LLM {
      model: this.model,
      messages: messages.map((msg) => ({
        role: msg.role as "system" | "user" | "assistant",
-        content: msg.content,
+        content:
+          typeof msg.content === "string"
+            ? msg.content
+            : JSON.stringify(msg.content),
      })),
      response_format: responseFormat as { type: "text" | "json_object" },
    });
@@ -36,7 +39,10 @@ export class GroqLLM implements LLM {
      model: this.model,
      messages: messages.map((msg) => ({
        role: msg.role as "system" | "user" | "assistant",
-        content: msg.content,
+        content:
+          typeof msg.content === "string"
+            ? msg.content
+            : JSON.stringify(msg.content),
      })),
    });

@@ -8,7 +8,7 @@ export class OpenAILLM implements LLM {

  constructor(config: LLMConfig) {
    this.openai = new OpenAI({ apiKey: config.apiKey });
-    this.model = config.model || "gpt-4-turbo-preview";
+    this.model = config.model || "gpt-4o-mini";
  }

  async generateResponse(
@@ -17,10 +17,16 @@ export class OpenAILLM implements LLM {
    tools?: any[],
  ): Promise<string | LLMResponse> {
    const completion = await this.openai.chat.completions.create({
-      messages: messages.map((msg) => ({
-        role: msg.role as "system" | "user" | "assistant",
-        content: msg.content,
-      })),
+      messages: messages.map((msg) => {
+        const role = msg.role as "system" | "user" | "assistant";
+        return {
+          role,
+          content:
+            typeof msg.content === "string"
+              ? msg.content
+              : JSON.stringify(msg.content),
+        };
+      }),
      model: this.model,
      response_format: responseFormat as { type: "text" | "json_object" },
      ...(tools && { tools, tool_choice: "auto" }),
@@ -44,10 +50,16 @@ export class OpenAILLM implements LLM {

  async generateChat(messages: Message[]): Promise<LLMResponse> {
    const completion = await this.openai.chat.completions.create({
-      messages: messages.map((msg) => ({
-        role: msg.role as "system" | "user" | "assistant",
-        content: msg.content,
-      })),
+      messages: messages.map((msg) => {
+        const role = msg.role as "system" | "user" | "assistant";
+        return {
+          role,
+          content:
+            typeof msg.content === "string"
+              ? msg.content
+              : JSON.stringify(msg.content),
+        };
+      }),
      model: this.model,
    });
    const response = completion.choices[0].message;

@@ -19,7 +19,10 @@ export class OpenAIStructuredLLM implements LLM {
    const completion = await this.openai.chat.completions.create({
      messages: messages.map((msg) => ({
        role: msg.role as "system" | "user" | "assistant",
-        content: msg.content,
+        content:
+          typeof msg.content === "string"
+            ? msg.content
+            : JSON.stringify(msg.content),
      })),
      model: this.model,
      ...(tools
@@ -63,7 +66,10 @@ export class OpenAIStructuredLLM implements LLM {
    const completion = await this.openai.chat.completions.create({
      messages: messages.map((msg) => ({
        role: msg.role as "system" | "user" | "assistant",
-        content: msg.content,
+        content:
+          typeof msg.content === "string"
+            ? msg.content
+            : JSON.stringify(msg.content),
      })),
      model: this.model,
    });

@@ -31,6 +31,7 @@ import {
  DeleteAllMemoryOptions,
  GetAllMemoryOptions,
} from "./memory.types";
+import { parse_vision_messages } from "../utils/memory";

export class Memory {
  private config: MemoryConfig;
@@ -109,9 +110,11 @@ export class Memory {
        ? (messages as Message[])
        : [{ role: "user", content: messages }];

+    const final_parsedMessages = await parse_vision_messages(parsedMessages);
+
    // Add to vector store
    const vectorStoreResult = await this.addToVectorStore(
-      parsedMessages,
+      final_parsedMessages,
      metadata,
      filters,
    );
@@ -121,7 +124,7 @@
    if (this.graphMemory) {
      try {
        graphResult = await this.graphMemory.add(
-          parsedMessages.map((m) => m.content).join("\n"),
+          final_parsedMessages.map((m) => m.content).join("\n"),
          filters,
        );
      } catch (error) {

@@ -1,8 +1,15 @@
import { z } from "zod";

+export interface MultiModalMessages {
+  type: "image_url";
+  image_url: {
+    url: string;
+  };
+}
+
export interface Message {
  role: string;
-  content: string;
+  content: string | MultiModalMessages;
}

export interface EmbeddingConfig {

mem0-ts/src/oss/src/utils/memory.ts (new file, 48 lines)
@@ -0,0 +1,48 @@
import { OpenAILLM } from "../llms/openai";
import { Message } from "../types";

const get_image_description = async (image_url: string) => {
  const llm = new OpenAILLM({
    apiKey: process.env.OPENAI_API_KEY,
  });
  const response = await llm.generateResponse([
    {
      role: "user",
      content:
        "Provide a description of the image and do not include any additional text.",
    },
    {
      role: "user",
      content: { type: "image_url", image_url: { url: image_url } },
    },
  ]);
  return response;
};

const parse_vision_messages = async (messages: Message[]) => {
  const parsed_messages = [];
  for (const message of messages) {
    let new_message = {
      role: message.role,
      content: "",
    };
    if (message.role !== "system") {
      if (
        typeof message.content === "object" &&
        message.content.type === "image_url"
      ) {
        const description = await get_image_description(
          message.content.image_url.url,
        );
        new_message.content =
          typeof description === "string"
            ? description
            : JSON.stringify(description);
        parsed_messages.push(new_message);
      } else parsed_messages.push(message);
    }
  }
  return parsed_messages;
};

export { parse_vision_messages };

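For reference, a hypothetical standalone call to `parse_vision_messages`, mirroring how `Memory.add()` invokes it above. It assumes `OPENAI_API_KEY` is set in the environment; the import paths and image URL are placeholders:

```typescript
// Hypothetical usage; paths assume a module sitting next to src/.
import { parse_vision_messages } from "./utils/memory";
import { Message } from "./types";

const messages: Message[] = [
  { role: "user", content: "What did I have for dinner?" },
  {
    role: "user",
    content: {
      type: "image_url",
      image_url: { url: "https://example.com/pizza.jpg" }, // placeholder URL
    },
  },
];

// Image messages come back with their content replaced by a generated
// text description; plain text messages pass through unchanged.
const parsed = await parse_vision_messages(messages);
console.log(parsed);
```
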