Added support for graceful failure in cases where services are down. (#2650)

Saket Aryan
2025-05-08 16:03:26 +05:30
committed by GitHub
parent 0e7c34f541
commit 84910b40da
7 changed files with 268 additions and 171 deletions

View File

@@ -209,6 +209,11 @@ mode: "wide"
<Tab title="TypeScript">
<Update label="2025-05-08" description="v2.1.25">
**Improvements:**
- **Client:** Improved error handling in the client.
</Update>
<Update label="2025-05-06" description="v2.1.24">
**New Features:**
- **Client:** Added new param `output_format` to match Python SDK.
@@ -482,6 +487,11 @@ mode: "wide"
<Tab title="Vercel AI SDK">
<Update label="2025-05-08" description="v1.0.3">
**Improvements:**
- **Vercel AI SDK:** Added support for graceful failure in cases where services are down.
</Update>
<Update label="2025-05-01" description="v1.0.1">
**New Features:**
- **Vercel AI SDK:** Added support for graph memories
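The two changelog entries above describe the same idea from the consumer's side: calls that previously let a raw fetch failure or an undefined response escape now either raise a typed error or quietly degrade to an empty result. A minimal sketch of how a caller might lean on this, assuming the client is constructed with an API key as in the published mem0ai examples (the helper function itself is hypothetical):

import MemoryClient from "mem0ai";

const client = new MemoryClient({ apiKey: process.env.MEM0_API_KEY ?? "" });

// Hypothetical helper: report whether mem0 is reachable without crashing the app.
async function memoryServiceAvailable(): Promise<boolean> {
  try {
    // ping() now wraps network and parsing failures in an APIError
    // instead of letting a bare fetch/TypeError propagate.
    await client.ping();
    return true;
  } catch (error) {
    console.warn("mem0 is unreachable, continuing without memories:", error);
    return false;
  }
}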

View File

@@ -1,6 +1,6 @@
{
"name": "mem0ai",
"version": "2.1.24",
"version": "2.1.25",
"description": "The Memory Layer For Your AI Apps",
"main": "./dist/index.js",
"module": "./dist/index.mjs",

View File

@@ -179,23 +179,41 @@ export default class MemoryClient {
}
async ping(): Promise<void> {
const response = await fetch(`${this.host}/v1/ping/`, {
try {
const response = await this._fetchWithErrorHandling(
`${this.host}/v1/ping/`,
{
method: "GET",
headers: {
Authorization: `Token ${this.apiKey}`,
},
});
},
);
const data = await response.json();
if (data.status !== "ok") {
throw new Error("API Key is invalid");
if (!response || typeof response !== "object") {
throw new APIError("Invalid response format from ping endpoint");
}
const { org_id, project_id, user_email } = data;
if (response.status !== "ok") {
throw new APIError(response.message || "API Key is invalid");
}
this.organizationId = this.organizationId || org_id || null;
this.projectId = this.projectId || project_id || null;
this.telemetryId = user_email || "";
const { org_id, project_id, user_email } = response;
// Only update if values are actually present
if (org_id && !this.organizationId) this.organizationId = org_id;
if (project_id && !this.projectId) this.projectId = project_id;
if (user_email) this.telemetryId = user_email;
} catch (error: any) {
// Convert generic errors to APIError with meaningful messages
if (error instanceof APIError) {
throw error;
} else {
throw new APIError(
`Failed to ping server: ${error.message || "Unknown error"}`,
);
}
}
}
async add(
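The rewritten ping() hands the raw fetch off to this._fetchWithErrorHandling, whose body is not part of this hunk. Judging only from how its result is used above (it is treated as already-parsed JSON, and failures surface as APIError), a plausible standalone sketch of such a helper follows; APIError is stubbed locally for illustration and the SDK's real implementation may differ:

class APIError extends Error {}

async function fetchWithErrorHandling(url: string, options: RequestInit): Promise<any> {
  let response: Response;
  try {
    response = await fetch(url, options);
  } catch (error: any) {
    // Network-level failure: DNS error, connection refused, service down.
    throw new APIError(`Request to ${url} failed: ${error?.message ?? "network error"}`);
  }
  if (!response.ok) {
    // Non-2xx responses become a typed error instead of a half-parsed body.
    throw new APIError(`Request to ${url} returned HTTP ${response.status}`);
  }
  return response.json();
}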

View File

@@ -1,6 +1,6 @@
{
"name": "@mem0/vercel-ai-provider",
"version": "1.0.2",
"version": "1.0.3",
"description": "Vercel AI Provider for providing memory to LLMs",
"main": "./dist/index.js",
"module": "./dist/index.mjs",

View File

@@ -31,9 +31,13 @@ export class Mem0GenericLanguageModel implements LanguageModelV1 {
provider: string;
private async processMemories(messagesPrompts: LanguageModelV1Message[], mem0Config: Mem0ConfigSettings) {
try {
// Add New Memories
addMemories(messagesPrompts, mem0Config).then((res) => {
return res;
}).catch((e) => {
console.error("Error while adding memories");
return { memories: [], messagesPrompts: [] };
});
// Get Memories
@@ -41,23 +45,23 @@ export class Mem0GenericLanguageModel implements LanguageModelV1 {
const mySystemPrompt = "These are the memories I have stored. Give more weightage to the question by users and try to answer that first. You have to modify your answer based on the memories I have provided. If the memories are irrelevant you can ignore them. Also don't reply to this section of the prompt, or the memories, they are only for your reference. The System prompt starts after text System Message: \n\n";
const isGraphEnabled = mem0Config.enable_graph;
const isGraphEnabled = mem0Config?.enable_graph;
let memoriesText = "";
let memoriesText2 = "";
try {
// @ts-ignore
if (isGraphEnabled) {
memoriesText = memories.results.map((memory: any) => {
return `Memory: ${memory.memory}\n\n`;
memoriesText = memories?.results?.map((memory: any) => {
return `Memory: ${memory?.memory}\n\n`;
}).join("\n\n");
memoriesText2 = memories.relations.map((memory: any) => {
return `Relation: ${memory.source} -> ${memory.relationship} -> ${memory.target} \n\n`;
memoriesText2 = memories?.relations?.map((memory: any) => {
return `Relation: ${memory?.source} -> ${memory?.relationship} -> ${memory?.target} \n\n`;
}).join("\n\n");
} else {
memoriesText = memories.map((memory: any) => {
return `Memory: ${memory.memory}\n\n`;
memoriesText = memories?.map((memory: any) => {
return `Memory: ${memory?.memory}\n\n`;
}).join("\n\n");
}
} catch(e) {
@@ -78,15 +82,19 @@ export class Mem0GenericLanguageModel implements LanguageModelV1 {
};
// Add the system prompt to the beginning of the messages if there are memories
if (memories.length > 0) {
if (memories?.length > 0) {
messagesPrompts.unshift(systemPrompt);
}
if (isGraphEnabled) {
memories = memories.results;
memories = memories?.results;
}
return { memories, messagesPrompts };
} catch(e) {
console.error("Error while processing memories");
return { memories: [], messagesPrompts };
}
}
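The recurring edit in this file is swapping bare property access for optional chaining, so that a failed or empty memory lookup short-circuits to undefined instead of throwing a TypeError deep inside the provider. A tiny illustration with a made-up lookup result:

// Simulate a lookup that failed upstream and produced nothing.
function failedLookup(): { results?: { memory: string }[] } | undefined {
  return undefined;
}

const memories = failedLookup();

// Before the change, memories.results.map(...) would throw
// "TypeError: Cannot read properties of undefined"; with optional chaining
// the whole expression collapses to undefined and ?? supplies a safe default.
const memoriesText =
  memories?.results?.map((m) => `Memory: ${m.memory}`).join("\n\n") ?? "";
console.log(JSON.stringify(memoriesText)); // ""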
async doGenerate(options: LanguageModelV1CallOptions): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>> {
@@ -121,7 +129,7 @@ export class Mem0GenericLanguageModel implements LanguageModelV1 {
});
// If there are no memories, return the original response
if (!memories || memories.length === 0) {
if (!memories || memories?.length === 0) {
return ans;
}
@@ -129,7 +137,7 @@ export class Mem0GenericLanguageModel implements LanguageModelV1 {
const sources = [...(ans.sources || [])];
// Add a combined source with all memories
if (Array.isArray(memories) && memories.length > 0) {
if (Array.isArray(memories) && memories?.length > 0) {
sources.push({
title: "Mem0 Memories",
sourceType: "url",
@@ -138,13 +146,13 @@ export class Mem0GenericLanguageModel implements LanguageModelV1 {
providerMetadata: {
mem0: {
memories: memories,
memoriesText: memories.map((memory: any) => memory.memory).join("\n\n")
memoriesText: memories?.map((memory: any) => memory?.memory).join("\n\n")
}
}
});
// Add individual memory sources for more detailed information
memories.forEach((memory: any) => {
memories?.forEach((memory: any) => {
sources.push({
title: memory.title || "Memory",
sourceType: "url",
@@ -153,7 +161,7 @@ export class Mem0GenericLanguageModel implements LanguageModelV1 {
providerMetadata: {
mem0: {
memory: memory,
memoryText: memory.memory
memoryText: memory?.memory
}
}
});
@@ -204,7 +212,7 @@ export class Mem0GenericLanguageModel implements LanguageModelV1 {
});
// If there are no memories, return the original stream
if (!memories || memories.length === 0) {
if (!memories || memories?.length === 0) {
return streamResponse;
}
@@ -216,7 +224,7 @@ export class Mem0GenericLanguageModel implements LanguageModelV1 {
start(controller) {
// Add source chunks for each memory at the beginning
try {
if (Array.isArray(memories) && memories.length > 0) {
if (Array.isArray(memories) && memories?.length > 0) {
// Create a single source that contains all memories
controller.enqueue({
type: 'source',
@@ -228,25 +236,25 @@ export class Mem0GenericLanguageModel implements LanguageModelV1 {
providerMetadata: {
mem0: {
memories: memories,
memoriesText: memories.map((memory: any) => memory.memory).join("\n\n")
memoriesText: memories?.map((memory: any) => memory?.memory).join("\n\n")
}
}
}
});
// Also add individual memory sources for more detailed information
memories.forEach((memory: any) => {
memories?.forEach((memory: any) => {
controller.enqueue({
type: 'source',
source: {
title: memory.title || "Memory",
title: memory?.title || "Memory",
sourceType: "url",
id: "mem0-memory-" + generateRandomId(),
url: "https://app.mem0.ai",
providerMetadata: {
mem0: {
memory: memory,
memoryText: memory.memory
memoryText: memory?.memory
}
}
}
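Taken together, the provider-side changes mean a memory failure degrades to "no memories" instead of aborting the request: processMemories catches its own errors and returns an empty list, and doGenerate/doStream simply return the underlying model response when that list is empty. A sketch of what that buys a consumer, assuming the createMem0 factory from the package README; the model name and user_id are placeholders, and the Mem0 API key is read from the MEM0_API_KEY environment variable as in the utils below:

import { generateText } from "ai";
import { createMem0 } from "@mem0/vercel-ai-provider";

const mem0 = createMem0({ provider: "openai" });

async function ask(question: string) {
  // Even if api.mem0.ai is unreachable, this call should still resolve:
  // the provider logs the memory error and generates without memories.
  const { text } = await generateText({
    model: mem0("gpt-4o-mini", { user_id: "demo-user" }),
    prompt: question,
  });
  return text;
}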

View File

@@ -7,6 +7,7 @@ interface Message {
}
const flattenPrompt = (prompt: LanguageModelV1Prompt) => {
try {
return prompt.map((part) => {
if (part.role === "user") {
return part.content
@@ -16,10 +17,16 @@ const flattenPrompt = (prompt: LanguageModelV1Prompt) => {
}
return "";
}).join(" ");
} catch (error) {
console.error("Error in flattenPrompt:", error);
return "";
}
}
const convertToMem0Format = (messages: LanguageModelV1Prompt) => {
try {
return messages.flatMap((message: any) => {
try {
if (typeof message.content === 'string') {
return {
role: message.role,
@@ -28,19 +35,33 @@ const convertToMem0Format = (messages: LanguageModelV1Prompt) => {
}
else {
return message.content.map((obj: any) => {
try {
if (obj.type === "text") {
return {
role: message.role,
content: obj.text,
};
} else {
return null; // Handle other cases or return null/undefined as needed
}
}).filter((item: null) => item !== null); // Filter out null values if necessary
return null;
} catch (error) {
console.error("Error processing content object:", error);
return null;
}
}).filter((item: null) => item !== null);
}
} catch (error) {
console.error("Error processing message:", error);
return [];
}
});
} catch (error) {
console.error("Error in convertToMem0Format:", error);
return [];
}
}
})};
const searchInternalMemories = async (query: string, config?: Mem0ConfigSettings, top_k: number = 5) => {
try {
const filters: { AND: Array<{ [key: string]: string | undefined }> } = {
AND: [],
};
@@ -67,25 +88,47 @@ const searchInternalMemories = async (query: string, config?: Mem0ConfigSettings
const org_project_filters = {
org_id: config&&config.org_id,
project_id: config&&config.project_id,
org_name: !config?.org_id ? config&&config.org_name : undefined, // deprecated
project_name: !config?.org_id ? config&&config.project_name : undefined, // deprecated
org_name: !config?.org_id ? config&&config.org_name : undefined,
project_name: !config?.org_id ? config&&config.project_name : undefined,
}
const options = {
method: 'POST',
// headers: {Authorization: `Token ${(config&&config.mem0ApiKey) || (typeof process !== 'undefined' && process.env && process.env.MEM0_API_KEY) || ""}`, 'Content-Type': 'application/json'},
headers: {Authorization: `Token ${loadApiKey({
const apiKey = loadApiKey({
apiKey: (config&&config.mem0ApiKey),
environmentVariableName: "MEM0_API_KEY",
description: "Mem0",
})}`, 'Content-Type': 'application/json'},
body: JSON.stringify({query, filters, ...config, top_k: config&&config.top_k || top_k, version: "v2", output_format: "v1.1", ...org_project_filters}),
});
const options = {
method: 'POST',
headers: {
Authorization: `Token ${apiKey}`,
'Content-Type': 'application/json'
},
body: JSON.stringify({
query,
filters,
...config,
top_k: config&&config.top_k || top_k,
version: "v2",
output_format: "v1.1",
...org_project_filters
}),
};
const response = await fetch('https://api.mem0.ai/v2/memories/search/', options);
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
const data = await response.json();
return data;
} catch (error) {
console.error("Error in searchInternalMemories:", error);
throw error;
}
}
const addMemories = async (messages: LanguageModelV1Prompt, config?: Mem0ConfigSettings) => {
try {
let finalMessages: Array<Message> = [];
if (typeof messages === "string") {
finalMessages = [{ role: "user", content: messages }];
@@ -94,83 +137,101 @@ const addMemories = async (messages: LanguageModelV1Prompt, config?: Mem0ConfigS
}
const response = await updateMemories(finalMessages, config);
return response;
} catch (error) {
console.error("Error in addMemories:", error);
throw error;
}
}
const updateMemories = async (messages: Array<Message>, config?: Mem0ConfigSettings) => {
const options = {
method: 'POST',
headers: {Authorization: `Token ${loadApiKey({
try {
const apiKey = loadApiKey({
apiKey: (config&&config.mem0ApiKey),
environmentVariableName: "MEM0_API_KEY",
description: "Mem0",
})}`, 'Content-Type': 'application/json'},
});
const options = {
method: 'POST',
headers: {
Authorization: `Token ${apiKey}`,
'Content-Type': 'application/json'
},
body: JSON.stringify({messages, ...config}),
};
const response = await fetch('https://api.mem0.ai/v1/memories/', options);
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
const data = await response.json();
return data;
} catch (error) {
console.error("Error in updateMemories:", error);
throw error;
}
}
const retrieveMemories = async (prompt: LanguageModelV1Prompt | string, config?: Mem0ConfigSettings) => {
try {
const message = typeof prompt === 'string' ? prompt : flattenPrompt(prompt);
const systemPrompt = "These are the memories I have stored. Give more weightage to the question by users and try to answer that first. You have to modify your answer based on the memories I have provided. If the memories are irrelevant you can ignore them. Also don't reply to this section of the prompt, or the memories, they are only for your reference. The System prompt starts after text System Message: \n\n";
const memories = await searchInternalMemories(message, config);
let memoriesText1 = "";
let memoriesText2 = "";
let graphPrompt = "";
try {
// @ts-ignore
memoriesText1 = memories.results.map((memory: any)=>{
memoriesText1 = memories?.results?.map((memory: any) => {
return `Memory: ${memory.memory}\n\n`;
}).join("\n\n");
if (config?.enable_graph) {
memoriesText2 = memories.relations.map((memory: any)=>{
memoriesText2 = memories?.relations?.map((memory: any) => {
return `Relation: ${memory.source} -> ${memory.relationship} -> ${memory.target} \n\n`;
}).join("\n\n");
}
if (config?.enable_graph) {
graphPrompt = `HERE ARE THE GRAPHS RELATIONS FOR THE PREFERENCES OF THE USER:\n\n ${memoriesText2}`;
}
}catch(e){
console.error("Error while parsing memories");
// console.log(e);
} catch (error) {
console.error("Error while parsing memories:", error);
}
if(memories.length === 0){
if (!memories || memories?.length === 0) {
return "";
}
return `System Message: ${systemPrompt} ${memoriesText1} ${graphPrompt}`;
} catch (error) {
console.error("Error in retrieveMemories:", error);
throw error;
}
}
const getMemories = async (prompt: LanguageModelV1Prompt | string, config?: Mem0ConfigSettings) => {
const message = typeof prompt === 'string' ? prompt : flattenPrompt(prompt);
let memories = [];
try {
// @ts-ignore
memories = await searchInternalMemories(message, config);
const message = typeof prompt === 'string' ? prompt : flattenPrompt(prompt);
const memories = await searchInternalMemories(message, config);
if (!config?.enable_graph) {
memories = memories.results;
}
}
catch(e){
console.error("Error while searching memories");
return memories?.results;
}
return memories;
} catch (error) {
console.error("Error in getMemories:", error);
throw error;
}
}
const searchMemories = async (prompt: LanguageModelV1Prompt | string, config?: Mem0ConfigSettings) => {
const message = typeof prompt === 'string' ? prompt : flattenPrompt(prompt);
let memories = [];
try {
// @ts-ignore
memories = await searchInternalMemories(message, config);
}
catch(e){
console.error("Error while searching memories");
}
const message = typeof prompt === 'string' ? prompt : flattenPrompt(prompt);
const memories = await searchInternalMemories(message, config);
return memories;
} catch (error) {
console.error("Error in searchMemories:", error);
return [];
}
}
export {addMemories, updateMemories, retrieveMemories, flattenPrompt, searchMemories, getMemories};
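One detail worth noting about the exported helpers after this change is that they do not all fail the same way: searchInternalMemories throws on HTTP or network errors, and each wrapper then picks a policy. addMemories, updateMemories, retrieveMemories, and getMemories rethrow so the caller can decide, while searchMemories logs and falls back to an empty array. A short illustration of the difference; the import path is assumed to be the package entry point and user_id is a placeholder:

import { getMemories, searchMemories } from "@mem0/vercel-ai-provider";

// searchMemories swallows failures and resolves to [], so no guard is needed.
const viaSearch = await searchMemories("favorite food", { user_id: "demo-user" });

// getMemories rethrows, so the caller chooses how to degrade.
let viaGet: unknown[] = [];
try {
  viaGet = await getMemories("favorite food", { user_id: "demo-user" });
} catch (error) {
  console.warn("memory lookup failed, continuing without memories:", error);
}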