From c3c9205ffa5205587abd1a81d388c776cdd2ed8a Mon Sep 17 00:00:00 2001
From: Saket Aryan
Date: Tue, 15 Apr 2025 20:08:41 +0530
Subject: [PATCH] TypeScript OSS: Langchain Integration (#2556)

---
 docs/changelog.mdx                              |  11 +
 .../components/embedders/models/langchain.mdx   |  26 ++
 docs/components/llms/models/langchain.mdx       |  31 +++
 docs/components/vectordbs/dbs/langchain.mdx     |  27 ++
 mem0-ts/package.json                            |   7 +-
 mem0-ts/pnpm-lock.yaml                          | 190 +++++++++++++
 mem0-ts/src/oss/src/config/defaults.ts          |   1 +
 mem0-ts/src/oss/src/config/manager.ts           |  97 +++++--
 mem0-ts/src/oss/src/embeddings/langchain.ts     |  50 ++++
 mem0-ts/src/oss/src/graphs/tools.ts             |  54 ++++
 mem0-ts/src/oss/src/index.ts                    |   4 +
 mem0-ts/src/oss/src/llms/base.ts                |   2 +-
 mem0-ts/src/oss/src/llms/langchain.ts           | 255 ++++++++++++++++++
 mem0-ts/src/oss/src/memory/index.ts             |  74 ++++-
 mem0-ts/src/oss/src/prompts/index.ts            |  34 +++
 mem0-ts/src/oss/src/types/index.ts              |  19 +-
 mem0-ts/src/oss/src/utils/factory.ts            |  17 +-
 .../src/oss/src/vector_stores/langchain.ts      | 231 ++++++++++++++++
 18 files changed, 1075 insertions(+), 55 deletions(-)
 create mode 100644 mem0-ts/src/oss/src/embeddings/langchain.ts
 create mode 100644 mem0-ts/src/oss/src/llms/langchain.ts
 create mode 100644 mem0-ts/src/oss/src/vector_stores/langchain.ts

diff --git a/docs/changelog.mdx b/docs/changelog.mdx
index 0a080122..68504fde 100644
--- a/docs/changelog.mdx
+++ b/docs/changelog.mdx
@@ -127,6 +127,17 @@ mode: "wide"
 
+
+**New Features:**
+- **OSS SDK:** Added support for Langchain LLMs
+- **OSS SDK:** Added support for Langchain embedders
+- **OSS SDK:** Added support for Langchain vector stores
+
+**Improvements:**
+- **OSS SDK:** Widened the `model` field in the LLM and Embedder configs from `string` to `any`, so an initialized Langchain model instance can be passed in place of a model name
+- **OSS SDK:** Added a `client` field to the vector store config for passing a Langchain vector store instance
+
+
 **Bug Fixes:**
 - **Azure OpenAI:** Fixed issues with Azure OpenAI

diff --git a/docs/components/embedders/models/langchain.mdx b/docs/components/embedders/models/langchain.mdx
index 00dc4c77..aa48ae26 100644
--- a/docs/components/embedders/models/langchain.mdx
+++ b/docs/components/embedders/models/langchain.mdx
@@ -42,6 +42,32 @@ messages = [
 ]
 m.add(messages, user_id="alice", metadata={"category": "movies"})
 ```
+
+```typescript TypeScript
+import { Memory } from "mem0ai";
+import { OpenAIEmbeddings } from "@langchain/openai";
+
+const embeddings = new OpenAIEmbeddings();
+const config = {
+  embedder: {
+    provider: "langchain",
+    config: {
+      model: embeddings,
+    },
+  },
+};
+
+const memory = new Memory(config);
+
+const messages = [
+  { role: "user", content: "I'm planning to watch a movie tonight. Any recommendations?" },
+  { role: "assistant", content: "How about thriller movies? They can be quite engaging." },
+  { role: "user", content: "I'm not a big fan of thriller movies but I love sci-fi movies." },
+  { role: "assistant", content: "Got it! I'll avoid thriller recommendations and suggest sci-fi movies in the future." },
+];
+
+await memory.add(messages, { userId: "alice", metadata: { category: "movies" } });
+```
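+
+Memories added this way can be searched back through the same instance. The snippet below is a minimal sketch; it assumes the `memory` object from the example above and the standard `search` options of the mem0 TypeScript SDK:
+
+```typescript
+// The query is embedded with the Langchain embedder configured above.
+const related = await memory.search("What movies does Alice like?", {
+  userId: "alice",
+});
+console.log(related.results);
+```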

## Supported LangChain Embedding Providers

diff --git a/docs/components/llms/models/langchain.mdx b/docs/components/llms/models/langchain.mdx
index 3198ca83..4113bc05 100644
--- a/docs/components/llms/models/langchain.mdx
+++ b/docs/components/llms/models/langchain.mdx
@@ -43,6 +43,37 @@ messages = [
 ]
 m.add(messages, user_id="alice", metadata={"category": "movies"})
 ```
+
+```typescript TypeScript
+import { Memory } from "mem0ai";
+import { ChatOpenAI } from "@langchain/openai";
+
+const openaiModel = new ChatOpenAI({
+  model: "gpt-4o",
+  temperature: 0.2,
+  maxTokens: 2000,
+});
+
+const config = {
+  llm: {
+    provider: "langchain",
+    config: {
+      model: openaiModel,
+    },
+  },
+};
+
+const memory = new Memory(config);
+
+const messages = [
+  { role: "user", content: "I'm planning to watch a movie tonight. Any recommendations?" },
+  { role: "assistant", content: "How about thriller movies? They can be quite engaging." },
+  { role: "user", content: "I'm not a big fan of thriller movies but I love sci-fi movies." },
+  { role: "assistant", content: "Got it! I'll avoid thriller recommendations and suggest sci-fi movies in the future." },
+];
+
+await memory.add(messages, { userId: "alice", metadata: { category: "movies" } });
+```

## Supported LangChain Providers

diff --git a/docs/components/vectordbs/dbs/langchain.mdx b/docs/components/vectordbs/dbs/langchain.mdx
index 8c2a2f1a..d87ff583 100644
--- a/docs/components/vectordbs/dbs/langchain.mdx
+++ b/docs/components/vectordbs/dbs/langchain.mdx
@@ -44,6 +44,33 @@ messages = [
 ]
 m.add(messages, user_id="alice", metadata={"category": "movies"})
 ```
+
+```typescript TypeScript
+import { Memory } from "mem0ai";
+import { OpenAIEmbeddings } from "@langchain/openai";
+import { MemoryVectorStore as LangchainMemoryStore } from "langchain/vectorstores/memory";
+
+const embeddings = new OpenAIEmbeddings();
+const vectorStore = new LangchainMemoryStore(embeddings);
+
+const config = {
+  vectorStore: {
+    provider: "langchain",
+    config: { client: vectorStore },
+  },
+};
+
+const memory = new Memory(config);
+
+const messages = [
+  { role: "user", content: "I'm planning to watch a movie tonight. Any recommendations?" },
+  { role: "assistant", content: "How about thriller movies? They can be quite engaging." },
+  { role: "user", content: "I'm not a big fan of thriller movies but I love sci-fi movies." },
+  { role: "assistant", content: "Got it! I'll avoid thriller recommendations and suggest sci-fi movies in the future."
} +] + +memory.add(messages, user_id="alice", metadata={"category": "movies"}) +``` ## Supported LangChain Vector Stores diff --git a/mem0-ts/package.json b/mem0-ts/package.json index 3727095f..833e223b 100644 --- a/mem0-ts/package.json +++ b/mem0-ts/package.json @@ -1,6 +1,6 @@ { "name": "mem0ai", - "version": "2.1.16-patch.1", + "version": "2.1.17", "description": "The Memory Layer For Your AI Apps", "main": "./dist/index.js", "module": "./dist/index.mjs", @@ -98,14 +98,15 @@ }, "peerDependencies": { "@anthropic-ai/sdk": "0.18.0", - "@qdrant/js-client-rest": "1.13.0", - "@mistralai/mistralai": "^1.5.2", "@google/genai": "^0.7.0", + "@mistralai/mistralai": "^1.5.2", + "@qdrant/js-client-rest": "1.13.0", "@supabase/supabase-js": "^2.49.1", "@types/jest": "29.5.14", "@types/pg": "8.11.0", "@types/sqlite3": "3.1.11", "groq-sdk": "0.3.0", + "@langchain/core": "^0.3.44", "neo4j-driver": "^5.28.1", "ollama": "^0.5.14", "pg": "8.11.3", diff --git a/mem0-ts/pnpm-lock.yaml b/mem0-ts/pnpm-lock.yaml index 76836512..f9012f08 100644 --- a/mem0-ts/pnpm-lock.yaml +++ b/mem0-ts/pnpm-lock.yaml @@ -13,6 +13,9 @@ importers: "@google/genai": specifier: ^0.7.0 version: 0.7.0(encoding@0.1.13) + "@langchain/core": + specifier: ^0.3.44 + version: 0.3.44(openai@4.93.0(encoding@0.1.13)(ws@8.18.1)(zod@3.24.2)) "@mistralai/mistralai": specifier: ^1.5.2 version: 1.5.2(zod@3.24.2) @@ -376,6 +379,12 @@ packages: integrity: sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==, } + "@cfworker/json-schema@4.1.1": + resolution: + { + integrity: sha512-gAmrUZSGtKc3AiBL71iNWxDsyUC5uMaKKGdvzYsBoTW/xi42JQHl7eKV2OYzCUqvc+D2RCcf7EXY2iCyFIk6og==, + } + "@cspotcode/source-map-support@0.8.1": resolution: { @@ -796,6 +805,13 @@ packages: integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==, } + "@langchain/core@0.3.44": + resolution: + { + integrity: sha512-3BsSFf7STvPPZyl2kMANgtVnCUvDdyP4k+koP+nY2Tczd5V+RFkuazIn/JOj/xxy/neZjr4PxFU4BFyF1aKXOA==, + } + engines: { node: ">=18" } + "@mistralai/mistralai@1.5.2": resolution: { @@ -1263,6 +1279,12 @@ packages: integrity: sha512-PIzZZlEppgrpoT2QgbnDU+MMzuR6BbCjllj0bM70lWoejMeNJAxCchxnv7J3XFkI8MpygtRpzXrIlmWUBclP5A==, } + "@types/retry@0.12.0": + resolution: + { + integrity: sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==, + } + "@types/sqlite3@3.1.11": resolution: { @@ -1275,6 +1297,12 @@ packages: integrity: sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==, } + "@types/uuid@10.0.0": + resolution: + { + integrity: sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==, + } + "@types/uuid@9.0.8": resolution: { @@ -1812,6 +1840,12 @@ packages: integrity: sha512-ty/fTekppD2fIwRvnZAVdeOiGd1c7YXEixbgJTNzqcxJWKQnjJ/V1bNEEE6hygpM3WjwHFUVK6HTjWSzV4a8sQ==, } + console-table-printer@2.12.1: + resolution: + { + integrity: sha512-wKGOQRRvdnd89pCeH96e2Fn4wkbenSP6LMHfjfyNLMbGuHEFbMqQNuxXqd0oXG9caIOQ1FTvc5Uijp9/4jujnQ==, + } + convert-source-map@2.0.0: resolution: { @@ -1857,6 +1891,13 @@ packages: supports-color: optional: true + decamelize@1.2.0: + resolution: + { + integrity: sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==, + } + engines: { node: ">=0.10.0" } + decompress-response@6.0.0: resolution: { @@ -2091,6 +2132,12 @@ packages: } engines: { node: ">=6" } + eventemitter3@4.0.7: + resolution: + { + integrity: 
sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==, + } + execa@5.1.1: resolution: { @@ -2928,6 +2975,12 @@ packages: } engines: { node: ">=10" } + js-tiktoken@1.0.19: + resolution: + { + integrity: sha512-XC63YQeEcS47Y53gg950xiZ4IWmkfMe4p2V9OSaBt26q+p47WHn18izuXzSclCI73B7yGqtfRsT6jcZQI0y08g==, + } + js-tokens@4.0.0: resolution: { @@ -3007,6 +3060,17 @@ packages: integrity: sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ==, } + langsmith@0.3.15: + resolution: + { + integrity: sha512-cv3ebg0Hh0gRbl72cv/uzaZ+KOdfa2mGF1s74vmB2vlNVO/Ap/O9RYaHV+tpR8nwhGZ50R3ILnTOwSwGP+XQxw==, + } + peerDependencies: + openai: "*" + peerDependenciesMeta: + openai: + optional: true + leven@3.1.0: resolution: { @@ -3275,6 +3339,13 @@ packages: integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==, } + mustache@4.2.0: + resolution: + { + integrity: sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==, + } + hasBin: true + mz@2.7.0: resolution: { @@ -3470,6 +3541,13 @@ packages: zod: optional: true + p-finally@1.0.0: + resolution: + { + integrity: sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==, + } + engines: { node: ">=4" } + p-limit@2.3.0: resolution: { @@ -3498,6 +3576,27 @@ packages: } engines: { node: ">=10" } + p-queue@6.6.2: + resolution: + { + integrity: sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==, + } + engines: { node: ">=8" } + + p-retry@4.6.2: + resolution: + { + integrity: sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==, + } + engines: { node: ">=8" } + + p-timeout@3.2.0: + resolution: + { + integrity: sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==, + } + engines: { node: ">=8" } + p-try@2.2.0: resolution: { @@ -3936,6 +4035,13 @@ packages: } engines: { node: ">= 4" } + retry@0.13.1: + resolution: + { + integrity: sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==, + } + engines: { node: ">= 4" } + reusify@1.1.0: resolution: { @@ -4057,6 +4163,12 @@ packages: } engines: { node: ">=10" } + simple-wcswidth@1.0.1: + resolution: + { + integrity: sha512-xMO/8eNREtaROt7tJvWJqHBDTMFN4eiQ5I4JRMuilwfnFcV5W9u7RUkueNkdw0jPqGMX36iCywelS5yilTuOxg==, + } + sisteransi@1.0.5: resolution: { @@ -4550,6 +4662,13 @@ packages: integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==, } + uuid@10.0.0: + resolution: + { + integrity: sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==, + } + hasBin: true + uuid@9.0.1: resolution: { @@ -4957,6 +5076,8 @@ snapshots: "@bcoe/v8-coverage@0.2.3": {} + "@cfworker/json-schema@4.1.1": {} + "@cspotcode/source-map-support@0.8.1": dependencies: "@jridgewell/trace-mapping": 0.3.9 @@ -5254,6 +5375,23 @@ snapshots: "@jridgewell/resolve-uri": 3.1.2 "@jridgewell/sourcemap-codec": 1.5.0 + "@langchain/core@0.3.44(openai@4.93.0(encoding@0.1.13)(ws@8.18.1)(zod@3.24.2))": + dependencies: + "@cfworker/json-schema": 4.1.1 + ansi-styles: 5.2.0 + camelcase: 6.3.0 + decamelize: 1.2.0 + js-tiktoken: 1.0.19 + langsmith: 0.3.15(openai@4.93.0(encoding@0.1.13)(ws@8.18.1)(zod@3.24.2)) + mustache: 4.2.0 + p-queue: 6.6.2 + p-retry: 4.6.2 + uuid: 10.0.0 + zod: 3.24.2 + zod-to-json-schema: 
3.24.5(zod@3.24.2) + transitivePeerDependencies: + - openai + "@mistralai/mistralai@1.5.2(zod@3.24.2)": dependencies: zod: 3.24.2 @@ -5511,12 +5649,16 @@ snapshots: "@types/phoenix@1.6.6": {} + "@types/retry@0.12.0": {} + "@types/sqlite3@3.1.11": dependencies: "@types/node": 22.13.5 "@types/stack-utils@2.0.3": {} + "@types/uuid@10.0.0": {} + "@types/uuid@9.0.8": {} "@types/ws@8.18.0": @@ -5844,6 +5986,10 @@ snapshots: console-control-strings@1.1.0: optional: true + console-table-printer@2.12.1: + dependencies: + simple-wcswidth: 1.0.1 + convert-source-map@2.0.0: {} create-jest@29.7.0(@types/node@22.13.5)(ts-node@10.9.2(@types/node@22.13.5)(typescript@5.5.4)): @@ -5877,6 +6023,8 @@ snapshots: optionalDependencies: supports-color: 5.5.0 + decamelize@1.2.0: {} + decompress-response@6.0.0: dependencies: mimic-response: 3.1.0 @@ -6001,6 +6149,8 @@ snapshots: event-target-shim@5.0.1: {} + eventemitter3@4.0.7: {} + execa@5.1.1: dependencies: cross-spawn: 7.0.6 @@ -6720,6 +6870,10 @@ snapshots: joycon@3.1.1: {} + js-tiktoken@1.0.19: + dependencies: + base64-js: 1.5.1 + js-tokens@4.0.0: {} js-yaml@3.14.1: @@ -6757,6 +6911,18 @@ snapshots: kolorist@1.8.0: {} + langsmith@0.3.15(openai@4.93.0(encoding@0.1.13)(ws@8.18.1)(zod@3.24.2)): + dependencies: + "@types/uuid": 10.0.0 + chalk: 4.1.2 + console-table-printer: 2.12.1 + p-queue: 6.6.2 + p-retry: 4.6.2 + semver: 7.7.1 + uuid: 10.0.0 + optionalDependencies: + openai: 4.93.0(encoding@0.1.13)(ws@8.18.1)(zod@3.24.2) + leven@3.1.0: {} lilconfig@3.1.3: {} @@ -6908,6 +7074,8 @@ snapshots: ms@2.1.3: {} + mustache@4.2.0: {} + mz@2.7.0: dependencies: any-promise: 1.3.0 @@ -7042,6 +7210,8 @@ snapshots: transitivePeerDependencies: - encoding + p-finally@1.0.0: {} + p-limit@2.3.0: dependencies: p-try: 2.2.0 @@ -7059,6 +7229,20 @@ snapshots: aggregate-error: 3.1.0 optional: true + p-queue@6.6.2: + dependencies: + eventemitter3: 4.0.7 + p-timeout: 3.2.0 + + p-retry@4.6.2: + dependencies: + "@types/retry": 0.12.0 + retry: 0.13.1 + + p-timeout@3.2.0: + dependencies: + p-finally: 1.0.0 + p-try@2.2.0: {} package-json-from-dist@1.0.1: {} @@ -7297,6 +7481,8 @@ snapshots: retry@0.12.0: optional: true + retry@0.13.1: {} + reusify@1.1.0: {} rimraf@3.0.2: @@ -7376,6 +7562,8 @@ snapshots: dependencies: semver: 7.7.1 + simple-wcswidth@1.0.1: {} + sisteransi@1.0.5: {} slash@3.0.0: {} @@ -7687,6 +7875,8 @@ snapshots: util-deprecate@1.0.2: {} + uuid@10.0.0: {} + uuid@9.0.1: {} v8-compile-cache-lib@3.0.1: {} diff --git a/mem0-ts/src/oss/src/config/defaults.ts b/mem0-ts/src/oss/src/config/defaults.ts index a855e5b3..b2c9d19f 100644 --- a/mem0-ts/src/oss/src/config/defaults.ts +++ b/mem0-ts/src/oss/src/config/defaults.ts @@ -22,6 +22,7 @@ export const DEFAULT_MEMORY_CONFIG: MemoryConfig = { config: { apiKey: process.env.OPENAI_API_KEY || "", model: "gpt-4-turbo-preview", + modelProperties: undefined, }, }, enableGraph: false, diff --git a/mem0-ts/src/oss/src/config/manager.ts b/mem0-ts/src/oss/src/config/manager.ts index 1e50f355..4e9cbb46 100644 --- a/mem0-ts/src/oss/src/config/manager.ts +++ b/mem0-ts/src/oss/src/config/manager.ts @@ -9,43 +9,84 @@ export class ConfigManager { provider: userConfig.embedder?.provider || DEFAULT_MEMORY_CONFIG.embedder.provider, - config: { - apiKey: - userConfig.embedder?.config?.apiKey || - DEFAULT_MEMORY_CONFIG.embedder.config.apiKey, - model: - userConfig.embedder?.config?.model || - DEFAULT_MEMORY_CONFIG.embedder.config.model, - }, + config: (() => { + const defaultConf = DEFAULT_MEMORY_CONFIG.embedder.config; + const userConf = 
userConfig.embedder?.config;
+
+        // Accept either a Langchain embeddings instance (object) or a plain
+        // model name (string); both are passed through unchanged.
+        const finalModel: string | any = userConf?.model || defaultConf.model;
+
+        return {
+          apiKey:
+            userConf?.apiKey !== undefined
+              ? userConf.apiKey
+              : defaultConf.apiKey,
+          model: finalModel,
+          url: userConf?.url,
+        };
+      })(),
     },
     vectorStore: {
       provider:
         userConfig.vectorStore?.provider ||
         DEFAULT_MEMORY_CONFIG.vectorStore.provider,
-      config: {
-        collectionName:
-          userConfig.vectorStore?.config?.collectionName ||
-          DEFAULT_MEMORY_CONFIG.vectorStore.config.collectionName,
-        dimension:
-          userConfig.vectorStore?.config?.dimension ||
-          DEFAULT_MEMORY_CONFIG.vectorStore.config.dimension,
-        ...userConfig.vectorStore?.config,
-      },
+      config: (() => {
+        const defaultConf = DEFAULT_MEMORY_CONFIG.vectorStore.config;
+        const userConf = userConfig.vectorStore?.config;
+
+        // Prioritize a user-provided client instance (e.g. a Langchain
+        // vector store): user fields win, defaults only fill the gaps.
+        if (userConf?.client && typeof userConf.client === "object") {
+          return {
+            ...userConf, // passthrough fields, including the client itself
+            dimension: userConf.dimension || defaultConf.dimension,
+          };
+        } else {
+          // No client provided: merge the standard fields with defaults.
+          return {
+            ...userConf,
+            collectionName:
+              userConf?.collectionName || defaultConf.collectionName,
+            dimension: userConf?.dimension || defaultConf.dimension,
+            // Ensure a client is not carried over from defaults.
+            client: undefined,
+          };
+        }
+      })(),
     },
     llm: {
       provider: userConfig.llm?.provider || DEFAULT_MEMORY_CONFIG.llm.provider,
-      config: {
-        apiKey:
-          userConfig.llm?.config?.apiKey ||
-          DEFAULT_MEMORY_CONFIG.llm.config.apiKey,
-        model:
-          userConfig.llm?.config?.model ||
-          DEFAULT_MEMORY_CONFIG.llm.config.model,
-        modelProperties:
-          userConfig.llm?.config?.modelProperties ||
-          DEFAULT_MEMORY_CONFIG.llm.config.modelProperties,
-      },
+      config: (() => {
+        const defaultConf = DEFAULT_MEMORY_CONFIG.llm.config;
+        const userConf = userConfig.llm?.config;
+        // As above, a Langchain chat model instance or a model name string
+        // are both accepted as-is.
+        const finalModel: string | any = userConf?.model || defaultConf.model;
+
+        return {
+          apiKey:
+            userConf?.apiKey !== undefined
+              ? userConf.apiKey
+              : defaultConf.apiKey,
+          model: finalModel,
+          modelProperties:
+            userConf?.modelProperties !== undefined
+              ?
userConf.modelProperties + : defaultConf.modelProperties, + }; + })(), }, historyDbPath: userConfig.historyDbPath || DEFAULT_MEMORY_CONFIG.historyDbPath, diff --git a/mem0-ts/src/oss/src/embeddings/langchain.ts b/mem0-ts/src/oss/src/embeddings/langchain.ts new file mode 100644 index 00000000..7ffb8506 --- /dev/null +++ b/mem0-ts/src/oss/src/embeddings/langchain.ts @@ -0,0 +1,50 @@ +import { Embeddings } from "@langchain/core/embeddings"; +import { Embedder } from "./base"; +import { EmbeddingConfig } from "../types"; + +export class LangchainEmbedder implements Embedder { + private embedderInstance: Embeddings; + private batchSize?: number; // Some LC embedders have batch size + + constructor(config: EmbeddingConfig) { + // Check if config.model is provided and is an object (the instance) + if (!config.model || typeof config.model !== "object") { + throw new Error( + "Langchain embedder provider requires an initialized Langchain Embeddings instance passed via the 'model' field in the embedder config.", + ); + } + // Basic check for embedding methods + if ( + typeof (config.model as any).embedQuery !== "function" || + typeof (config.model as any).embedDocuments !== "function" + ) { + throw new Error( + "Provided Langchain 'instance' in the 'model' field does not appear to be a valid Langchain Embeddings instance (missing embedQuery or embedDocuments method).", + ); + } + this.embedderInstance = config.model as Embeddings; + // Store batch size if the instance has it (optional) + this.batchSize = (this.embedderInstance as any).batchSize; + } + + async embed(text: string): Promise { + try { + // Use embedQuery for single text embedding + return await this.embedderInstance.embedQuery(text); + } catch (error) { + console.error("Error embedding text with Langchain Embedder:", error); + throw error; + } + } + + async embedBatch(texts: string[]): Promise { + try { + // Use embedDocuments for batch embedding + // Langchain's embedDocuments handles batching internally if needed/supported + return await this.embedderInstance.embedDocuments(texts); + } catch (error) { + console.error("Error embedding batch with Langchain Embedder:", error); + throw error; + } + } +} diff --git a/mem0-ts/src/oss/src/graphs/tools.ts b/mem0-ts/src/oss/src/graphs/tools.ts index 57262499..e0768521 100644 --- a/mem0-ts/src/oss/src/graphs/tools.ts +++ b/mem0-ts/src/oss/src/graphs/tools.ts @@ -1,3 +1,5 @@ +import { z } from "zod"; + export interface GraphToolParameters { source: string; destination: string; @@ -21,6 +23,58 @@ export interface GraphRelationsParameters { }>; } +// --- Zod Schemas for Tool Arguments --- + +// Schema for simple relationship arguments (Update, Delete) +export const GraphSimpleRelationshipArgsSchema = z.object({ + source: z + .string() + .describe("The identifier of the source node in the relationship."), + relationship: z + .string() + .describe("The relationship between the source and destination nodes."), + destination: z + .string() + .describe("The identifier of the destination node in the relationship."), +}); + +// Schema for adding a relationship (includes types) +export const GraphAddRelationshipArgsSchema = + GraphSimpleRelationshipArgsSchema.extend({ + source_type: z + .string() + .describe("The type or category of the source node."), + destination_type: z + .string() + .describe("The type or category of the destination node."), + }); + +// Schema for extracting entities +export const GraphExtractEntitiesArgsSchema = z.object({ + entities: z + .array( + z.object({ + entity: 
z.string().describe("The name or identifier of the entity."), + entity_type: z.string().describe("The type or category of the entity."), + }), + ) + .describe("An array of entities with their types."), +}); + +// Schema for establishing relationships +export const GraphRelationsArgsSchema = z.object({ + entities: z + .array(GraphSimpleRelationshipArgsSchema) + .describe("An array of relationships (source, relationship, destination)."), +}); + +// --- Tool Definitions (using JSON schema, keep as is) --- + +// Note: The tool definitions themselves still use JSON schema format +// as expected by the LLM APIs. The Zod schemas above are for internal +// validation and potentially for use with Langchain's .withStructuredOutput +// if we adapt it to handle tool calls via schema. + export const UPDATE_MEMORY_TOOL_GRAPH = { type: "function", function: { diff --git a/mem0-ts/src/oss/src/index.ts b/mem0-ts/src/oss/src/index.ts index 13508000..c14f3730 100644 --- a/mem0-ts/src/oss/src/index.ts +++ b/mem0-ts/src/oss/src/index.ts @@ -5,6 +5,7 @@ export * from "./embeddings/base"; export * from "./embeddings/openai"; export * from "./embeddings/ollama"; export * from "./embeddings/google"; +export * from "./embeddings/langchain"; export * from "./llms/base"; export * from "./llms/openai"; export * from "./llms/google"; @@ -13,8 +14,11 @@ export * from "./llms/anthropic"; export * from "./llms/groq"; export * from "./llms/ollama"; export * from "./llms/mistral"; +export * from "./llms/langchain"; export * from "./vector_stores/base"; export * from "./vector_stores/memory"; export * from "./vector_stores/qdrant"; export * from "./vector_stores/redis"; +export * from "./vector_stores/supabase"; +export * from "./vector_stores/langchain"; export * from "./utils/factory"; diff --git a/mem0-ts/src/oss/src/llms/base.ts b/mem0-ts/src/oss/src/llms/base.ts index b15575fc..24990cc5 100644 --- a/mem0-ts/src/oss/src/llms/base.ts +++ b/mem0-ts/src/oss/src/llms/base.ts @@ -12,7 +12,7 @@ export interface LLMResponse { export interface LLM { generateResponse( messages: Array<{ role: string; content: string }>, - response_format: { type: string }, + response_format?: { type: string }, tools?: any[], ): Promise; generateChat(messages: Message[]): Promise; diff --git a/mem0-ts/src/oss/src/llms/langchain.ts b/mem0-ts/src/oss/src/llms/langchain.ts new file mode 100644 index 00000000..9acaf11f --- /dev/null +++ b/mem0-ts/src/oss/src/llms/langchain.ts @@ -0,0 +1,255 @@ +import { BaseLanguageModel } from "@langchain/core/language_models/base"; +import { + AIMessage, + HumanMessage, + SystemMessage, + BaseMessage, +} from "@langchain/core/messages"; +import { z } from "zod"; +import { LLM, LLMResponse } from "./base"; +import { LLMConfig, Message } from "../types/index"; +// Import the schemas directly into LangchainLLM +import { FactRetrievalSchema, MemoryUpdateSchema } from "../prompts"; +// Import graph tool argument schemas +import { + GraphExtractEntitiesArgsSchema, + GraphRelationsArgsSchema, + GraphSimpleRelationshipArgsSchema, // Used for delete tool +} from "../graphs/tools"; + +const convertToLangchainMessages = (messages: Message[]): BaseMessage[] => { + return messages.map((msg) => { + const content = + typeof msg.content === "string" + ? 
msg.content + : JSON.stringify(msg.content); + switch (msg.role?.toLowerCase()) { + case "system": + return new SystemMessage(content); + case "user": + case "human": + return new HumanMessage(content); + case "assistant": + case "ai": + return new AIMessage(content); + default: + console.warn( + `Unsupported message role '${msg.role}' for Langchain. Treating as 'human'.`, + ); + return new HumanMessage(content); + } + }); +}; + +export class LangchainLLM implements LLM { + private llmInstance: BaseLanguageModel; + private modelName: string; + + constructor(config: LLMConfig) { + if (!config.model || typeof config.model !== "object") { + throw new Error( + "Langchain provider requires an initialized Langchain instance passed via the 'model' field in the LLM config.", + ); + } + if (typeof (config.model as any).invoke !== "function") { + throw new Error( + "Provided Langchain 'instance' in the 'model' field does not appear to be a valid Langchain language model (missing invoke method).", + ); + } + this.llmInstance = config.model as BaseLanguageModel; + this.modelName = + (this.llmInstance as any).modelId || + (this.llmInstance as any).model || + "langchain-model"; + } + + async generateResponse( + messages: Message[], + response_format?: { type: string }, + tools?: any[], + ): Promise { + const langchainMessages = convertToLangchainMessages(messages); + let runnable: any = this.llmInstance; + const invokeOptions: Record = {}; + let isStructuredOutput = false; + let selectedSchema: z.ZodSchema | null = null; + let isToolCallResponse = false; + + // --- Internal Schema Selection Logic (runs regardless of response_format) --- + const systemPromptContent = + (messages.find((m) => m.role === "system")?.content as string) || ""; + const userPromptContent = + (messages.find((m) => m.role === "user")?.content as string) || ""; + const toolNames = tools?.map((t) => t.function.name) || []; + + // Prioritize tool call argument schemas + if (toolNames.includes("extract_entities")) { + selectedSchema = GraphExtractEntitiesArgsSchema; + isToolCallResponse = true; + } else if (toolNames.includes("establish_relationships")) { + selectedSchema = GraphRelationsArgsSchema; + isToolCallResponse = true; + } else if (toolNames.includes("delete_graph_memory")) { + selectedSchema = GraphSimpleRelationshipArgsSchema; + isToolCallResponse = true; + } + // Check for memory prompts if no tool schema matched + else if ( + systemPromptContent.includes("Personal Information Organizer") && + systemPromptContent.includes("extract relevant pieces of information") + ) { + selectedSchema = FactRetrievalSchema; + } else if ( + userPromptContent.includes("smart memory manager") && + userPromptContent.includes("Compare newly retrieved facts") + ) { + selectedSchema = MemoryUpdateSchema; + } + + // --- Apply Structured Output if Schema Selected --- + if ( + selectedSchema && + typeof (this.llmInstance as any).withStructuredOutput === "function" + ) { + // Apply if a schema was selected (for memory or single tool calls) + if ( + !isToolCallResponse || + (isToolCallResponse && tools && tools.length === 1) + ) { + try { + runnable = (this.llmInstance as any).withStructuredOutput( + selectedSchema, + { name: tools?.[0]?.function.name }, + ); + isStructuredOutput = true; + } catch (e) { + isStructuredOutput = false; // Ensure flag is false on error + // No fallback to response_format here unless explicitly passed + if (response_format?.type === "json_object") { + invokeOptions.response_format = { type: "json_object" }; + } + } + } 
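      // Multiple tools cannot share a single structured-output schema, so the
      // multi-tool case is left to plain bindTools() below and tool_calls are
      // mapped from the raw model response when it is processed.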
else if (isToolCallResponse) {
        // Intentionally empty: handled via tool binding below.
      }
    } else if (response_format?.type === "json_object") {
      // Either a schema was selected but the instance has no
      // .withStructuredOutput, or no schema was inferred at all. Honor an
      // explicit JSON request only if the instance supports response_format.
      if (
        (this.llmInstance as any)._identifyingParams?.response_format ||
        (this.llmInstance as any).response_format
      ) {
        invokeOptions.response_format = { type: "json_object" };
      }
    }

    // --- Handle tool binding ---
    if (tools && tools.length > 0) {
      if (typeof (runnable as any).bindTools === "function") {
        try {
          runnable = (runnable as any).bindTools(tools);
        } catch (e) {
          console.warn(
            `LangchainLLM (${this.modelName}): bindTools() failed; invoking without bound tools.`,
            e,
          );
        }
      }
    }

    // --- Invoke and Process Response ---
    try {
      const response = await runnable.invoke(langchainMessages, invokeOptions);

      if (isStructuredOutput && !isToolCallResponse) {
        // Memory prompt with structured output
        return JSON.stringify(response);
      } else if (isStructuredOutput && isToolCallResponse) {
        // Tool call with structured arguments
        if (response?.tool_calls && Array.isArray(response.tool_calls)) {
          const mappedToolCalls = response.tool_calls.map((call: any) => ({
            name: call.name || tools?.[0]?.function.name || "unknown_tool",
            arguments:
              typeof call.args === "string"
                ? call.args
                : JSON.stringify(call.args),
          }));
          return {
            content: response.content || "",
            role: "assistant",
            toolCalls: mappedToolCalls,
          };
        } else {
          // Direct object response for tool args
          return {
            content: "",
            role: "assistant",
            toolCalls: [
              {
                name: tools?.[0]?.function.name || "unknown_tool",
                arguments: JSON.stringify(response),
              },
            ],
          };
        }
      } else if (
        response &&
        response.tool_calls &&
        Array.isArray(response.tool_calls)
      ) {
        // Standard tool call response (structured output unused or failed)
        const mappedToolCalls = response.tool_calls.map((call: any) => ({
          name: call.name || "unknown_tool",
          arguments:
            typeof call.args === "string"
              ? call.args
              : JSON.stringify(call.args),
        }));
        return {
          content: response.content || "",
          role: "assistant",
          toolCalls: mappedToolCalls,
        };
      } else if (response && typeof response.content === "string") {
        // Standard text response
        return response.content;
      } else {
        // Fallback for unexpected formats
        return JSON.stringify(response);
      }
    } catch (error) {
      console.error(
        `Error invoking Langchain instance (${this.modelName}) for generateResponse:`,
        error,
      );
      throw error;
    }
  }
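
  // Sketch of typical usage (assuming @langchain/openai as the backing model):
  //
  //   const llm = new LangchainLLM({ model: new ChatOpenAI({ model: "gpt-4o" }) });
  //   const reply = await llm.generateResponse([
  //     { role: "user", content: "Hello" },
  //   ]); // plain text when no memory prompt or tool schema matches
  //
  // The Memory pipeline calls generateResponse() with the fact-retrieval and
  // memory-update prompts, which the schema selection above routes through
  // withStructuredOutput so the result can be parsed as JSON.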
"assistant" : "assistant", + }; + } else { + console.warn( + `Unexpected response format from Langchain instance (${this.modelName}) for generateChat:`, + response, + ); + return { + content: JSON.stringify(response), + role: "assistant", + }; + } + } catch (error) { + console.error( + `Error invoking Langchain instance (${this.modelName}) for generateChat:`, + error, + ); + throw error; + } + } +} diff --git a/mem0-ts/src/oss/src/memory/index.ts b/mem0-ts/src/oss/src/memory/index.ts index 42e8e350..1b43747c 100644 --- a/mem0-ts/src/oss/src/memory/index.ts +++ b/mem0-ts/src/oss/src/memory/index.ts @@ -43,7 +43,7 @@ export class Memory { private vectorStore: VectorStore; private llm: LLM; private db: HistoryManager; - private collectionName: string; + private collectionName: string | undefined; private apiVersion: string; private graphMemory?: MemoryGraph; private enableGraph: boolean; @@ -241,12 +241,10 @@ export class Memory { } const parsedMessages = messages.map((m) => m.content).join("\n"); - // Get prompts const [systemPrompt, userPrompt] = this.customPrompt ? [this.customPrompt, `Input:\n${parsedMessages}`] : getFactRetrievalMessages(parsedMessages); - // Extract facts using LLM const response = await this.llm.generateResponse( [ { role: "system", content: systemPrompt }, @@ -255,8 +253,18 @@ export class Memory { { type: "json_object" }, ); - const cleanResponse = removeCodeBlocks(response); - const facts = JSON.parse(cleanResponse).facts || []; + const cleanResponse = removeCodeBlocks(response as string); + let facts: string[] = []; + try { + facts = JSON.parse(cleanResponse).facts || []; + } catch (e) { + console.error( + "Failed to parse facts from LLM response:", + cleanResponse, + e, + ); + facts = []; + } // Get embeddings for new facts const newMessageEmbeddings: Record = {}; @@ -292,13 +300,24 @@ export class Memory { // Get memory update decisions const updatePrompt = getUpdateMemoryMessages(uniqueOldMemories, facts); + const updateResponse = await this.llm.generateResponse( [{ role: "user", content: updatePrompt }], { type: "json_object" }, ); - const cleanUpdateResponse = removeCodeBlocks(updateResponse); - const memoryActions = JSON.parse(cleanUpdateResponse).memory || []; + const cleanUpdateResponse = removeCodeBlocks(updateResponse as string); + let memoryActions: any[] = []; + try { + memoryActions = JSON.parse(cleanUpdateResponse).memory || []; + } catch (e) { + console.error( + "Failed to parse memory actions from LLM response:", + cleanUpdateResponse, + e, + ); + memoryActions = []; + } // Process memory actions const results: MemoryItem[] = []; @@ -511,14 +530,47 @@ export class Memory { async reset(): Promise { await this._captureEvent("reset"); await this.db.reset(); - await this.vectorStore.deleteCol(); - if (this.graphMemory) { - await this.graphMemory.deleteAll({ userId: "default" }); + + // Check provider before attempting deleteCol + if (this.config.vectorStore.provider.toLowerCase() !== "langchain") { + try { + await this.vectorStore.deleteCol(); + } catch (e) { + console.error( + `Failed to delete collection for provider '${this.config.vectorStore.provider}':`, + e, + ); + // Decide if you want to re-throw or just log + } + } else { + console.warn( + "Memory.reset(): Skipping vector store collection deletion as 'langchain' provider is used. Underlying Langchain vector store data is not cleared by this operation.", + ); } + + if (this.graphMemory) { + await this.graphMemory.deleteAll({ userId: "default" }); // Assuming this is okay, or needs similar check? 
+ } + + // Re-initialize factories/clients based on the original config + this.embedder = EmbedderFactory.create( + this.config.embedder.provider, + this.config.embedder.config, + ); + // Re-create vector store instance - crucial for Langchain to reset wrapper state if needed this.vectorStore = VectorStoreFactory.create( this.config.vectorStore.provider, - this.config.vectorStore.config, + this.config.vectorStore.config, // This will pass the original client instance back ); + this.llm = LLMFactory.create( + this.config.llm.provider, + this.config.llm.config, + ); + // Re-init DB if needed (though db.reset() likely handles its state) + // Re-init Graph if needed + + // Re-initialize telemetry + this._initializeTelemetry(); } async getAll(config: GetAllMemoryOptions): Promise { diff --git a/mem0-ts/src/oss/src/prompts/index.ts b/mem0-ts/src/oss/src/prompts/index.ts index 8220d6f0..ef8c7975 100644 --- a/mem0-ts/src/oss/src/prompts/index.ts +++ b/mem0-ts/src/oss/src/prompts/index.ts @@ -1,3 +1,37 @@ +import { z } from "zod"; + +// Define Zod schema for fact retrieval output +export const FactRetrievalSchema = z.object({ + facts: z + .array(z.string()) + .describe("An array of distinct facts extracted from the conversation."), +}); + +// Define Zod schema for memory update output +export const MemoryUpdateSchema = z.object({ + memory: z + .array( + z.object({ + id: z.string().describe("The unique identifier of the memory item."), + text: z.string().describe("The content of the memory item."), + event: z + .enum(["ADD", "UPDATE", "DELETE", "NONE"]) + .describe( + "The action taken for this memory item (ADD, UPDATE, DELETE, or NONE).", + ), + old_memory: z + .string() + .optional() + .describe( + "The previous content of the memory item if the event was UPDATE.", + ), + }), + ) + .describe( + "An array representing the state of memory items after processing new facts.", + ), +}); + export function getFactRetrievalMessages( parsedMessages: string, ): [string, string] { diff --git a/mem0-ts/src/oss/src/types/index.ts b/mem0-ts/src/oss/src/types/index.ts index 3c15d668..0cbd1c19 100644 --- a/mem0-ts/src/oss/src/types/index.ts +++ b/mem0-ts/src/oss/src/types/index.ts @@ -14,13 +14,15 @@ export interface Message { export interface EmbeddingConfig { apiKey?: string; - model?: string; + model?: string | any; url?: string; } export interface VectorStoreConfig { - collectionName: string; + collectionName?: string; dimension?: number; + client?: any; + instance?: any; [key: string]: any; } @@ -38,7 +40,7 @@ export interface LLMConfig { provider?: string; config?: Record; apiKey?: string; - model?: string; + model?: string | any; modelProperties?: Record; } @@ -110,24 +112,25 @@ export const MemoryConfigSchema = z.object({ embedder: z.object({ provider: z.string(), config: z.object({ - apiKey: z.string(), - model: z.string().optional(), + apiKey: z.string().optional(), + model: z.union([z.string(), z.any()]).optional(), }), }), vectorStore: z.object({ provider: z.string(), config: z .object({ - collectionName: z.string(), + collectionName: z.string().optional(), dimension: z.number().optional(), + client: z.any().optional(), }) .passthrough(), }), llm: z.object({ provider: z.string(), config: z.object({ - apiKey: z.string(), - model: z.string().optional(), + apiKey: z.string().optional(), + model: z.union([z.string(), z.any()]).optional(), modelProperties: z.record(z.string(), z.any()).optional(), }), }), diff --git a/mem0-ts/src/oss/src/utils/factory.ts b/mem0-ts/src/oss/src/utils/factory.ts index 
f5dc222f..db9c609f 100644 --- a/mem0-ts/src/oss/src/utils/factory.ts +++ b/mem0-ts/src/oss/src/utils/factory.ts @@ -26,6 +26,9 @@ import { HistoryManager } from "../storage/base"; import { GoogleEmbedder } from "../embeddings/google"; import { GoogleLLM } from "../llms/google"; import { AzureOpenAILLM } from "../llms/azure"; +import { LangchainLLM } from "../llms/langchain"; +import { LangchainEmbedder } from "../embeddings/langchain"; +import { LangchainVectorStore } from "../vector_stores/langchain"; export class EmbedderFactory { static create(provider: string, config: EmbeddingConfig): Embedder { @@ -36,6 +39,8 @@ export class EmbedderFactory { return new OllamaEmbedder(config); case "google": return new GoogleEmbedder(config); + case "langchain": + return new LangchainEmbedder(config); default: throw new Error(`Unsupported embedder provider: ${provider}`); } @@ -44,7 +49,7 @@ export class EmbedderFactory { export class LLMFactory { static create(provider: string, config: LLMConfig): LLM { - switch (provider) { + switch (provider.toLowerCase()) { case "openai": return new OpenAILLM(config); case "openai_structured": @@ -61,6 +66,8 @@ export class LLMFactory { return new AzureOpenAILLM(config); case "mistral": return new MistralLLM(config); + case "langchain": + return new LangchainLLM(config); default: throw new Error(`Unsupported LLM provider: ${provider}`); } @@ -73,11 +80,13 @@ export class VectorStoreFactory { case "memory": return new MemoryVectorStore(config); case "qdrant": - return new Qdrant(config as any); // Type assertion needed as config is extended + return new Qdrant(config as any); case "redis": - return new RedisDB(config as any); // Type assertion needed as config is extended + return new RedisDB(config as any); case "supabase": - return new SupabaseDB(config as any); // Type assertion needed as config is extended + return new SupabaseDB(config as any); + case "langchain": + return new LangchainVectorStore(config as any); default: throw new Error(`Unsupported vector store provider: ${provider}`); } diff --git a/mem0-ts/src/oss/src/vector_stores/langchain.ts b/mem0-ts/src/oss/src/vector_stores/langchain.ts new file mode 100644 index 00000000..852ecaa4 --- /dev/null +++ b/mem0-ts/src/oss/src/vector_stores/langchain.ts @@ -0,0 +1,231 @@ +import { VectorStore as LangchainVectorStoreInterface } from "@langchain/core/vectorstores"; +import { Document } from "@langchain/core/documents"; +import { VectorStore } from "./base"; // mem0's VectorStore interface +import { SearchFilters, VectorStoreConfig, VectorStoreResult } from "../types"; + +// Config specifically for the Langchain wrapper +interface LangchainStoreConfig extends VectorStoreConfig { + client: LangchainVectorStoreInterface; + // dimension might still be useful for validation if not automatically inferred +} + +export class LangchainVectorStore implements VectorStore { + private lcStore: LangchainVectorStoreInterface; + private dimension?: number; + private storeUserId: string = "anonymous-langchain-user"; // Simple in-memory user ID + + constructor(config: LangchainStoreConfig) { + if (!config.client || typeof config.client !== "object") { + throw new Error( + "Langchain vector store provider requires an initialized Langchain VectorStore instance passed via the 'client' field.", + ); + } + // Basic checks for core methods + if ( + typeof config.client.addVectors !== "function" || + typeof config.client.similaritySearchVectorWithScore !== "function" + ) { + throw new Error( + "Provided Langchain 'client' does not 
appear to be a valid Langchain VectorStore (missing addVectors or similaritySearchVectorWithScore method).", + ); + } + + this.lcStore = config.client; + this.dimension = config.dimension; + + // Attempt to get dimension from the underlying store if not provided + if ( + !this.dimension && + (this.lcStore as any).embeddings?.embeddingDimension + ) { + this.dimension = (this.lcStore as any).embeddings.embeddingDimension; + } + if ( + !this.dimension && + (this.lcStore as any).embedding?.embeddingDimension + ) { + this.dimension = (this.lcStore as any).embedding.embeddingDimension; + } + // If still no dimension, we might need to throw or warn, as it's needed for validation + if (!this.dimension) { + console.warn( + "LangchainVectorStore: Could not determine embedding dimension. Input validation might be skipped.", + ); + } + } + + // --- Method Mappings --- + + async insert( + vectors: number[][], + ids: string[], + payloads: Record[], + ): Promise { + if (!ids || ids.length !== vectors.length) { + throw new Error( + "IDs array must be provided and have the same length as vectors.", + ); + } + if (this.dimension) { + vectors.forEach((v, i) => { + if (v.length !== this.dimension) { + throw new Error( + `Vector dimension mismatch at index ${i}. Expected ${this.dimension}, got ${v.length}`, + ); + } + }); + } + + // Convert payloads to Langchain Document metadata format + const documents = payloads.map((payload, i) => { + // Provide empty pageContent, store mem0 id and other data in metadata + return new Document({ + pageContent: "", // Add required empty pageContent + metadata: { ...payload, _mem0_id: ids[i] }, + }); + }); + + // Use addVectors. Note: Langchain stores often generate their own internal IDs. + // We store the mem0 ID in the metadata (`_mem0_id`). + try { + await this.lcStore.addVectors(vectors, documents, { ids }); // Pass mem0 ids if the store supports it + } catch (e) { + // Fallback if the store doesn't support passing ids directly during addVectors + console.warn( + "Langchain store might not support custom IDs on insert. Trying without IDs.", + e, + ); + await this.lcStore.addVectors(vectors, documents); + } + } + + async search( + query: number[], + limit: number = 5, + filters?: SearchFilters, // filters parameter is received but will be ignored + ): Promise { + if (this.dimension && query.length !== this.dimension) { + throw new Error( + `Query vector dimension mismatch. Expected ${this.dimension}, got ${query.length}`, + ); + } + + // --- Remove filter processing logic --- + // Filters passed via mem0 interface are not reliably translatable to generic Langchain stores. + // let lcFilter: any = undefined; + // if (filters && ...) { ... } + // console.warn("LangchainVectorStore: Passing filters directly..."); // Remove warning + + // Call similaritySearchVectorWithScore WITHOUT the filter argument + const results = await this.lcStore.similaritySearchVectorWithScore( + query, + limit, + // Do not pass lcFilter here + ); + + // Map Langchain results [Document, score] back to mem0 VectorStoreResult + return results.map(([doc, score]) => ({ + id: doc.metadata._mem0_id || "unknown_id", + payload: doc.metadata, + score: score, + })); + } + + // --- Methods with No Direct Langchain Equivalent (Throwing Errors) --- + + async get(vectorId: string): Promise { + // Most Langchain stores lack a direct getById. Simulation is inefficient. 
+ console.error( + `LangchainVectorStore: The 'get' method is not directly supported by most Langchain VectorStores.`, + ); + throw new Error( + "Method 'get' not reliably supported by LangchainVectorStore wrapper.", + ); + // Potential (inefficient) simulation: + // Perform a search with a filter like { _mem0_id: vectorId }, limit 1. + // This requires the underlying store to support filtering on _mem0_id. + } + + async update( + vectorId: string, + vector: number[], + payload: Record, + ): Promise { + // Updates often require delete + add in Langchain. + console.error( + `LangchainVectorStore: The 'update' method is not directly supported. Use delete followed by insert.`, + ); + throw new Error( + "Method 'update' not supported by LangchainVectorStore wrapper.", + ); + // Possible implementation: Check if store has delete, call delete({_mem0_id: vectorId}), then insert. + } + + async delete(vectorId: string): Promise { + // Check if the underlying store supports deletion by ID + if (typeof (this.lcStore as any).delete === "function") { + try { + // We need to delete based on our stored _mem0_id. + // Langchain's delete often takes its own internal IDs or filter. + // Attempting deletion via filter is the most likely approach. + console.warn( + "LangchainVectorStore: Attempting delete via filter on '_mem0_id'. Success depends on the specific Langchain VectorStore's delete implementation.", + ); + await (this.lcStore as any).delete({ filter: { _mem0_id: vectorId } }); + // OR if it takes IDs directly (less common for *our* IDs): + // await (this.lcStore as any).delete({ ids: [vectorId] }); + } catch (e) { + console.error( + `LangchainVectorStore: Delete failed. Underlying store's delete method might expect different arguments or filters. Error: ${e}`, + ); + throw new Error(`Delete failed in underlying Langchain store: ${e}`); + } + } else { + console.error( + `LangchainVectorStore: The underlying Langchain store instance does not seem to support a 'delete' method.`, + ); + throw new Error( + "Method 'delete' not available on the provided Langchain VectorStore client.", + ); + } + } + + async list( + filters?: SearchFilters, + limit: number = 100, + ): Promise<[VectorStoreResult[], number]> { + // No standard list method in Langchain core interface. + console.error( + `LangchainVectorStore: The 'list' method is not supported by the generic LangchainVectorStore wrapper.`, + ); + throw new Error( + "Method 'list' not supported by LangchainVectorStore wrapper.", + ); + // Could potentially be implemented if the underlying store has a specific list/scroll/query capability. + } + + async deleteCol(): Promise { + console.error( + `LangchainVectorStore: The 'deleteCol' method is not supported by the generic LangchainVectorStore wrapper.`, + ); + throw new Error( + "Method 'deleteCol' not supported by LangchainVectorStore wrapper.", + ); + } + + // --- Wrapper-Specific Methods (In-Memory User ID) --- + + async getUserId(): Promise { + return this.storeUserId; + } + + async setUserId(userId: string): Promise { + this.storeUserId = userId; + } + + async initialize(): Promise { + // No specific initialization needed for the wrapper itself, + // assuming the passed Langchain client is already initialized. + return Promise.resolve(); + } +}
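
Taken together, the three providers can be combined in a single `Memory` configuration. The following is a minimal end-to-end sketch (assuming `@langchain/openai` and LangChain's in-memory vector store; the `vectorStore` key and the `add` options follow the mem0 TypeScript API used in the docs examples above):

```typescript
import { Memory } from "mem0ai";
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";

const embeddings = new OpenAIEmbeddings();

const memory = new Memory({
  llm: {
    provider: "langchain",
    // An initialized Langchain chat model instance, not a model name string
    config: { model: new ChatOpenAI({ model: "gpt-4o" }) },
  },
  embedder: {
    provider: "langchain",
    config: { model: embeddings },
  },
  vectorStore: {
    provider: "langchain",
    // The wrapper requires an initialized Langchain VectorStore as `client`
    config: { client: new MemoryVectorStore(embeddings) },
  },
});

await memory.add(
  [{ role: "user", content: "I love sci-fi movies." }],
  { userId: "alice" },
);
```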