TypeScript OSS: Langchain Integration (#2556)

This commit is contained in:
Saket Aryan
2025-04-15 20:08:41 +05:30
committed by GitHub
parent 9f204dc557
commit c3c9205ffa
18 changed files with 1075 additions and 55 deletions

View File

@@ -127,6 +127,17 @@ mode: "wide"
<Tab title="TypeScript">
<Update label="2025-04-14" description="v2.1.17">
**New Features:**
- **OSS SDK:** Added support for Langchain LLM
- **OSS SDK:** Added support for Langchain Embedder
- **OSS SDK:** Added support for Langchain Vector Store
**Improvements:**
- **OSS SDK:** Changed the `model` field in the LLM and Embedder configs from `string` to `string | any`, so initialized LangChain model instances can be passed directly
- **OSS SDK:** Added a `client` field to the vector store config for passing a LangChain vector store instance
</Update>
<Update label="2025-04-11" description="v2.1.16-patch.1">
**Bug Fixes:**
- **Azure OpenAI:** Fixed issues with Azure OpenAI

View File

@@ -42,6 +42,32 @@ messages = [
]
m.add(messages, user_id="alice", metadata={"category": "movies"})
```
```typescript TypeScript
import { Memory } from "mem0ai";
import { OpenAIEmbeddings } from "@langchain/openai";

const embeddings = new OpenAIEmbeddings();

const config = {
  embedder: {
    provider: "langchain",
    config: {
      model: embeddings,
    },
  },
};

const memory = new Memory(config);

const messages = [
  { role: "user", content: "I'm planning to watch a movie tonight. Any recommendations?" },
  { role: "assistant", content: "How about thriller movies? They can be quite engaging." },
  { role: "user", content: "I'm not a big fan of thriller movies but I love sci-fi movies." },
  { role: "assistant", content: "Got it! I'll avoid thriller recommendations and suggest sci-fi movies in the future." },
];

await memory.add(messages, { userId: "alice", metadata: { category: "movies" } });
```
</CodeGroup>
## Supported LangChain Embedding Providers

View File

@@ -43,6 +43,37 @@ messages = [
]
m.add(messages, user_id="alice", metadata={"category": "movies"})
```
```typescript TypeScript
import { Memory } from "mem0ai";
import { ChatOpenAI } from "@langchain/openai";

const openaiModel = new ChatOpenAI({
  model: "gpt-4o",
  temperature: 0.2,
  maxTokens: 2000,
});

const config = {
  llm: {
    provider: "langchain",
    config: {
      model: openaiModel,
    },
  },
};

const memory = new Memory(config);

const messages = [
  { role: "user", content: "I'm planning to watch a movie tonight. Any recommendations?" },
  { role: "assistant", content: "How about thriller movies? They can be quite engaging." },
  { role: "user", content: "I'm not a big fan of thriller movies but I love sci-fi movies." },
  { role: "assistant", content: "Got it! I'll avoid thriller recommendations and suggest sci-fi movies in the future." },
];

await memory.add(messages, { userId: "alice", metadata: { category: "movies" } });
```
</CodeGroup>
## Supported LangChain Providers

View File

@@ -44,6 +44,33 @@ messages = [
]
m.add(messages, user_id="alice", metadata={"category": "movies"})
```
```typescript TypeScript
import { Memory } from "mem0ai";
import { OpenAIEmbeddings } from "@langchain/openai";
import { MemoryVectorStore as LangchainMemoryStore } from "langchain/vectorstores/memory";

const embeddings = new OpenAIEmbeddings();
const vectorStore = new LangchainMemoryStore(embeddings);

const config = {
  vectorStore: {
    provider: "langchain",
    config: { client: vectorStore },
  },
};

const memory = new Memory(config);

const messages = [
  { role: "user", content: "I'm planning to watch a movie tonight. Any recommendations?" },
  { role: "assistant", content: "How about thriller movies? They can be quite engaging." },
  { role: "user", content: "I'm not a big fan of thriller movies but I love sci-fi movies." },
  { role: "assistant", content: "Got it! I'll avoid thriller recommendations and suggest sci-fi movies in the future." },
];

await memory.add(messages, { userId: "alice", metadata: { category: "movies" } });
```
</CodeGroup>
## Supported LangChain Vector Stores

View File

@@ -1,6 +1,6 @@
{
"name": "mem0ai",
"version": "2.1.16-patch.1",
"version": "2.1.17",
"description": "The Memory Layer For Your AI Apps",
"main": "./dist/index.js",
"module": "./dist/index.mjs",
@@ -98,14 +98,15 @@
},
"peerDependencies": {
"@anthropic-ai/sdk": "0.18.0",
"@qdrant/js-client-rest": "1.13.0",
"@mistralai/mistralai": "^1.5.2",
"@google/genai": "^0.7.0",
"@mistralai/mistralai": "^1.5.2",
"@qdrant/js-client-rest": "1.13.0",
"@supabase/supabase-js": "^2.49.1",
"@types/jest": "29.5.14",
"@types/pg": "8.11.0",
"@types/sqlite3": "3.1.11",
"groq-sdk": "0.3.0",
"@langchain/core": "^0.3.44",
"neo4j-driver": "^5.28.1",
"ollama": "^0.5.14",
"pg": "8.11.3",

mem0-ts/pnpm-lock.yaml (generated, 190 changes)
View File

@@ -13,6 +13,9 @@ importers:
"@google/genai": "@google/genai":
specifier: ^0.7.0 specifier: ^0.7.0
version: 0.7.0(encoding@0.1.13) version: 0.7.0(encoding@0.1.13)
"@langchain/core":
specifier: ^0.3.44
version: 0.3.44(openai@4.93.0(encoding@0.1.13)(ws@8.18.1)(zod@3.24.2))
"@mistralai/mistralai": "@mistralai/mistralai":
specifier: ^1.5.2 specifier: ^1.5.2
version: 1.5.2(zod@3.24.2) version: 1.5.2(zod@3.24.2)
@@ -376,6 +379,12 @@ packages:
integrity: sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==,
}
"@cfworker/json-schema@4.1.1":
resolution:
{
integrity: sha512-gAmrUZSGtKc3AiBL71iNWxDsyUC5uMaKKGdvzYsBoTW/xi42JQHl7eKV2OYzCUqvc+D2RCcf7EXY2iCyFIk6og==,
}
"@cspotcode/source-map-support@0.8.1":
resolution:
{
@@ -796,6 +805,13 @@ packages:
integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==,
}
"@langchain/core@0.3.44":
resolution:
{
integrity: sha512-3BsSFf7STvPPZyl2kMANgtVnCUvDdyP4k+koP+nY2Tczd5V+RFkuazIn/JOj/xxy/neZjr4PxFU4BFyF1aKXOA==,
}
engines: { node: ">=18" }
"@mistralai/mistralai@1.5.2":
resolution:
{
@@ -1263,6 +1279,12 @@ packages:
integrity: sha512-PIzZZlEppgrpoT2QgbnDU+MMzuR6BbCjllj0bM70lWoejMeNJAxCchxnv7J3XFkI8MpygtRpzXrIlmWUBclP5A==,
}
"@types/retry@0.12.0":
resolution:
{
integrity: sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==,
}
"@types/sqlite3@3.1.11":
resolution:
{
@@ -1275,6 +1297,12 @@ packages:
integrity: sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==,
}
"@types/uuid@10.0.0":
resolution:
{
integrity: sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==,
}
"@types/uuid@9.0.8":
resolution:
{
@@ -1812,6 +1840,12 @@ packages:
integrity: sha512-ty/fTekppD2fIwRvnZAVdeOiGd1c7YXEixbgJTNzqcxJWKQnjJ/V1bNEEE6hygpM3WjwHFUVK6HTjWSzV4a8sQ==,
}
console-table-printer@2.12.1:
resolution:
{
integrity: sha512-wKGOQRRvdnd89pCeH96e2Fn4wkbenSP6LMHfjfyNLMbGuHEFbMqQNuxXqd0oXG9caIOQ1FTvc5Uijp9/4jujnQ==,
}
convert-source-map@2.0.0:
resolution:
{
@@ -1857,6 +1891,13 @@ packages:
supports-color:
optional: true
decamelize@1.2.0:
resolution:
{
integrity: sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==,
}
engines: { node: ">=0.10.0" }
decompress-response@6.0.0:
resolution:
{
@@ -2091,6 +2132,12 @@ packages:
}
engines: { node: ">=6" }
eventemitter3@4.0.7:
resolution:
{
integrity: sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==,
}
execa@5.1.1:
resolution:
{
@@ -2928,6 +2975,12 @@ packages:
}
engines: { node: ">=10" }
js-tiktoken@1.0.19:
resolution:
{
integrity: sha512-XC63YQeEcS47Y53gg950xiZ4IWmkfMe4p2V9OSaBt26q+p47WHn18izuXzSclCI73B7yGqtfRsT6jcZQI0y08g==,
}
js-tokens@4.0.0:
resolution:
{
@@ -3007,6 +3060,17 @@ packages:
integrity: sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ==,
}
langsmith@0.3.15:
resolution:
{
integrity: sha512-cv3ebg0Hh0gRbl72cv/uzaZ+KOdfa2mGF1s74vmB2vlNVO/Ap/O9RYaHV+tpR8nwhGZ50R3ILnTOwSwGP+XQxw==,
}
peerDependencies:
openai: "*"
peerDependenciesMeta:
openai:
optional: true
leven@3.1.0:
resolution:
{
@@ -3275,6 +3339,13 @@ packages:
integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==,
}
mustache@4.2.0:
resolution:
{
integrity: sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==,
}
hasBin: true
mz@2.7.0:
resolution:
{
@@ -3470,6 +3541,13 @@ packages:
zod:
optional: true
p-finally@1.0.0:
resolution:
{
integrity: sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==,
}
engines: { node: ">=4" }
p-limit@2.3.0:
resolution:
{
@@ -3498,6 +3576,27 @@
}
engines: { node: ">=10" }
p-queue@6.6.2:
resolution:
{
integrity: sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==,
}
engines: { node: ">=8" }
p-retry@4.6.2:
resolution:
{
integrity: sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==,
}
engines: { node: ">=8" }
p-timeout@3.2.0:
resolution:
{
integrity: sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==,
}
engines: { node: ">=8" }
p-try@2.2.0:
resolution:
{
@@ -3936,6 +4035,13 @@
}
engines: { node: ">= 4" }
retry@0.13.1:
resolution:
{
integrity: sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==,
}
engines: { node: ">= 4" }
reusify@1.1.0:
resolution:
{
@@ -4057,6 +4163,12 @@
}
engines: { node: ">=10" }
simple-wcswidth@1.0.1:
resolution:
{
integrity: sha512-xMO/8eNREtaROt7tJvWJqHBDTMFN4eiQ5I4JRMuilwfnFcV5W9u7RUkueNkdw0jPqGMX36iCywelS5yilTuOxg==,
}
sisteransi@1.0.5:
resolution:
{
@@ -4550,6 +4662,13 @@
integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==,
}
uuid@10.0.0:
resolution:
{
integrity: sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==,
}
hasBin: true
uuid@9.0.1:
resolution:
{
@@ -4957,6 +5076,8 @@ snapshots:
"@bcoe/v8-coverage@0.2.3": {}
"@cfworker/json-schema@4.1.1": {}
"@cspotcode/source-map-support@0.8.1":
dependencies:
"@jridgewell/trace-mapping": 0.3.9
@@ -5254,6 +5375,23 @@ snapshots:
"@jridgewell/resolve-uri": 3.1.2
"@jridgewell/sourcemap-codec": 1.5.0
"@langchain/core@0.3.44(openai@4.93.0(encoding@0.1.13)(ws@8.18.1)(zod@3.24.2))":
dependencies:
"@cfworker/json-schema": 4.1.1
ansi-styles: 5.2.0
camelcase: 6.3.0
decamelize: 1.2.0
js-tiktoken: 1.0.19
langsmith: 0.3.15(openai@4.93.0(encoding@0.1.13)(ws@8.18.1)(zod@3.24.2))
mustache: 4.2.0
p-queue: 6.6.2
p-retry: 4.6.2
uuid: 10.0.0
zod: 3.24.2
zod-to-json-schema: 3.24.5(zod@3.24.2)
transitivePeerDependencies:
- openai
"@mistralai/mistralai@1.5.2(zod@3.24.2)": "@mistralai/mistralai@1.5.2(zod@3.24.2)":
dependencies: dependencies:
zod: 3.24.2 zod: 3.24.2
@@ -5511,12 +5649,16 @@ snapshots:
"@types/phoenix@1.6.6": {} "@types/phoenix@1.6.6": {}
"@types/retry@0.12.0": {}
"@types/sqlite3@3.1.11": "@types/sqlite3@3.1.11":
dependencies: dependencies:
"@types/node": 22.13.5 "@types/node": 22.13.5
"@types/stack-utils@2.0.3": {} "@types/stack-utils@2.0.3": {}
"@types/uuid@10.0.0": {}
"@types/uuid@9.0.8": {} "@types/uuid@9.0.8": {}
"@types/ws@8.18.0": "@types/ws@8.18.0":
@@ -5844,6 +5986,10 @@ snapshots:
console-control-strings@1.1.0: console-control-strings@1.1.0:
optional: true optional: true
console-table-printer@2.12.1:
dependencies:
simple-wcswidth: 1.0.1
convert-source-map@2.0.0: {}
create-jest@29.7.0(@types/node@22.13.5)(ts-node@10.9.2(@types/node@22.13.5)(typescript@5.5.4)):
@@ -5877,6 +6023,8 @@ snapshots:
optionalDependencies:
supports-color: 5.5.0
decamelize@1.2.0: {}
decompress-response@6.0.0:
dependencies:
mimic-response: 3.1.0
@@ -6001,6 +6149,8 @@ snapshots:
event-target-shim@5.0.1: {}
eventemitter3@4.0.7: {}
execa@5.1.1:
dependencies:
cross-spawn: 7.0.6
@@ -6720,6 +6870,10 @@ snapshots:
joycon@3.1.1: {}
js-tiktoken@1.0.19:
dependencies:
base64-js: 1.5.1
js-tokens@4.0.0: {}
js-yaml@3.14.1:
@@ -6757,6 +6911,18 @@ snapshots:
kolorist@1.8.0: {}
langsmith@0.3.15(openai@4.93.0(encoding@0.1.13)(ws@8.18.1)(zod@3.24.2)):
dependencies:
"@types/uuid": 10.0.0
chalk: 4.1.2
console-table-printer: 2.12.1
p-queue: 6.6.2
p-retry: 4.6.2
semver: 7.7.1
uuid: 10.0.0
optionalDependencies:
openai: 4.93.0(encoding@0.1.13)(ws@8.18.1)(zod@3.24.2)
leven@3.1.0: {}
lilconfig@3.1.3: {}
@@ -6908,6 +7074,8 @@ snapshots:
ms@2.1.3: {}
mustache@4.2.0: {}
mz@2.7.0:
dependencies:
any-promise: 1.3.0
@@ -7042,6 +7210,8 @@ snapshots:
transitivePeerDependencies:
- encoding
p-finally@1.0.0: {}
p-limit@2.3.0:
dependencies:
p-try: 2.2.0
@@ -7059,6 +7229,20 @@ snapshots:
aggregate-error: 3.1.0
optional: true
p-queue@6.6.2:
dependencies:
eventemitter3: 4.0.7
p-timeout: 3.2.0
p-retry@4.6.2:
dependencies:
"@types/retry": 0.12.0
retry: 0.13.1
p-timeout@3.2.0:
dependencies:
p-finally: 1.0.0
p-try@2.2.0: {}
package-json-from-dist@1.0.1: {}
@@ -7297,6 +7481,8 @@ snapshots:
retry@0.12.0:
optional: true
retry@0.13.1: {}
reusify@1.1.0: {}
rimraf@3.0.2:
@@ -7376,6 +7562,8 @@ snapshots:
dependencies:
semver: 7.7.1
simple-wcswidth@1.0.1: {}
sisteransi@1.0.5: {}
slash@3.0.0: {}
@@ -7687,6 +7875,8 @@ snapshots:
util-deprecate@1.0.2: {}
uuid@10.0.0: {}
uuid@9.0.1: {}
v8-compile-cache-lib@3.0.1: {}

View File

@@ -22,6 +22,7 @@ export const DEFAULT_MEMORY_CONFIG: MemoryConfig = {
config: {
apiKey: process.env.OPENAI_API_KEY || "",
model: "gpt-4-turbo-preview",
modelProperties: undefined,
},
},
enableGraph: false,

View File

@@ -9,43 +9,84 @@ export class ConfigManager {
provider:
userConfig.embedder?.provider ||
DEFAULT_MEMORY_CONFIG.embedder.provider,
config: {
apiKey:
userConfig.embedder?.config?.apiKey ||
DEFAULT_MEMORY_CONFIG.embedder.config.apiKey,
model:
userConfig.embedder?.config?.model ||
DEFAULT_MEMORY_CONFIG.embedder.config.model,
},
config: (() => {
const defaultConf = DEFAULT_MEMORY_CONFIG.embedder.config;
const userConf = userConfig.embedder?.config;
let finalModel: string | any = defaultConf.model;
if (userConf?.model && typeof userConf.model === "object") {
finalModel = userConf.model;
} else if (userConf?.model && typeof userConf.model === "string") {
finalModel = userConf.model;
}
return {
apiKey:
userConf?.apiKey !== undefined
? userConf.apiKey
: defaultConf.apiKey,
model: finalModel,
url: userConf?.url,
};
})(),
},
vectorStore: {
provider:
userConfig.vectorStore?.provider ||
DEFAULT_MEMORY_CONFIG.vectorStore.provider,
config: {
collectionName:
userConfig.vectorStore?.config?.collectionName ||
DEFAULT_MEMORY_CONFIG.vectorStore.config.collectionName,
dimension:
userConfig.vectorStore?.config?.dimension ||
DEFAULT_MEMORY_CONFIG.vectorStore.config.dimension,
...userConfig.vectorStore?.config,
},
config: (() => {
const defaultConf = DEFAULT_MEMORY_CONFIG.vectorStore.config;
const userConf = userConfig.vectorStore?.config;
// Prioritize user-provided client instance
if (userConf?.client && typeof userConf.client === "object") {
return {
client: userConf.client,
// Include other fields from userConf if necessary, or omit defaults
collectionName: userConf.collectionName, // Can be undefined
dimension: userConf.dimension || defaultConf.dimension, // Merge dimension
...userConf, // Include any other passthrough fields from user
};
} else {
// If no client provided, merge standard fields
return {
collectionName:
userConf?.collectionName || defaultConf.collectionName,
dimension: userConf?.dimension || defaultConf.dimension,
// Ensure client is not carried over from defaults if not provided by user
client: undefined,
// Include other passthrough fields from userConf even if no client
...userConf,
};
}
})(),
},
llm: {
provider:
userConfig.llm?.provider || DEFAULT_MEMORY_CONFIG.llm.provider,
config: {
apiKey:
userConfig.llm?.config?.apiKey ||
DEFAULT_MEMORY_CONFIG.llm.config.apiKey,
model:
userConfig.llm?.config?.model ||
DEFAULT_MEMORY_CONFIG.llm.config.model,
modelProperties:
userConfig.llm?.config?.modelProperties ||
DEFAULT_MEMORY_CONFIG.llm.config.modelProperties,
},
config: (() => {
const defaultConf = DEFAULT_MEMORY_CONFIG.llm.config;
const userConf = userConfig.llm?.config;
let finalModel: string | any = defaultConf.model;
if (userConf?.model && typeof userConf.model === "object") {
finalModel = userConf.model;
} else if (userConf?.model && typeof userConf.model === "string") {
finalModel = userConf.model;
}
return {
apiKey:
userConf?.apiKey !== undefined
? userConf.apiKey
: defaultConf.apiKey,
model: finalModel,
modelProperties:
userConf?.modelProperties !== undefined
? userConf.modelProperties
: defaultConf.modelProperties,
};
})(),
},
historyDbPath:
userConfig.historyDbPath || DEFAULT_MEMORY_CONFIG.historyDbPath,

View File

@@ -0,0 +1,50 @@
import { Embeddings } from "@langchain/core/embeddings";
import { Embedder } from "./base";
import { EmbeddingConfig } from "../types";
export class LangchainEmbedder implements Embedder {
private embedderInstance: Embeddings;
private batchSize?: number; // Some LC embedders have batch size
constructor(config: EmbeddingConfig) {
// Check if config.model is provided and is an object (the instance)
if (!config.model || typeof config.model !== "object") {
throw new Error(
"Langchain embedder provider requires an initialized Langchain Embeddings instance passed via the 'model' field in the embedder config.",
);
}
// Basic check for embedding methods
if (
typeof (config.model as any).embedQuery !== "function" ||
typeof (config.model as any).embedDocuments !== "function"
) {
throw new Error(
"Provided Langchain 'instance' in the 'model' field does not appear to be a valid Langchain Embeddings instance (missing embedQuery or embedDocuments method).",
);
}
this.embedderInstance = config.model as Embeddings;
// Store batch size if the instance has it (optional)
this.batchSize = (this.embedderInstance as any).batchSize;
}
async embed(text: string): Promise<number[]> {
try {
// Use embedQuery for single text embedding
return await this.embedderInstance.embedQuery(text);
} catch (error) {
console.error("Error embedding text with Langchain Embedder:", error);
throw error;
}
}
async embedBatch(texts: string[]): Promise<number[][]> {
try {
// Use embedDocuments for batch embedding
// Langchain's embedDocuments handles batching internally if needed/supported
return await this.embedderInstance.embedDocuments(texts);
} catch (error) {
console.error("Error embedding batch with Langchain Embedder:", error);
throw error;
}
}
}
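
For orientation, a minimal usage sketch of this wrapper (illustrative only; it assumes `@langchain/openai` is installed, and the import path for `LangchainEmbedder` is an assumption based on the export added in `src/index.ts` below):

```typescript
import { OpenAIEmbeddings } from "@langchain/openai";
import { LangchainEmbedder } from "mem0ai"; // import path is an assumption

// The initialized LangChain Embeddings instance is passed as `model`.
const embedder = new LangchainEmbedder({ model: new OpenAIEmbeddings() });

const one = await embedder.embed("sci-fi movies"); // delegates to embedQuery
const many = await embedder.embedBatch(["a", "b"]); // delegates to embedDocuments
```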

View File

@@ -1,3 +1,5 @@
import { z } from "zod";
export interface GraphToolParameters { export interface GraphToolParameters {
source: string; source: string;
destination: string; destination: string;
@@ -21,6 +23,58 @@ export interface GraphRelationsParameters {
}>; }>;
} }
// --- Zod Schemas for Tool Arguments ---
// Schema for simple relationship arguments (Update, Delete)
export const GraphSimpleRelationshipArgsSchema = z.object({
source: z
.string()
.describe("The identifier of the source node in the relationship."),
relationship: z
.string()
.describe("The relationship between the source and destination nodes."),
destination: z
.string()
.describe("The identifier of the destination node in the relationship."),
});
// Schema for adding a relationship (includes types)
export const GraphAddRelationshipArgsSchema =
GraphSimpleRelationshipArgsSchema.extend({
source_type: z
.string()
.describe("The type or category of the source node."),
destination_type: z
.string()
.describe("The type or category of the destination node."),
});
// Schema for extracting entities
export const GraphExtractEntitiesArgsSchema = z.object({
entities: z
.array(
z.object({
entity: z.string().describe("The name or identifier of the entity."),
entity_type: z.string().describe("The type or category of the entity."),
}),
)
.describe("An array of entities with their types."),
});
// Schema for establishing relationships
export const GraphRelationsArgsSchema = z.object({
entities: z
.array(GraphSimpleRelationshipArgsSchema)
.describe("An array of relationships (source, relationship, destination)."),
});
// --- Tool Definitions (using JSON schema, keep as is) ---
// Note: The tool definitions themselves still use JSON schema format
// as expected by the LLM APIs. The Zod schemas above are for internal
// validation and potentially for use with Langchain's .withStructuredOutput
// if we adapt it to handle tool calls via schema.
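// Illustrative only: once an LLM returns tool-call arguments as JSON, they
// can be validated against these schemas before use, e.g.
//
//   const args = GraphAddRelationshipArgsSchema.parse(
//     JSON.parse(toolCall.arguments),
//   ); // throws a ZodError on malformed arguments
//
// where `toolCall` is a hypothetical parsed tool call from the LLM response.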
export const UPDATE_MEMORY_TOOL_GRAPH = { export const UPDATE_MEMORY_TOOL_GRAPH = {
type: "function", type: "function",
function: { function: {

View File

@@ -5,6 +5,7 @@ export * from "./embeddings/base";
export * from "./embeddings/openai";
export * from "./embeddings/ollama";
export * from "./embeddings/google";
export * from "./embeddings/langchain";
export * from "./llms/base";
export * from "./llms/openai";
export * from "./llms/google";
@@ -13,8 +14,11 @@ export * from "./llms/anthropic";
export * from "./llms/groq";
export * from "./llms/ollama";
export * from "./llms/mistral";
export * from "./llms/langchain";
export * from "./vector_stores/base";
export * from "./vector_stores/memory";
export * from "./vector_stores/qdrant";
export * from "./vector_stores/redis";
export * from "./vector_stores/supabase";
export * from "./vector_stores/langchain";
export * from "./utils/factory";

View File

@@ -12,7 +12,7 @@ export interface LLMResponse {
export interface LLM {
generateResponse(
messages: Array<{ role: string; content: string }>,
response_format: { type: string },
response_format?: { type: string },
tools?: any[],
): Promise<any>;
generateChat(messages: Message[]): Promise<LLMResponse>;

View File

@@ -0,0 +1,255 @@
import { BaseLanguageModel } from "@langchain/core/language_models/base";
import {
AIMessage,
HumanMessage,
SystemMessage,
BaseMessage,
} from "@langchain/core/messages";
import { z } from "zod";
import { LLM, LLMResponse } from "./base";
import { LLMConfig, Message } from "../types/index";
// Import the schemas directly into LangchainLLM
import { FactRetrievalSchema, MemoryUpdateSchema } from "../prompts";
// Import graph tool argument schemas
import {
GraphExtractEntitiesArgsSchema,
GraphRelationsArgsSchema,
GraphSimpleRelationshipArgsSchema, // Used for delete tool
} from "../graphs/tools";
const convertToLangchainMessages = (messages: Message[]): BaseMessage[] => {
return messages.map((msg) => {
const content =
typeof msg.content === "string"
? msg.content
: JSON.stringify(msg.content);
switch (msg.role?.toLowerCase()) {
case "system":
return new SystemMessage(content);
case "user":
case "human":
return new HumanMessage(content);
case "assistant":
case "ai":
return new AIMessage(content);
default:
console.warn(
`Unsupported message role '${msg.role}' for Langchain. Treating as 'human'.`,
);
return new HumanMessage(content);
}
});
};
export class LangchainLLM implements LLM {
private llmInstance: BaseLanguageModel;
private modelName: string;
constructor(config: LLMConfig) {
if (!config.model || typeof config.model !== "object") {
throw new Error(
"Langchain provider requires an initialized Langchain instance passed via the 'model' field in the LLM config.",
);
}
if (typeof (config.model as any).invoke !== "function") {
throw new Error(
"Provided Langchain 'instance' in the 'model' field does not appear to be a valid Langchain language model (missing invoke method).",
);
}
this.llmInstance = config.model as BaseLanguageModel;
this.modelName =
(this.llmInstance as any).modelId ||
(this.llmInstance as any).model ||
"langchain-model";
}
async generateResponse(
messages: Message[],
response_format?: { type: string },
tools?: any[],
): Promise<string | LLMResponse> {
const langchainMessages = convertToLangchainMessages(messages);
let runnable: any = this.llmInstance;
const invokeOptions: Record<string, any> = {};
let isStructuredOutput = false;
let selectedSchema: z.ZodSchema<any> | null = null;
let isToolCallResponse = false;
// --- Internal Schema Selection Logic (runs regardless of response_format) ---
const systemPromptContent =
(messages.find((m) => m.role === "system")?.content as string) || "";
const userPromptContent =
(messages.find((m) => m.role === "user")?.content as string) || "";
const toolNames = tools?.map((t) => t.function.name) || [];
// Prioritize tool call argument schemas
if (toolNames.includes("extract_entities")) {
selectedSchema = GraphExtractEntitiesArgsSchema;
isToolCallResponse = true;
} else if (toolNames.includes("establish_relationships")) {
selectedSchema = GraphRelationsArgsSchema;
isToolCallResponse = true;
} else if (toolNames.includes("delete_graph_memory")) {
selectedSchema = GraphSimpleRelationshipArgsSchema;
isToolCallResponse = true;
}
// Check for memory prompts if no tool schema matched
else if (
systemPromptContent.includes("Personal Information Organizer") &&
systemPromptContent.includes("extract relevant pieces of information")
) {
selectedSchema = FactRetrievalSchema;
} else if (
userPromptContent.includes("smart memory manager") &&
userPromptContent.includes("Compare newly retrieved facts")
) {
selectedSchema = MemoryUpdateSchema;
}
// --- Apply Structured Output if Schema Selected ---
if (
selectedSchema &&
typeof (this.llmInstance as any).withStructuredOutput === "function"
) {
// Apply if a schema was selected (for memory or single tool calls)
if (
!isToolCallResponse ||
(isToolCallResponse && tools && tools.length === 1)
) {
try {
runnable = (this.llmInstance as any).withStructuredOutput(
selectedSchema,
{ name: tools?.[0]?.function.name },
);
isStructuredOutput = true;
} catch (e) {
isStructuredOutput = false; // Ensure flag is false on error
// No fallback to response_format here unless explicitly passed
if (response_format?.type === "json_object") {
invokeOptions.response_format = { type: "json_object" };
}
}
} else if (isToolCallResponse) {
// If multiple tools, don't apply structured output, handle via tool binding below
}
} else if (selectedSchema && response_format?.type === "json_object") {
// Schema selected, but no .withStructuredOutput. Try basic response_format only if explicitly requested.
if (
(this.llmInstance as any)._identifyingParams?.response_format ||
(this.llmInstance as any).response_format
) {
invokeOptions.response_format = { type: "json_object" };
}
} else if (!selectedSchema && response_format?.type === "json_object") {
// Explicit JSON request, but no schema inferred. Try basic response_format.
if (
(this.llmInstance as any)._identifyingParams?.response_format ||
(this.llmInstance as any).response_format
) {
invokeOptions.response_format = { type: "json_object" };
}
}
// --- Handle tool binding ---
if (tools && tools.length > 0) {
if (typeof (runnable as any).bindTools === "function") {
try {
runnable = (runnable as any).bindTools(tools);
} catch (e) {
// Tool binding failed; continue and invoke without bound tools
}
} else {
// Instance does not expose .bindTools; tools are ignored for this call
}
}
// --- Invoke and Process Response ---
try {
const response = await runnable.invoke(langchainMessages, invokeOptions);
if (isStructuredOutput && !isToolCallResponse) {
// Memory prompt with structured output
return JSON.stringify(response);
} else if (isStructuredOutput && isToolCallResponse) {
// Tool call with structured arguments
if (response?.tool_calls && Array.isArray(response.tool_calls)) {
const mappedToolCalls = response.tool_calls.map((call: any) => ({
name: call.name || tools?.[0]?.function.name || "unknown_tool",
arguments:
typeof call.args === "string"
? call.args
: JSON.stringify(call.args),
}));
return {
content: response.content || "",
role: "assistant",
toolCalls: mappedToolCalls,
};
} else {
// Direct object response for tool args
return {
content: "",
role: "assistant",
toolCalls: [
{
name: tools?.[0]?.function.name || "unknown_tool",
arguments: JSON.stringify(response),
},
],
};
}
} else if (
response &&
response.tool_calls &&
Array.isArray(response.tool_calls)
) {
// Standard tool call response (no structured output used/failed)
const mappedToolCalls = response.tool_calls.map((call: any) => ({
name: call.name || "unknown_tool",
arguments:
typeof call.args === "string"
? call.args
: JSON.stringify(call.args),
}));
return {
content: response.content || "",
role: "assistant",
toolCalls: mappedToolCalls,
};
} else if (response && typeof response.content === "string") {
// Standard text response
return response.content;
} else {
// Fallback for unexpected formats
return JSON.stringify(response);
}
} catch (error) {
throw error;
}
}
async generateChat(messages: Message[]): Promise<LLMResponse> {
const langchainMessages = convertToLangchainMessages(messages);
try {
const response = await this.llmInstance.invoke(langchainMessages);
if (response && typeof response.content === "string") {
return {
content: response.content,
role: "assistant",
};
} else {
console.warn(
`Unexpected response format from Langchain instance (${this.modelName}) for generateChat:`,
response,
);
return {
content: JSON.stringify(response),
role: "assistant",
};
}
} catch (error) {
console.error(
`Error invoking Langchain instance (${this.modelName}) for generateChat:`,
error,
);
throw error;
}
}
}
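
A minimal sketch of the wrapper in use (illustrative; any LangChain chat model exposing `invoke` should work, `@langchain/openai` is just an example, and the import path for `LangchainLLM` is an assumption):

```typescript
import { ChatOpenAI } from "@langchain/openai";
import { LangchainLLM } from "mem0ai"; // import path is an assumption

const llm = new LangchainLLM({
  model: new ChatOpenAI({ model: "gpt-4o", temperature: 0 }),
});

// generateChat converts the messages and calls invoke() on the wrapped model.
const reply = await llm.generateChat([
  { role: "user", content: "One-line summary: users prefer sci-fi." },
]);
console.log(reply.content);
```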

View File

@@ -43,7 +43,7 @@ export class Memory {
private vectorStore: VectorStore;
private llm: LLM;
private db: HistoryManager;
private collectionName: string;
private collectionName: string | undefined;
private apiVersion: string;
private graphMemory?: MemoryGraph;
private enableGraph: boolean;
@@ -241,12 +241,10 @@ export class Memory {
}
const parsedMessages = messages.map((m) => m.content).join("\n");
// Get prompts
const [systemPrompt, userPrompt] = this.customPrompt
? [this.customPrompt, `Input:\n${parsedMessages}`]
: getFactRetrievalMessages(parsedMessages);
// Extract facts using LLM
const response = await this.llm.generateResponse(
[
{ role: "system", content: systemPrompt },
@@ -255,8 +253,18 @@ export class Memory {
{ type: "json_object" },
);
const cleanResponse = removeCodeBlocks(response);
const cleanResponse = removeCodeBlocks(response as string);
const facts = JSON.parse(cleanResponse).facts || [];
let facts: string[] = [];
try {
facts = JSON.parse(cleanResponse).facts || [];
} catch (e) {
console.error(
"Failed to parse facts from LLM response:",
cleanResponse,
e,
);
facts = [];
}
// Get embeddings for new facts
const newMessageEmbeddings: Record<string, number[]> = {};
@@ -292,13 +300,24 @@ export class Memory {
// Get memory update decisions
const updatePrompt = getUpdateMemoryMessages(uniqueOldMemories, facts);
const updateResponse = await this.llm.generateResponse(
[{ role: "user", content: updatePrompt }],
{ type: "json_object" },
);
const cleanUpdateResponse = removeCodeBlocks(updateResponse);
const cleanUpdateResponse = removeCodeBlocks(updateResponse as string);
const memoryActions = JSON.parse(cleanUpdateResponse).memory || [];
let memoryActions: any[] = [];
try {
memoryActions = JSON.parse(cleanUpdateResponse).memory || [];
} catch (e) {
console.error(
"Failed to parse memory actions from LLM response:",
cleanUpdateResponse,
e,
);
memoryActions = [];
}
// Process memory actions
const results: MemoryItem[] = [];
@@ -511,14 +530,47 @@ export class Memory {
async reset(): Promise<void> {
await this._captureEvent("reset");
await this.db.reset();
await this.vectorStore.deleteCol();
if (this.graphMemory) {
await this.graphMemory.deleteAll({ userId: "default" });
}
// Check provider before attempting deleteCol
if (this.config.vectorStore.provider.toLowerCase() !== "langchain") {
try {
await this.vectorStore.deleteCol();
} catch (e) {
console.error(
`Failed to delete collection for provider '${this.config.vectorStore.provider}':`,
e,
);
// Decide if you want to re-throw or just log
}
} else {
console.warn(
"Memory.reset(): Skipping vector store collection deletion as 'langchain' provider is used. Underlying Langchain vector store data is not cleared by this operation.",
);
}
if (this.graphMemory) {
await this.graphMemory.deleteAll({ userId: "default" }); // Assuming this is okay, or needs similar check?
}
// Re-initialize factories/clients based on the original config
this.embedder = EmbedderFactory.create(
this.config.embedder.provider,
this.config.embedder.config,
);
// Re-create vector store instance - crucial for Langchain to reset wrapper state if needed
this.vectorStore = VectorStoreFactory.create(
this.config.vectorStore.provider,
this.config.vectorStore.config, // This will pass the original client instance back
);
this.llm = LLMFactory.create(
this.config.llm.provider,
this.config.llm.config,
);
// Re-init DB if needed (though db.reset() likely handles its state)
// Re-init Graph if needed
// Re-initialize telemetry
this._initializeTelemetry();
}
async getAll(config: GetAllMemoryOptions): Promise<SearchResult> { async getAll(config: GetAllMemoryOptions): Promise<SearchResult> {

View File

@@ -1,3 +1,37 @@
import { z } from "zod";
// Define Zod schema for fact retrieval output
export const FactRetrievalSchema = z.object({
facts: z
.array(z.string())
.describe("An array of distinct facts extracted from the conversation."),
});
// Define Zod schema for memory update output
export const MemoryUpdateSchema = z.object({
memory: z
.array(
z.object({
id: z.string().describe("The unique identifier of the memory item."),
text: z.string().describe("The content of the memory item."),
event: z
.enum(["ADD", "UPDATE", "DELETE", "NONE"])
.describe(
"The action taken for this memory item (ADD, UPDATE, DELETE, or NONE).",
),
old_memory: z
.string()
.optional()
.describe(
"The previous content of the memory item if the event was UPDATE.",
),
}),
)
.describe(
"An array representing the state of memory items after processing new facts.",
),
});
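// Illustrative only: the memory-update step could validate raw LLM output
// against this schema before acting on it, e.g.
//
//   const actions = MemoryUpdateSchema.parse(JSON.parse(raw)).memory;
//
// where `raw` is a hypothetical cleaned JSON string from the LLM; parse()
// throws a ZodError instead of silently accepting malformed output.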
export function getFactRetrievalMessages(
parsedMessages: string,
): [string, string] {

View File

@@ -14,13 +14,15 @@ export interface Message {
export interface EmbeddingConfig {
apiKey?: string;
model?: string;
model?: string | any;
url?: string;
}
export interface VectorStoreConfig {
collectionName: string;
collectionName?: string;
dimension?: number;
client?: any;
instance?: any;
[key: string]: any;
}
@@ -38,7 +40,7 @@ export interface LLMConfig {
provider?: string;
config?: Record<string, any>;
apiKey?: string;
model?: string;
model?: string | any;
modelProperties?: Record<string, any>;
}
@@ -110,24 +112,25 @@ export const MemoryConfigSchema = z.object({
embedder: z.object({
provider: z.string(),
config: z.object({
apiKey: z.string(),
apiKey: z.string().optional(),
model: z.string().optional(),
model: z.union([z.string(), z.any()]).optional(),
}),
}),
vectorStore: z.object({
provider: z.string(),
config: z
.object({
collectionName: z.string(),
collectionName: z.string().optional(),
dimension: z.number().optional(),
client: z.any().optional(),
})
.passthrough(),
}),
llm: z.object({
provider: z.string(),
config: z.object({
apiKey: z.string(),
apiKey: z.string().optional(),
model: z.string().optional(),
model: z.union([z.string(), z.any()]).optional(),
modelProperties: z.record(z.string(), z.any()).optional(),
}),
}),
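
With these relaxations, a config that passes an initialized LangChain instance can now pass validation. A rough sketch (the import paths and the exact set of required top-level fields are assumptions here, so `safeParse` is used rather than `parse`):

```typescript
import { ChatOpenAI } from "@langchain/openai";
import { MemoryConfigSchema } from "mem0ai"; // import path is an assumption

const result = MemoryConfigSchema.safeParse({
  embedder: { provider: "openai", config: {} }, // apiKey now optional
  vectorStore: { provider: "memory", config: {} }, // collectionName now optional
  llm: {
    provider: "langchain",
    config: { model: new ChatOpenAI({ model: "gpt-4o" }) }, // instance allowed
  },
});
console.log(result.success);
```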

View File

@@ -26,6 +26,9 @@ import { HistoryManager } from "../storage/base";
import { GoogleEmbedder } from "../embeddings/google";
import { GoogleLLM } from "../llms/google";
import { AzureOpenAILLM } from "../llms/azure";
import { LangchainLLM } from "../llms/langchain";
import { LangchainEmbedder } from "../embeddings/langchain";
import { LangchainVectorStore } from "../vector_stores/langchain";
export class EmbedderFactory {
static create(provider: string, config: EmbeddingConfig): Embedder {
@@ -36,6 +39,8 @@ export class EmbedderFactory {
return new OllamaEmbedder(config);
case "google":
return new GoogleEmbedder(config);
case "langchain":
return new LangchainEmbedder(config);
default:
throw new Error(`Unsupported embedder provider: ${provider}`);
}
@@ -44,7 +49,7 @@ export class EmbedderFactory {
export class LLMFactory {
static create(provider: string, config: LLMConfig): LLM {
switch (provider) {
switch (provider.toLowerCase()) {
case "openai":
return new OpenAILLM(config);
case "openai_structured":
@@ -61,6 +66,8 @@ export class LLMFactory {
return new AzureOpenAILLM(config);
case "mistral":
return new MistralLLM(config);
case "langchain":
return new LangchainLLM(config);
default:
throw new Error(`Unsupported LLM provider: ${provider}`);
}
@@ -73,11 +80,13 @@ export class VectorStoreFactory {
case "memory": case "memory":
return new MemoryVectorStore(config); return new MemoryVectorStore(config);
case "qdrant": case "qdrant":
return new Qdrant(config as any); // Type assertion needed as config is extended return new Qdrant(config as any);
case "redis": case "redis":
return new RedisDB(config as any); // Type assertion needed as config is extended return new RedisDB(config as any);
case "supabase": case "supabase":
return new SupabaseDB(config as any); // Type assertion needed as config is extended return new SupabaseDB(config as any);
case "langchain":
return new LangchainVectorStore(config as any);
default: default:
throw new Error(`Unsupported vector store provider: ${provider}`); throw new Error(`Unsupported vector store provider: ${provider}`);
} }

View File

@@ -0,0 +1,231 @@
import { VectorStore as LangchainVectorStoreInterface } from "@langchain/core/vectorstores";
import { Document } from "@langchain/core/documents";
import { VectorStore } from "./base"; // mem0's VectorStore interface
import { SearchFilters, VectorStoreConfig, VectorStoreResult } from "../types";
// Config specifically for the Langchain wrapper
interface LangchainStoreConfig extends VectorStoreConfig {
client: LangchainVectorStoreInterface;
// dimension might still be useful for validation if not automatically inferred
}
export class LangchainVectorStore implements VectorStore {
private lcStore: LangchainVectorStoreInterface;
private dimension?: number;
private storeUserId: string = "anonymous-langchain-user"; // Simple in-memory user ID
constructor(config: LangchainStoreConfig) {
if (!config.client || typeof config.client !== "object") {
throw new Error(
"Langchain vector store provider requires an initialized Langchain VectorStore instance passed via the 'client' field.",
);
}
// Basic checks for core methods
if (
typeof config.client.addVectors !== "function" ||
typeof config.client.similaritySearchVectorWithScore !== "function"
) {
throw new Error(
"Provided Langchain 'client' does not appear to be a valid Langchain VectorStore (missing addVectors or similaritySearchVectorWithScore method).",
);
}
this.lcStore = config.client;
this.dimension = config.dimension;
// Attempt to get dimension from the underlying store if not provided
if (
!this.dimension &&
(this.lcStore as any).embeddings?.embeddingDimension
) {
this.dimension = (this.lcStore as any).embeddings.embeddingDimension;
}
if (
!this.dimension &&
(this.lcStore as any).embedding?.embeddingDimension
) {
this.dimension = (this.lcStore as any).embedding.embeddingDimension;
}
// If still no dimension, we might need to throw or warn, as it's needed for validation
if (!this.dimension) {
console.warn(
"LangchainVectorStore: Could not determine embedding dimension. Input validation might be skipped.",
);
}
}
// --- Method Mappings ---
async insert(
vectors: number[][],
ids: string[],
payloads: Record<string, any>[],
): Promise<void> {
if (!ids || ids.length !== vectors.length) {
throw new Error(
"IDs array must be provided and have the same length as vectors.",
);
}
if (this.dimension) {
vectors.forEach((v, i) => {
if (v.length !== this.dimension) {
throw new Error(
`Vector dimension mismatch at index ${i}. Expected ${this.dimension}, got ${v.length}`,
);
}
});
}
// Convert payloads to Langchain Document metadata format
const documents = payloads.map((payload, i) => {
// Provide empty pageContent, store mem0 id and other data in metadata
return new Document({
pageContent: "", // Add required empty pageContent
metadata: { ...payload, _mem0_id: ids[i] },
});
});
// Use addVectors. Note: Langchain stores often generate their own internal IDs.
// We store the mem0 ID in the metadata (`_mem0_id`).
try {
await this.lcStore.addVectors(vectors, documents, { ids }); // Pass mem0 ids if the store supports it
} catch (e) {
// Fallback if the store doesn't support passing ids directly during addVectors
console.warn(
"Langchain store might not support custom IDs on insert. Trying without IDs.",
e,
);
await this.lcStore.addVectors(vectors, documents);
}
}
async search(
query: number[],
limit: number = 5,
filters?: SearchFilters, // filters parameter is received but will be ignored
): Promise<VectorStoreResult[]> {
if (this.dimension && query.length !== this.dimension) {
throw new Error(
`Query vector dimension mismatch. Expected ${this.dimension}, got ${query.length}`,
);
}
// --- Remove filter processing logic ---
// Filters passed via mem0 interface are not reliably translatable to generic Langchain stores.
// let lcFilter: any = undefined;
// if (filters && ...) { ... }
// console.warn("LangchainVectorStore: Passing filters directly..."); // Remove warning
// Call similaritySearchVectorWithScore WITHOUT the filter argument
const results = await this.lcStore.similaritySearchVectorWithScore(
query,
limit,
// Do not pass lcFilter here
);
// Map Langchain results [Document, score] back to mem0 VectorStoreResult
return results.map(([doc, score]) => ({
id: doc.metadata._mem0_id || "unknown_id",
payload: doc.metadata,
score: score,
}));
}
// --- Methods with No Direct Langchain Equivalent (Throwing Errors) ---
async get(vectorId: string): Promise<VectorStoreResult | null> {
// Most Langchain stores lack a direct getById. Simulation is inefficient.
console.error(
`LangchainVectorStore: The 'get' method is not directly supported by most Langchain VectorStores.`,
);
throw new Error(
"Method 'get' not reliably supported by LangchainVectorStore wrapper.",
);
// Potential (inefficient) simulation:
// Perform a search with a filter like { _mem0_id: vectorId }, limit 1.
// This requires the underlying store to support filtering on _mem0_id.
}
async update(
vectorId: string,
vector: number[],
payload: Record<string, any>,
): Promise<void> {
// Updates often require delete + add in Langchain.
console.error(
`LangchainVectorStore: The 'update' method is not directly supported. Use delete followed by insert.`,
);
throw new Error(
"Method 'update' not supported by LangchainVectorStore wrapper.",
);
// Possible implementation: Check if store has delete, call delete({_mem0_id: vectorId}), then insert.
}
async delete(vectorId: string): Promise<void> {
// Check if the underlying store supports deletion by ID
if (typeof (this.lcStore as any).delete === "function") {
try {
// We need to delete based on our stored _mem0_id.
// Langchain's delete often takes its own internal IDs or filter.
// Attempting deletion via filter is the most likely approach.
console.warn(
"LangchainVectorStore: Attempting delete via filter on '_mem0_id'. Success depends on the specific Langchain VectorStore's delete implementation.",
);
await (this.lcStore as any).delete({ filter: { _mem0_id: vectorId } });
// OR if it takes IDs directly (less common for *our* IDs):
// await (this.lcStore as any).delete({ ids: [vectorId] });
} catch (e) {
console.error(
`LangchainVectorStore: Delete failed. Underlying store's delete method might expect different arguments or filters. Error: ${e}`,
);
throw new Error(`Delete failed in underlying Langchain store: ${e}`);
}
} else {
console.error(
`LangchainVectorStore: The underlying Langchain store instance does not seem to support a 'delete' method.`,
);
throw new Error(
"Method 'delete' not available on the provided Langchain VectorStore client.",
);
}
}
async list(
filters?: SearchFilters,
limit: number = 100,
): Promise<[VectorStoreResult[], number]> {
// No standard list method in Langchain core interface.
console.error(
`LangchainVectorStore: The 'list' method is not supported by the generic LangchainVectorStore wrapper.`,
);
throw new Error(
"Method 'list' not supported by LangchainVectorStore wrapper.",
);
// Could potentially be implemented if the underlying store has a specific list/scroll/query capability.
}
async deleteCol(): Promise<void> {
console.error(
`LangchainVectorStore: The 'deleteCol' method is not supported by the generic LangchainVectorStore wrapper.`,
);
throw new Error(
"Method 'deleteCol' not supported by LangchainVectorStore wrapper.",
);
}
// --- Wrapper-Specific Methods (In-Memory User ID) ---
async getUserId(): Promise<string> {
return this.storeUserId;
}
async setUserId(userId: string): Promise<void> {
this.storeUserId = userId;
}
async initialize(): Promise<void> {
// No specific initialization needed for the wrapper itself,
// assuming the passed Langchain client is already initialized.
return Promise.resolve();
}
}
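
Finally, a minimal sketch of this wrapper in isolation (illustrative; it assumes `langchain` and `@langchain/openai` are installed, and the import path for `LangchainVectorStore` is an assumption):

```typescript
import { OpenAIEmbeddings } from "@langchain/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { LangchainVectorStore } from "mem0ai"; // import path is an assumption

const lcStore = new MemoryVectorStore(new OpenAIEmbeddings());
const store = new LangchainVectorStore({ client: lcStore });

// insert() maps onto addVectors(); each payload becomes Document metadata
// carrying the mem0 id under `_mem0_id`.
await store.insert(
  [[0.1, 0.2, 0.3]],
  ["mem-1"],
  [{ userId: "alice", text: "loves sci-fi" }],
);

// search() maps onto similaritySearchVectorWithScore(); mem0 filters are ignored.
const [hit] = await store.search([0.1, 0.2, 0.3], 1);
console.log(hit.id, hit.score); // "mem-1" plus a similarity score
```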