Supabase Vector Store (#2427)

This commit is contained in:
Saket Aryan
2025-03-25 00:15:50 +05:30
committed by GitHub
parent 9db5f62262
commit 2b49c9eedd
14 changed files with 8422 additions and 8 deletions

View File

@@ -4,7 +4,8 @@ Create a [Supabase](https://supabase.com/dashboard/projects) account and project
### Usage ### Usage
```python <CodeGroup>
```python Python
import os import os
from mem0 import Memory from mem0 import Memory
@@ -32,10 +33,90 @@ messages = [
m.add(messages, user_id="alice", metadata={"category": "movies"}) m.add(messages, user_id="alice", metadata={"category": "movies"})
``` ```
```typescript Typescript
import { Memory } from "mem0ai/oss";
const config = {
vectorStore: {
provider: "supabase",
config: {
collectionName: "memories",
embeddingModelDims: 1536,
supabaseUrl: process.env.SUPABASE_URL || "",
supabaseKey: process.env.SUPABASE_KEY || "",
tableName: "memories",
},
},
}
const memory = new Memory(config);
const messages = [
{"role": "user", "content": "I'm planning to watch a movie tonight. Any recommendations?"},
{"role": "assistant", "content": "How about a thriller movies? They can be quite engaging."},
{"role": "user", "content": "I'm not a big fan of thriller movies but I love sci-fi movies."},
{"role": "assistant", "content": "Got it! I'll avoid thriller recommendations and suggest sci-fi movies in the future."}
]
await memory.add(messages, { userId: "alice", metadata: { category: "movies" } });
```
</CodeGroup>
### SQL Migrations for TypeScript Implementation
The following SQL migrations are required to enable the vector extension and create the memories table:
```sql
-- Enable the vector extension
create extension if not exists vector;
-- Create the memories table
create table if not exists memories (
id text primary key,
embedding vector(1536),
metadata jsonb,
created_at timestamp with time zone default timezone('utc', now()),
updated_at timestamp with time zone default timezone('utc', now())
);
-- Create the vector similarity search function
create or replace function match_vectors(
query_embedding vector(1536),
match_count int,
filter jsonb default '{}'::jsonb
)
returns table (
id text,
similarity float,
metadata jsonb
)
language plpgsql
as $$
begin
return query
select
t.id::text,
1 - (t.embedding <=> query_embedding) as similarity,
t.metadata
from memories t
where case
when filter::text = '{}'::text then true
else t.metadata @> filter
end
order by t.embedding <=> query_embedding
limit match_count;
end;
$$;
```
Go to [Supabase](https://supabase.com/dashboard/projects) and run the above SQL migrations inside the SQL Editor.
### Config ### Config
Here are the parameters available for configuring Supabase: Here are the parameters available for configuring Supabase:
<Tabs>
<Tab title="Python">
| Parameter | Description | Default Value | | Parameter | Description | Default Value |
| --- | --- | --- | | --- | --- | --- |
| `connection_string` | PostgreSQL connection string (required) | None | | `connection_string` | PostgreSQL connection string (required) | None |
@@ -43,6 +124,17 @@ Here are the parameters available for configuring Supabase:
| `embedding_model_dims` | Dimensions of the embedding model | `1536` | | `embedding_model_dims` | Dimensions of the embedding model | `1536` |
| `index_method` | Vector index method to use | `auto` | | `index_method` | Vector index method to use | `auto` |
| `index_measure` | Distance measure for similarity search | `cosine_distance` | | `index_measure` | Distance measure for similarity search | `cosine_distance` |
</Tab>
<Tab title="TypeScript">
| Parameter | Description | Default Value |
| --- | --- | --- |
| `collectionName` | Name for the vector collection | `mem0` |
| `embeddingModelDims` | Dimensions of the embedding model | `1536` |
| `supabaseUrl` | Supabase URL | None |
| `supabaseKey` | Supabase key | None |
| `tableName` | Name for the vector table | `memories` |
</Tab>
</Tabs>
### Index Methods ### Index Methods

View File

@@ -1,6 +1,6 @@
{ {
"name": "mem0ai", "name": "mem0ai",
"version": "2.1.8", "version": "2.1.9",
"description": "The Memory Layer For Your AI Apps", "description": "The Memory Layer For Your AI Apps",
"main": "./dist/index.js", "main": "./dist/index.js",
"module": "./dist/index.mjs", "module": "./dist/index.mjs",
@@ -34,7 +34,8 @@
"clean": "rimraf dist", "clean": "rimraf dist",
"build": "npm run clean && npx prettier --check . && npx tsup", "build": "npm run clean && npx prettier --check . && npx tsup",
"dev": "npx nodemon", "dev": "npx nodemon",
"start": "npx ts-node src/oss/examples/basic.ts", "start": "pnpm run example memory",
"example": "ts-node src/oss/examples/vector-stores/index.ts",
"test": "jest", "test": "jest",
"test:ts": "jest --config jest.config.js", "test:ts": "jest --config jest.config.js",
"test:watch": "jest --config jest.config.js --watch", "test:watch": "jest --config jest.config.js --watch",
@@ -99,14 +100,15 @@
"peerDependencies": { "peerDependencies": {
"@anthropic-ai/sdk": "0.18.0", "@anthropic-ai/sdk": "0.18.0",
"@qdrant/js-client-rest": "1.13.0", "@qdrant/js-client-rest": "1.13.0",
"@supabase/supabase-js": "^2.49.1",
"@types/jest": "29.5.14", "@types/jest": "29.5.14",
"@types/pg": "8.11.0", "@types/pg": "8.11.0",
"@types/sqlite3": "3.1.11", "@types/sqlite3": "3.1.11",
"groq-sdk": "0.3.0", "groq-sdk": "0.3.0",
"ollama": "^0.5.14",
"pg": "8.11.3", "pg": "8.11.3",
"redis": "4.7.0", "redis": "4.7.0",
"sqlite3": "5.1.7", "sqlite3": "5.1.7"
"ollama": "^0.5.14"
}, },
"peerDependenciesMeta": { "peerDependenciesMeta": {
"posthog-node": { "posthog-node": {

7643
mem0-ts/pnpm-lock.yaml generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,99 @@
import { Memory } from "../../src";
/**
 * Exercises the full Memory lifecycle against the supplied store:
 * reset, add (single and multi-message), contradicting re-add, get,
 * update, getAll, search, history, delete, and a final reset.
 * Errors are logged rather than rethrown so a failing store does not
 * abort the demo runner.
 */
export async function runTests(memory: Memory) {
  try {
    // Start from a clean slate so repeated runs are reproducible.
    console.log("\nResetting all memories...");
    await memory.reset();
    console.log("All memories reset");

    // Add a single memory
    console.log("\nAdding a single memory...");
    const result1 = await memory.add(
      "Hi, my name is John and I am a software engineer.",
      {
        userId: "john",
      },
    );
    console.log("Added memory:", result1);

    // Add multiple messages
    console.log("\nAdding multiple messages...");
    const result2 = await memory.add(
      [
        { role: "user", content: "What is your favorite city?" },
        { role: "assistant", content: "I love Paris, it is my favorite city." },
      ],
      {
        userId: "john",
      },
    );
    console.log("Added messages:", result2);

    // Re-adding a contradicting message should update the stored memory.
    const result3 = await memory.add(
      [
        { role: "user", content: "What is your favorite city?" },
        {
          role: "assistant",
          content: "I love New York, it is my favorite city.",
        },
      ],
      {
        userId: "john",
      },
    );
    console.log("Updated messages:", result3);

    // Get a single memory
    console.log("\nGetting a single memory...");
    if (result1.results && result1.results.length > 0) {
      const singleMemory = await memory.get(result1.results[0].id);
      console.log("Single memory:", singleMemory);

      // Updating this memory. BUGFIX: the update now happens inside the
      // guard above — the original dereferenced results[0].id
      // unconditionally and crashed with a TypeError when nothing had
      // been added in the first step.
      const result4 = await memory.update(
        result1.results[0].id,
        "I love India, it is my favorite country.",
      );
      console.log("Updated memory:", result4);
    } else {
      console.log("No memory was added in the first step");
    }

    // Get all memories
    console.log("\nGetting all memories...");
    const allMemories = await memory.getAll({
      userId: "john",
    });
    console.log("All memories:", allMemories);

    // Search for memories
    console.log("\nSearching memories...");
    const searchResult = await memory.search("What do you know about Paris?", {
      userId: "john",
    });
    console.log("Search results:", searchResult);

    // Get memory history
    if (result1.results && result1.results.length > 0) {
      console.log("\nGetting memory history...");
      const history = await memory.history(result1.results[0].id);
      console.log("Memory history:", history);
    }

    // Delete a memory
    if (result1.results && result1.results.length > 0) {
      console.log("\nDeleting a memory...");
      await memory.delete(result1.results[0].id);
      console.log("Memory deleted successfully");
    }

    // Reset all memories
    console.log("\nResetting all memories...");
    await memory.reset();
    console.log("All memories reset");
  } catch (error) {
    console.error("Error:", error);
  }
}

View File

@@ -0,0 +1,53 @@
import dotenv from "dotenv";
import { demoMemoryStore } from "./memory";
import { demoSupabase } from "./supabase";
// import { demoQdrant } from "./qdrant";
// import { demoRedis } from "./redis";
// import { demoPGVector } from "./pgvector";
// Load environment variables
dotenv.config();
async function main() {
const args = process.argv.slice(2);
const selectedStore = args[0]?.toLowerCase();
const stores: Record<string, () => Promise<void>> = {
// memory: demoMemoryStore,
supabase: demoSupabase,
// Uncomment these as they are implemented
// qdrant: demoQdrant,
// redis: demoRedis,
// pgvector: demoPGVector,
};
if (selectedStore) {
const demo = stores[selectedStore];
if (demo) {
try {
await demo();
} catch (error) {
console.error(`\nError running ${selectedStore} demo:`, error);
if (selectedStore !== "memory") {
console.log("\nFalling back to memory store...");
await stores.memory();
}
}
} else {
console.log(`\nUnknown vector store: ${selectedStore}`);
console.log("Available stores:", Object.keys(stores).join(", "));
}
return;
}
// If no store specified, run all available demos
for (const [name, demo] of Object.entries(stores)) {
try {
await demo();
} catch (error) {
console.error(`\nError running ${name} demo:`, error);
}
}
}
main().catch(console.error);

View File

@@ -0,0 +1,38 @@
import { Memory } from "../../src";
import { runTests } from "../utils/test-utils";
export async function demoMemoryStore() {
console.log("\n=== Testing In-Memory Vector Store ===\n");
const memory = new Memory({
version: "v1.1",
embedder: {
provider: "openai",
config: {
apiKey: process.env.OPENAI_API_KEY || "",
model: "text-embedding-3-small",
},
},
vectorStore: {
provider: "memory",
config: {
collectionName: "memories",
dimension: 1536,
},
},
llm: {
provider: "openai",
config: {
apiKey: process.env.OPENAI_API_KEY || "",
model: "gpt-4-turbo-preview",
},
},
historyDbPath: "memory.db",
});
await runTests(memory);
}
if (require.main === module) {
demoMemoryStore();
}

View File

@@ -0,0 +1,49 @@
import { Memory } from "../../src";
import { runTests } from "../utils/test-utils";
export async function demoPGVector() {
console.log("\n=== Testing PGVector Store ===\n");
const memory = new Memory({
version: "v1.1",
embedder: {
provider: "openai",
config: {
apiKey: process.env.OPENAI_API_KEY || "",
model: "text-embedding-3-small",
},
},
vectorStore: {
provider: "pgvector",
config: {
collectionName: "memories",
dimension: 1536,
dbname: process.env.PGVECTOR_DB || "vectordb",
user: process.env.PGVECTOR_USER || "postgres",
password: process.env.PGVECTOR_PASSWORD || "postgres",
host: process.env.PGVECTOR_HOST || "localhost",
port: parseInt(process.env.PGVECTOR_PORT || "5432"),
embeddingModelDims: 1536,
hnsw: true,
},
},
llm: {
provider: "openai",
config: {
apiKey: process.env.OPENAI_API_KEY || "",
model: "gpt-4-turbo-preview",
},
},
historyDbPath: "memory.db",
});
await runTests(memory);
}
if (require.main === module) {
if (!process.env.PGVECTOR_DB) {
console.log("\nSkipping PGVector test - environment variables not set");
process.exit(0);
}
demoPGVector();
}

View File

@@ -0,0 +1,50 @@
import { Memory } from "../../src";
import { runTests } from "../utils/test-utils";
export async function demoQdrant() {
console.log("\n=== Testing Qdrant Store ===\n");
const memory = new Memory({
version: "v1.1",
embedder: {
provider: "openai",
config: {
apiKey: process.env.OPENAI_API_KEY || "",
model: "text-embedding-3-small",
},
},
vectorStore: {
provider: "qdrant",
config: {
collectionName: "memories",
embeddingModelDims: 1536,
url: process.env.QDRANT_URL,
apiKey: process.env.QDRANT_API_KEY,
path: process.env.QDRANT_PATH,
host: process.env.QDRANT_HOST,
port: process.env.QDRANT_PORT
? parseInt(process.env.QDRANT_PORT)
: undefined,
onDisk: true,
},
},
llm: {
provider: "openai",
config: {
apiKey: process.env.OPENAI_API_KEY || "",
model: "gpt-4-turbo-preview",
},
},
historyDbPath: "memory.db",
});
await runTests(memory);
}
if (require.main === module) {
if (!process.env.QDRANT_URL && !process.env.QDRANT_HOST) {
console.log("\nSkipping Qdrant test - environment variables not set");
process.exit(0);
}
demoQdrant();
}

View File

@@ -0,0 +1,45 @@
import { Memory } from "../../src";
import { runTests } from "../utils/test-utils";
export async function demoRedis() {
console.log("\n=== Testing Redis Store ===\n");
const memory = new Memory({
version: "v1.1",
embedder: {
provider: "openai",
config: {
apiKey: process.env.OPENAI_API_KEY || "",
model: "text-embedding-3-small",
},
},
vectorStore: {
provider: "redis",
config: {
collectionName: "memories",
embeddingModelDims: 1536,
redisUrl: process.env.REDIS_URL || "redis://localhost:6379",
username: process.env.REDIS_USERNAME,
password: process.env.REDIS_PASSWORD,
},
},
llm: {
provider: "openai",
config: {
apiKey: process.env.OPENAI_API_KEY || "",
model: "gpt-4-turbo-preview",
},
},
historyDbPath: "memory.db",
});
await runTests(memory);
}
if (require.main === module) {
if (!process.env.REDIS_URL) {
console.log("\nSkipping Redis test - environment variables not set");
process.exit(0);
}
demoRedis();
}

View File

@@ -0,0 +1,49 @@
import { Memory } from "../../src";
import { runTests } from "../utils/test-utils";
import dotenv from "dotenv";
// Load environment variables
dotenv.config();
export async function demoSupabase() {
console.log("\n=== Testing Supabase Vector Store ===\n");
const memory = new Memory({
version: "v1.1",
embedder: {
provider: "openai",
config: {
apiKey: process.env.OPENAI_API_KEY || "",
model: "text-embedding-3-small",
},
},
vectorStore: {
provider: "supabase",
config: {
collectionName: "memories",
embeddingModelDims: 1536,
supabaseUrl: process.env.SUPABASE_URL || "",
supabaseKey: process.env.SUPABASE_KEY || "",
tableName: "memories",
},
},
llm: {
provider: "openai",
config: {
apiKey: process.env.OPENAI_API_KEY || "",
model: "gpt-4-turbo-preview",
},
},
historyDbPath: "memory.db",
});
await runTests(memory);
}
if (require.main === module) {
if (!process.env.SUPABASE_URL || !process.env.SUPABASE_KEY) {
console.log("\nSkipping Supabase test - environment variables not set");
process.exit(0);
}
demoSupabase();
}

View File

@@ -7,8 +7,8 @@
"scripts": { "scripts": {
"build": "tsc", "build": "tsc",
"test": "jest", "test": "jest",
"start": "ts-node examples/basic.ts", "start": "pnpm run example memory",
"example": "ts-node examples/basic.ts", "example": "ts-node examples/vector-stores/index.ts",
"clean": "rimraf dist", "clean": "rimraf dist",
"prepare": "npm run build" "prepare": "npm run build"
}, },

View File

@@ -57,7 +57,7 @@ export class OllamaLLM implements LLM {
arguments: JSON.stringify(call.function.arguments), arguments: JSON.stringify(call.function.arguments),
})), })),
}; };
} }
return response.content || ""; return response.content || "";
} }

View File

@@ -12,6 +12,8 @@ import { VectorStore } from "../vector_stores/base";
import { Qdrant } from "../vector_stores/qdrant"; import { Qdrant } from "../vector_stores/qdrant";
import { RedisDB } from "../vector_stores/redis"; import { RedisDB } from "../vector_stores/redis";
import { OllamaLLM } from "../llms/ollama"; import { OllamaLLM } from "../llms/ollama";
import { SupabaseDB } from "../vector_stores/supabase";
export class EmbedderFactory { export class EmbedderFactory {
static create(provider: string, config: EmbeddingConfig): Embedder { static create(provider: string, config: EmbeddingConfig): Embedder {
switch (provider.toLowerCase()) { switch (provider.toLowerCase()) {
@@ -53,6 +55,8 @@ export class VectorStoreFactory {
return new Qdrant(config as any); // Type assertion needed as config is extended return new Qdrant(config as any); // Type assertion needed as config is extended
case "redis": case "redis":
return new RedisDB(config as any); // Type assertion needed as config is extended return new RedisDB(config as any); // Type assertion needed as config is extended
case "supabase":
return new SupabaseDB(config as any); // Type assertion needed as config is extended
default: default:
throw new Error(`Unsupported vector store provider: ${provider}`); throw new Error(`Unsupported vector store provider: ${provider}`);
} }

View File

@@ -0,0 +1,290 @@
import { createClient, SupabaseClient } from "@supabase/supabase-js";
import { VectorStore } from "./base";
import { SearchFilters, VectorStoreConfig, VectorStoreResult } from "../types";
// Row shape read back from the Supabase table; extra columns are
// tolerated via the index signature.
interface VectorData {
  id: string;
  embedding: number[];
  metadata: Record<string, any>;
  [key: string]: any;
}
// Arguments for the `match_vectors` Postgres RPC (see the SQL
// migration comment below for the function definition).
interface VectorQueryParams {
  query_embedding: number[];
  match_count: number;
  filter?: SearchFilters;
}
// Row returned by `match_vectors`: similarity is 1 - (embedding <=>
// query_embedding), per the migration SQL below.
interface VectorSearchResult {
  id: string;
  similarity: number;
  metadata: Record<string, any>;
  [key: string]: any;
}
// Supabase-specific vector-store configuration.
interface SupabaseConfig extends VectorStoreConfig {
  supabaseUrl: string;
  supabaseKey: string;
  tableName: string;
  embeddingColumnName?: string; // defaults to "embedding"
  metadataColumnName?: string; // defaults to "metadata"
}
/*
SQL Migration to run in Supabase SQL Editor:
-- Enable the vector extension
create extension if not exists vector;
-- Create the memories table
create table if not exists memories (
id text primary key,
embedding vector(1536),
metadata jsonb,
created_at timestamp with time zone default timezone('utc', now()),
updated_at timestamp with time zone default timezone('utc', now())
);
-- Create the vector similarity search function
create or replace function match_vectors(
query_embedding vector(1536),
match_count int,
filter jsonb default '{}'::jsonb
)
returns table (
id text,
similarity float,
metadata jsonb
)
language plpgsql
as $$
begin
return query
select
t.id::text,
1 - (t.embedding <=> query_embedding) as similarity,
t.metadata
from memories t
where case
when filter::text = '{}'::text then true
else t.metadata @> filter
end
order by t.embedding <=> query_embedding
limit match_count;
end;
$$;
*/
/**
 * VectorStore implementation backed by a Supabase Postgres table with
 * the pgvector extension. Requires the SQL migration above (table +
 * `match_vectors` RPC) to have been run in the target project.
 */
export class SupabaseDB implements VectorStore {
  private client: SupabaseClient;
  private readonly tableName: string;
  private readonly embeddingColumnName: string;
  private readonly metadataColumnName: string;

  constructor(config: SupabaseConfig) {
    this.client = createClient(config.supabaseUrl, config.supabaseKey);
    this.tableName = config.tableName;
    this.embeddingColumnName = config.embeddingColumnName || "embedding";
    this.metadataColumnName = config.metadataColumnName || "metadata";
    // Fire-and-forget connectivity probe. BUGFIX: do NOT rethrow here —
    // the constructor has already returned, so a rethrow inside this
    // detached promise chain can never reach the caller; it only
    // surfaces as an unhandled promise rejection (a hard crash on
    // modern Node). Log and let the first real operation report errors.
    this.initialize().catch((err) => {
      console.error("Failed to initialize Supabase:", err);
    });
  }

  /**
   * Verifies the table exists and accepts vector writes by upserting
   * (then deleting) a throwaway zero vector. BUGFIX: uses upsert
   * instead of insert so a probe row left behind by a crashed earlier
   * run cannot cause a misleading duplicate-key failure.
   */
  private async initialize(): Promise<void> {
    try {
      const testVector = Array(1536).fill(0);
      const { error: testError } = await this.client
        .from(this.tableName)
        .upsert({
          id: "test_vector",
          [this.embeddingColumnName]: testVector,
          [this.metadataColumnName]: {},
        })
        .select();
      if (testError) {
        console.error("Test insert error:", testError);
        throw new Error(
          `Vector operations failed. Please ensure:
          1. The vector extension is enabled
          2. The table "${this.tableName}" exists with correct schema
          3. The match_vectors function is created
          See the SQL migration instructions in the code comments.`,
        );
      }
      // Clean up test vector
      await this.client.from(this.tableName).delete().eq("id", "test_vector");
      console.log("Connected to Supabase successfully");
    } catch (error) {
      console.error("Error during Supabase initialization:", error);
      throw error;
    }
  }

  /**
   * Bulk-inserts vectors with their payloads. A `created_at` timestamp
   * is stamped into each payload's metadata.
   */
  async insert(
    vectors: number[][],
    ids: string[],
    payloads: Record<string, any>[],
  ): Promise<void> {
    try {
      const data = vectors.map((vector, idx) => ({
        id: ids[idx],
        [this.embeddingColumnName]: vector,
        [this.metadataColumnName]: {
          ...payloads[idx],
          created_at: new Date().toISOString(),
        },
      }));
      const { error } = await this.client.from(this.tableName).insert(data);
      if (error) throw error;
    } catch (error) {
      console.error("Error during vector insert:", error);
      throw error;
    }
  }

  /**
   * Similarity search via the `match_vectors` RPC; optional metadata
   * containment filter. Returns results ordered by similarity.
   */
  async search(
    query: number[],
    limit: number = 5,
    filters?: SearchFilters,
  ): Promise<VectorStoreResult[]> {
    try {
      const rpcQuery: VectorQueryParams = {
        query_embedding: query,
        match_count: limit,
      };
      if (filters) {
        rpcQuery.filter = filters;
      }
      const { data, error } = await this.client.rpc("match_vectors", rpcQuery);
      if (error) throw error;
      if (!data) return [];
      const results = data as VectorSearchResult[];
      return results.map((result) => ({
        id: result.id,
        payload: result.metadata,
        score: result.similarity,
      }));
    } catch (error) {
      console.error("Error during vector search:", error);
      throw error;
    }
  }

  /** Fetches one vector row by id; resolves to null when absent. */
  async get(vectorId: string): Promise<VectorStoreResult | null> {
    try {
      const { data, error } = await this.client
        .from(this.tableName)
        .select("*")
        .eq("id", vectorId)
        // BUGFIX: .single() errors on zero rows, which made the
        // declared `| null` return unreachable; .maybeSingle() yields
        // null data instead.
        .maybeSingle();
      if (error) throw error;
      if (!data) return null;
      return {
        id: data.id,
        payload: data[this.metadataColumnName],
      };
    } catch (error) {
      console.error("Error getting vector:", error);
      throw error;
    }
  }

  /**
   * Replaces a vector and its payload; an `updated_at` timestamp is
   * stamped into the new metadata.
   */
  async update(
    vectorId: string,
    vector: number[],
    payload: Record<string, any>,
  ): Promise<void> {
    try {
      const { error } = await this.client
        .from(this.tableName)
        .update({
          [this.embeddingColumnName]: vector,
          [this.metadataColumnName]: {
            ...payload,
            updated_at: new Date().toISOString(),
          },
        })
        .eq("id", vectorId);
      if (error) throw error;
    } catch (error) {
      console.error("Error during vector update:", error);
      throw error;
    }
  }

  /** Deletes a single vector row by id. */
  async delete(vectorId: string): Promise<void> {
    try {
      const { error } = await this.client
        .from(this.tableName)
        .delete()
        .eq("id", vectorId);
      if (error) throw error;
    } catch (error) {
      console.error("Error deleting vector:", error);
      throw error;
    }
  }

  /** Removes every row in the table (PostgREST deletes need a filter). */
  async deleteCol(): Promise<void> {
    try {
      const { error } = await this.client
        .from(this.tableName)
        .delete()
        // BUGFIX: gte("id", "") matches every text id, including the
        // empty string that the previous neq("id", "") filter skipped.
        .gte("id", "");
      if (error) throw error;
    } catch (error) {
      console.error("Error deleting collection:", error);
      throw error;
    }
  }

  /**
   * Lists up to `limit` rows, optionally filtered by exact matches on
   * top-level metadata keys. Returns [results, totalCount].
   */
  async list(
    filters?: SearchFilters,
    limit: number = 100,
  ): Promise<[VectorStoreResult[], number]> {
    try {
      let query = this.client
        .from(this.tableName)
        .select("*", { count: "exact" })
        .limit(limit);
      if (filters) {
        Object.entries(filters).forEach(([key, value]) => {
          query = query.eq(`${this.metadataColumnName}->>${key}`, value);
        });
      }
      const { data, error, count } = await query;
      if (error) throw error;
      // Guard against a null data payload so .map cannot throw.
      const results = (data ?? []).map((item: VectorData) => ({
        id: item.id,
        payload: item[this.metadataColumnName],
      }));
      return [results, count || 0];
    } catch (error) {
      console.error("Error listing vectors:", error);
      throw error;
    }
  }
}