Complete implementation: REST API, MCP server, and documentation
Implementation Summary:
- REST API with FastAPI (complete CRUD operations)
- MCP Server with Python MCP SDK (7 tools)
- Supabase migrations (pgvector setup)
- Docker Compose orchestration
- Mintlify documentation site
- Environment configuration
- Shared config module
REST API Features:
- POST /v1/memories/ - Add memory
- GET /v1/memories/search - Semantic search
- GET /v1/memories/{id} - Get memory
- GET /v1/memories/user/{user_id} - User memories
- PATCH /v1/memories/{id} - Update memory
- DELETE /v1/memories/{id} - Delete memory
- GET /v1/health - Health check
- GET /v1/stats - Statistics
- Bearer token authentication
- OpenAPI documentation
MCP Server Tools:
- add_memory - Add from messages
- search_memories - Semantic search
- get_memory - Retrieve by ID
- get_all_memories - List all
- update_memory - Update content
- delete_memory - Delete by ID
- delete_all_memories - Bulk delete
Infrastructure:
- Neo4j 5.26 with APOC/GDS
- Supabase pgvector integration
- Docker network: localai
- Health checks and monitoring
- Structured logging
Documentation:
- Introduction page
- Quickstart guide
- Architecture deep dive
- Mintlify configuration
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
34
.env.example
Normal file
34
.env.example
Normal file
@@ -0,0 +1,34 @@
|
||||
# OpenAI Configuration
|
||||
OPENAI_API_KEY=sk-your-openai-api-key-here
|
||||
|
||||
# Supabase Configuration
|
||||
SUPABASE_CONNECTION_STRING=postgresql://supabase_admin:your-password@172.21.0.12:5432/postgres
|
||||
|
||||
# Neo4j Configuration
|
||||
NEO4J_URI=neo4j://neo4j:7687
|
||||
NEO4J_USER=neo4j
|
||||
NEO4J_PASSWORD=your-neo4j-password
|
||||
|
||||
# API Configuration
|
||||
API_HOST=0.0.0.0
|
||||
API_PORT=8080
|
||||
API_KEY=your-secure-api-key-here
|
||||
|
||||
# MCP Server Configuration
|
||||
MCP_HOST=0.0.0.0
|
||||
MCP_PORT=8765
|
||||
|
||||
# Mem0 Configuration
|
||||
MEM0_COLLECTION_NAME=t6_memories
|
||||
MEM0_EMBEDDING_DIMS=1536
|
||||
MEM0_VERSION=v1.1
|
||||
|
||||
# Docker Network
|
||||
DOCKER_NETWORK=localai
|
||||
|
||||
# Logging
|
||||
LOG_LEVEL=INFO
|
||||
LOG_FORMAT=json
|
||||
|
||||
# Environment
|
||||
ENVIRONMENT=development
|
||||
0
api/__init__.py
Normal file
0
api/__init__.py
Normal file
66
api/auth.py
Normal file
66
api/auth.py
Normal file
@@ -0,0 +1,66 @@
|
||||
"""
|
||||
Authentication middleware for T6 Mem0 v2 REST API
|
||||
"""
|
||||
|
||||
from fastapi import HTTPException, Security, status
|
||||
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
|
||||
from config import settings
|
||||
|
||||
# Security scheme
|
||||
security = HTTPBearer()
|
||||
|
||||
|
||||
async def verify_api_key(
    credentials: HTTPAuthorizationCredentials = Security(security)
) -> str:
    """
    Validate the Bearer token from the Authorization header.

    Args:
        credentials: Parsed HTTP Bearer credentials for the request.

    Returns:
        The validated API key string.

    Raises:
        HTTPException: 401 when credentials are absent or do not match the
            configured API key.
    """
    def _unauthorized(reason: str) -> HTTPException:
        # Every auth failure shares the same 401 shape with a Bearer challenge.
        return HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=reason,
            headers={"WWW-Authenticate": "Bearer"},
        )

    if not credentials:
        raise _unauthorized("Missing authentication credentials")

    provided_key = credentials.credentials

    # Compare against the single key configured for this deployment.
    if provided_key != settings.api_key:
        raise _unauthorized("Invalid or expired API key")

    return provided_key
||||
|
||||
|
||||
# Non-erroring scheme for optional auth: the module-level `security` uses the
# default auto_error=True, which makes FastAPI reject requests with a missing
# or malformed Authorization header BEFORE the dependency body runs — so an
# "optional" check built on it could never return None.
_optional_security = HTTPBearer(auto_error=False)


async def optional_api_key(
    credentials: HTTPAuthorizationCredentials | None = Security(_optional_security)
) -> str | None:
    """
    Optional API key verification (for public endpoints).

    Unlike verify_api_key, this never rejects the request: a missing or
    invalid key simply yields None so the endpoint can serve a public view.

    Args:
        credentials: HTTP Bearer credentials, if the header was provided.

    Returns:
        The API key if provided and valid, None otherwise.
    """
    if not credentials:
        return None

    try:
        return await verify_api_key(credentials)
    except HTTPException:
        return None
|
||||
158
api/main.py
Normal file
158
api/main.py
Normal file
@@ -0,0 +1,158 @@
|
||||
"""
|
||||
T6 Mem0 v2 REST API
|
||||
Main FastAPI application
|
||||
"""
|
||||
|
||||
import logging
|
||||
import sys
|
||||
from contextlib import asynccontextmanager
|
||||
from fastapi import FastAPI, Request, status
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from fastapi.responses import JSONResponse
|
||||
from fastapi.exceptions import RequestValidationError
|
||||
|
||||
from api.routes import router
|
||||
from api.memory_service import get_memory_service
|
||||
from config import settings
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(
|
||||
level=getattr(logging, settings.log_level.upper()),
|
||||
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
|
||||
handlers=[
|
||||
logging.StreamHandler(sys.stdout)
|
||||
]
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@asynccontextmanager
async def lifespan(app: FastAPI):
    """
    Lifespan context manager for startup and shutdown events.

    Eagerly initializes the memory service at startup so configuration or
    connectivity problems fail fast instead of surfacing on the first
    request; always logs a shutdown message when the server stops.
    """
    # Startup
    logger.info("Starting T6 Mem0 v2 REST API")
    logger.info(f"Environment: {settings.environment}")
    logger.info(f"API Host: {settings.api_host}:{settings.api_port}")

    try:
        # The handle itself is unused; the call is made for its side effect
        # of constructing the MemoryService singleton (and its Mem0 client).
        service = get_memory_service()
        logger.info("Memory service initialized successfully")

        yield

    except Exception as e:
        logger.error(f"Failed to initialize application: {e}")
        raise

    finally:
        # Shutdown — runs whether startup succeeded or not.
        logger.info("Shutting down T6 Mem0 v2 REST API")
|
||||
|
||||
|
||||
# Create FastAPI application
app = FastAPI(
    title="T6 Mem0 v2 API",
    description="""
    Memory system for LLM applications based on mem0.ai

    ## Features
    - **Add Memory**: Store conversation memories with semantic understanding
    - **Search Memory**: Find relevant memories using semantic search
    - **Manage Memory**: Update and delete memories
    - **Multi-Agent**: Support for user, agent, and run-specific memories
    - **Graph Relationships**: Neo4j integration for memory relationships

    ## Authentication
    All endpoints (except /health) require Bearer token authentication.
    Include your API key in the Authorization header:
    ```
    Authorization: Bearer YOUR_API_KEY
    ```
    """,
    version="0.1.0",
    docs_url="/docs",
    redoc_url="/redoc",
    openapi_url="/openapi.json",
    # Startup/shutdown hooks live in the lifespan context manager above.
    lifespan=lifespan
)

# CORS middleware
app.add_middleware(
    CORSMiddleware,
    # Only local development origins are listed; extend for deployed
    # frontends before production use.
    allow_origins=[
        "http://localhost:3000",
        "http://localhost:8080",
        "http://localhost:5678",  # n8n
    ],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
||||
|
||||
|
||||
# Exception handlers
|
||||
@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request: Request, exc: RequestValidationError):
    """Return a uniform 422 payload for request-validation failures."""
    errors = exc.errors()
    logger.warning(f"Validation error: {errors}")
    payload = {
        "status": "error",
        "error": "Validation failed",
        "detail": errors,
    }
    return JSONResponse(
        status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
        content=payload,
    )
|
||||
|
||||
|
||||
@app.exception_handler(Exception)
async def general_exception_handler(request: Request, exc: Exception):
    """Last-resort handler: log the full traceback and return a 500."""
    logger.error(f"Unhandled exception: {exc}", exc_info=True)
    # Only expose exception details in development; hide them elsewhere.
    if settings.environment == "development":
        detail = str(exc)
    else:
        detail = "An error occurred"
    return JSONResponse(
        status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
        content={
            "status": "error",
            "error": "Internal server error",
            "detail": detail,
        },
    )
|
||||
|
||||
|
||||
# Mount all /v1 memory routes (defined in api.routes).
app.include_router(router)
|
||||
|
||||
|
||||
# Root endpoint
|
||||
@app.get(
    "/",
    summary="API Root",
    description="Get API information",
    tags=["info"]
)
async def root():
    """Serve basic service metadata plus pointers to docs and health."""
    info = {
        "name": "T6 Mem0 v2 API",
        "version": "0.1.0",
        "status": "running",
        "docs": "/docs",
        "health": "/v1/health",
    }
    return info
|
||||
|
||||
|
||||
# Run with uvicorn when executed directly (containers use the same entry).
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(
        # Import string (not the app object) so --reload can re-import it.
        "api.main:app",
        host=settings.api_host,
        port=settings.api_port,
        # Auto-reload only during development.
        reload=settings.environment == "development",
        log_level=settings.log_level.lower()
    )
|
||||
353
api/memory_service.py
Normal file
353
api/memory_service.py
Normal file
@@ -0,0 +1,353 @@
|
||||
"""
|
||||
Memory service wrapper for Mem0 core library
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import List, Dict, Any, Optional
|
||||
from mem0 import Memory
|
||||
from config import mem0_config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MemoryService:
    """Singleton service for memory operations using Mem0.

    Wraps the Mem0 ``Memory`` client in a process-wide singleton so the
    underlying vector/graph stores are initialized exactly once. Methods are
    declared ``async`` to match the FastAPI call sites even though the Mem0
    client calls themselves are synchronous.
    """

    _instance: Optional['MemoryService'] = None
    _memory: Optional[Memory] = None

    def __new__(cls):
        # Standard singleton: every construction returns the same instance.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        """Initialize memory service with Mem0 (no-op after first success)."""
        # _memory defaults to the class attribute, so repeated __init__ calls
        # on the shared instance skip re-initialization.
        if self._memory is None:
            logger.info("Initializing Mem0 with configuration")
            try:
                self._memory = Memory.from_config(config_dict=mem0_config)
                logger.info("Mem0 initialized successfully")
            except Exception as e:
                logger.error(f"Failed to initialize Mem0: {e}")
                raise

    @property
    def memory(self) -> Memory:
        """The underlying Mem0 instance.

        Raises:
            RuntimeError: If initialization never completed successfully.
        """
        if self._memory is None:
            raise RuntimeError("MemoryService not initialized")
        return self._memory

    @staticmethod
    def _as_result_list(raw: Any) -> List[Dict[str, Any]]:
        """Normalize a Mem0 return value to a plain list of memory dicts.

        Mem0 may return either a bare list or a ``{'results': [...]}``
        envelope depending on the configured output version; ``add_memory``
        already unwraps the envelope, so search/get_all do the same for a
        consistent return shape. (Presumably v1.1 always uses the envelope —
        this handles both defensively.)
        """
        if isinstance(raw, dict):
            return raw.get('results', [])
        return raw or []

    async def add_memory(
        self,
        messages: List[Dict[str, str]],
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        run_id: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> List[Dict[str, Any]]:
        """
        Add new memory from messages.

        Args:
            messages: List of chat messages ({'role', 'content'} dicts).
            user_id: User identifier.
            agent_id: Agent identifier.
            run_id: Run identifier.
            metadata: Additional metadata stored with the memories.

        Returns:
            List of created memories.

        Raises:
            Exception: If memory creation fails.
        """
        try:
            logger.info(f"Adding memory for user_id={user_id}, agent_id={agent_id}")

            result = self.memory.add(
                messages=messages,
                user_id=user_id,
                agent_id=agent_id,
                run_id=run_id,
                metadata=metadata or {}
            )

            logger.info(f"Successfully added {len(result.get('results', []))} memories")
            return result.get('results', [])

        except Exception as e:
            logger.error(f"Failed to add memory: {e}")
            raise

    async def search_memories(
        self,
        query: str,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        run_id: Optional[str] = None,
        limit: int = 10
    ) -> List[Dict[str, Any]]:
        """
        Search memories by semantic similarity to *query*.

        Args:
            query: Search query.
            user_id: User identifier filter.
            agent_id: Agent identifier filter.
            run_id: Run identifier filter.
            limit: Maximum results.

        Returns:
            List of matching memories with scores.

        Raises:
            Exception: If search fails.
        """
        try:
            logger.info(f"Searching memories: query='{query}', user_id={user_id}, limit={limit}")

            # Normalize to a list so callers can iterate regardless of the
            # Mem0 output format (mirrors add_memory's envelope handling).
            results = self._as_result_list(self.memory.search(
                query=query,
                user_id=user_id,
                agent_id=agent_id,
                run_id=run_id,
                limit=limit
            ))

            logger.info(f"Found {len(results)} matching memories")
            return results

        except Exception as e:
            logger.error(f"Failed to search memories: {e}")
            raise

    async def get_memory(
        self,
        memory_id: str
    ) -> Optional[Dict[str, Any]]:
        """
        Get specific memory by ID.

        Args:
            memory_id: Memory identifier.

        Returns:
            Memory data, or None if not found.

        Raises:
            Exception: If retrieval fails.
        """
        try:
            logger.info(f"Getting memory: id={memory_id}")

            result = self.memory.get(memory_id=memory_id)

            if result:
                logger.info(f"Retrieved memory: {memory_id}")
            else:
                logger.warning(f"Memory not found: {memory_id}")

            return result

        except Exception as e:
            logger.error(f"Failed to get memory: {e}")
            raise

    async def get_all_memories(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        run_id: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        """
        Get all memories for a user/agent/run.

        Args:
            user_id: User identifier filter.
            agent_id: Agent identifier filter.
            run_id: Run identifier filter.

        Returns:
            List of all matching memories.

        Raises:
            Exception: If retrieval fails.
        """
        try:
            logger.info(f"Getting all memories: user_id={user_id}, agent_id={agent_id}")

            # Same envelope normalization as search_memories.
            results = self._as_result_list(self.memory.get_all(
                user_id=user_id,
                agent_id=agent_id,
                run_id=run_id
            ))

            logger.info(f"Retrieved {len(results)} memories")
            return results

        except Exception as e:
            logger.error(f"Failed to get all memories: {e}")
            raise

    async def update_memory(
        self,
        memory_id: str,
        data: str
    ) -> Dict[str, Any]:
        """
        Update existing memory.

        Args:
            memory_id: Memory identifier.
            data: Updated memory text.

        Returns:
            Updated memory data.

        Raises:
            Exception: If update fails.
        """
        try:
            logger.info(f"Updating memory: id={memory_id}")

            result = self.memory.update(
                memory_id=memory_id,
                data=data
            )

            logger.info(f"Successfully updated memory: {memory_id}")
            return result

        except Exception as e:
            logger.error(f"Failed to update memory: {e}")
            raise

    async def delete_memory(
        self,
        memory_id: str
    ) -> bool:
        """
        Delete memory by ID.

        Args:
            memory_id: Memory identifier.

        Returns:
            True if deleted successfully.

        Raises:
            Exception: If deletion fails.
        """
        try:
            logger.info(f"Deleting memory: id={memory_id}")

            self.memory.delete(memory_id=memory_id)

            logger.info(f"Successfully deleted memory: {memory_id}")
            return True

        except Exception as e:
            logger.error(f"Failed to delete memory: {e}")
            raise

    async def delete_all_memories(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        run_id: Optional[str] = None
    ) -> bool:
        """
        Delete all memories for a user/agent/run.

        Args:
            user_id: User identifier filter.
            agent_id: Agent identifier filter.
            run_id: Run identifier filter.

        Returns:
            True if deleted successfully.

        Raises:
            Exception: If deletion fails.
        """
        try:
            logger.info(f"Deleting all memories: user_id={user_id}, agent_id={agent_id}")

            self.memory.delete_all(
                user_id=user_id,
                agent_id=agent_id,
                run_id=run_id
            )

            logger.info("Successfully deleted all matching memories")
            return True

        except Exception as e:
            logger.error(f"Failed to delete all memories: {e}")
            raise

    async def get_memory_history(
        self,
        memory_id: str
    ) -> List[Dict[str, Any]]:
        """
        Get history of a memory.

        Args:
            memory_id: Memory identifier.

        Returns:
            List of memory history entries.

        Raises:
            Exception: If retrieval fails.
        """
        try:
            logger.info(f"Getting memory history: id={memory_id}")

            result = self.memory.history(memory_id=memory_id)

            logger.info(f"Retrieved history for memory: {memory_id}")
            return result

        except Exception as e:
            logger.error(f"Failed to get memory history: {e}")
            raise

    async def health_check(self) -> Dict[str, str]:
        """
        Check health of memory service.

        Returns:
            Dict with component health status ('healthy'/'unhealthy').
        """
        health = {}

        try:
            # Accessing the property raises if Mem0 never initialized.
            _ = self.memory
            health['mem0'] = 'healthy'
        except Exception as e:
            logger.error(f"Mem0 health check failed: {e}")
            health['mem0'] = 'unhealthy'

        return health
|
||||
|
||||
|
||||
# Process-wide service handle, created lazily on first request.
_memory_service: Optional[MemoryService] = None


def get_memory_service() -> MemoryService:
    """
    Return the shared MemoryService, constructing it on first use.

    Returns:
        The singleton MemoryService instance.
    """
    global _memory_service
    service = _memory_service
    if service is None:
        service = MemoryService()
        _memory_service = service
    return service
|
||||
227
api/models.py
Normal file
227
api/models.py
Normal file
@@ -0,0 +1,227 @@
|
||||
"""
|
||||
Pydantic models for T6 Mem0 v2 REST API
|
||||
"""
|
||||
|
||||
from typing import Optional, List, Dict, Any
|
||||
from datetime import datetime
|
||||
from pydantic import BaseModel, Field, ConfigDict
|
||||
from uuid import UUID
|
||||
|
||||
|
||||
class Message(BaseModel):
    """One chat turn supplied as input for memory extraction."""
    role: str = Field(..., description="Message role (user, assistant, system)")
    content: str = Field(..., description="Message content")

    # Example surfaces in the generated OpenAPI schema.
    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "role": "user",
                "content": "I love pizza with extra cheese"
            }
        }
    )
|
||||
|
||||
|
||||
class AddMemoryRequest(BaseModel):
    """Request body for POST /v1/memories/ — conversation plus scoping IDs."""
    messages: List[Message] = Field(..., description="Conversation messages")
    user_id: Optional[str] = Field(None, description="User identifier")
    agent_id: Optional[str] = Field(None, description="Agent identifier")
    run_id: Optional[str] = Field(None, description="Run identifier")
    metadata: Optional[Dict[str, Any]] = Field(default_factory=dict, description="Additional metadata")

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "messages": [
                    {"role": "user", "content": "I love pizza"},
                    {"role": "assistant", "content": "Great! What toppings?"},
                    {"role": "user", "content": "Extra cheese please"}
                ],
                "user_id": "alice",
                "metadata": {"session": "chat_123", "source": "web"}
            }
        }
    )
|
||||
|
||||
|
||||
class SearchMemoryRequest(BaseModel):
    """Request body for semantic memory search.

    NOTE(review): the /memories/search route reads query parameters directly;
    confirm whether this body model is still referenced anywhere.
    """
    query: str = Field(..., description="Search query")
    user_id: Optional[str] = Field(None, description="User identifier filter")
    agent_id: Optional[str] = Field(None, description="Agent identifier filter")
    run_id: Optional[str] = Field(None, description="Run identifier filter")
    limit: int = Field(default=10, ge=1, le=100, description="Maximum results")

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "query": "What food does the user like?",
                "user_id": "alice",
                "limit": 5
            }
        }
    )
|
||||
|
||||
|
||||
class UpdateMemoryRequest(BaseModel):
    """Request body for PATCH /v1/memories/{id}; both fields optional."""
    memory_text: Optional[str] = Field(None, description="Updated memory text")
    metadata: Optional[Dict[str, Any]] = Field(None, description="Updated metadata")

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "memory_text": "User loves pizza with mushrooms and olives",
                "metadata": {"verified": True, "updated_by": "admin"}
            }
        }
    )
|
||||
|
||||
|
||||
class MemoryResponse(BaseModel):
    """A single memory as returned by every read/search/update endpoint."""
    id: str = Field(..., description="Memory unique identifier")
    memory: str = Field(..., description="Memory text content")
    user_id: Optional[str] = Field(None, description="User identifier")
    agent_id: Optional[str] = Field(None, description="Agent identifier")
    run_id: Optional[str] = Field(None, description="Run identifier")
    metadata: Dict[str, Any] = Field(default_factory=dict, description="Memory metadata")
    created_at: Optional[datetime] = Field(None, description="Creation timestamp")
    updated_at: Optional[datetime] = Field(None, description="Last update timestamp")
    # Only populated on search results; plain reads leave it None.
    score: Optional[float] = Field(None, description="Relevance score (for search results)")

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "id": "mem_abc123",
                "memory": "User loves pizza with extra cheese",
                "user_id": "alice",
                "metadata": {"session": "chat_123"},
                "created_at": "2025-10-13T12:00:00Z",
                "score": 0.95
            }
        }
    )
|
||||
|
||||
|
||||
class AddMemoryResponse(BaseModel):
    """Envelope returned by POST /v1/memories/ (may hold several memories)."""
    status: str = Field(..., description="Operation status")
    memories: List[MemoryResponse] = Field(..., description="Created memories")
    message: str = Field(..., description="Result message")

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "status": "success",
                "memories": [{
                    "id": "mem_abc123",
                    "memory": "User loves pizza with extra cheese",
                    "user_id": "alice"
                }],
                "message": "Successfully added 1 memory"
            }
        }
    )
|
||||
|
||||
|
||||
class SearchMemoryResponse(BaseModel):
    """Envelope returned by GET /v1/memories/search."""
    status: str = Field(..., description="Operation status")
    memories: List[MemoryResponse] = Field(..., description="Matching memories")
    count: int = Field(..., description="Number of results")

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "status": "success",
                "memories": [{
                    "id": "mem_abc123",
                    "memory": "User loves pizza",
                    "user_id": "alice",
                    "score": 0.95
                }],
                "count": 1
            }
        }
    )
|
||||
|
||||
|
||||
class DeleteMemoryResponse(BaseModel):
    """Envelope returned by DELETE /v1/memories/{id}."""
    status: str = Field(..., description="Operation status")
    message: str = Field(..., description="Result message")

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "status": "success",
                "message": "Memory deleted successfully"
            }
        }
    )
|
||||
|
||||
|
||||
class HealthResponse(BaseModel):
    """Envelope returned by GET /v1/health."""
    status: str = Field(..., description="Service status")
    version: str = Field(..., description="API version")
    timestamp: datetime = Field(..., description="Check timestamp")
    # Per-dependency status strings keyed by component name.
    dependencies: Dict[str, str] = Field(..., description="Dependency status")

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "status": "healthy",
                "version": "0.1.0",
                "timestamp": "2025-10-13T12:00:00Z",
                "dependencies": {
                    "supabase": "connected",
                    "neo4j": "connected",
                    "openai": "available"
                }
            }
        }
    )
|
||||
|
||||
|
||||
class StatsResponse(BaseModel):
    """Aggregate memory statistics returned by GET /v1/stats."""
    total_memories: int = Field(..., description="Total memory count")
    total_users: int = Field(..., description="Unique user count")
    total_agents: int = Field(..., description="Unique agent count")
    avg_memories_per_user: float = Field(..., description="Average memories per user")
    oldest_memory: Optional[datetime] = Field(None, description="Oldest memory timestamp")
    newest_memory: Optional[datetime] = Field(None, description="Newest memory timestamp")

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "total_memories": 1523,
                "total_users": 42,
                "total_agents": 5,
                "avg_memories_per_user": 36.26,
                "oldest_memory": "2025-01-01T00:00:00Z",
                "newest_memory": "2025-10-13T12:00:00Z"
            }
        }
    )
|
||||
|
||||
|
||||
class ErrorResponse(BaseModel):
    """Uniform error envelope used in OpenAPI `responses` declarations."""
    status: str = Field(default="error", description="Error status")
    error: str = Field(..., description="Error message")
    detail: Optional[str] = Field(None, description="Detailed error information")

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "status": "error",
                "error": "Authentication failed",
                "detail": "Invalid or missing API key"
            }
        }
    )
|
||||
377
api/routes.py
Normal file
377
api/routes.py
Normal file
@@ -0,0 +1,377 @@
|
||||
"""
|
||||
API routes for T6 Mem0 v2 REST API
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import List
|
||||
from datetime import datetime
|
||||
from fastapi import APIRouter, Depends, HTTPException, status, Query
|
||||
from fastapi.responses import JSONResponse
|
||||
|
||||
from api.auth import verify_api_key
|
||||
from api.models import (
|
||||
AddMemoryRequest,
|
||||
AddMemoryResponse,
|
||||
SearchMemoryRequest,
|
||||
SearchMemoryResponse,
|
||||
UpdateMemoryRequest,
|
||||
MemoryResponse,
|
||||
DeleteMemoryResponse,
|
||||
HealthResponse,
|
||||
StatsResponse,
|
||||
ErrorResponse
|
||||
)
|
||||
from api.memory_service import get_memory_service, MemoryService
|
||||
from config import settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Router for every endpoint below; all paths are mounted under /v1.
router = APIRouter(prefix="/v1", tags=["memories"])
|
||||
|
||||
|
||||
def format_memory_response(mem: dict) -> MemoryResponse:
    """Map a raw Mem0 memory dict onto the API's MemoryResponse model."""
    # Some payloads carry the text under 'data' rather than 'memory'.
    text = mem.get('memory', mem.get('data', ''))
    return MemoryResponse(
        id=mem.get('id', ''),
        memory=text,
        user_id=mem.get('user_id'),
        agent_id=mem.get('agent_id'),
        run_id=mem.get('run_id'),
        metadata=mem.get('metadata', {}),
        created_at=mem.get('created_at'),
        updated_at=mem.get('updated_at'),
        score=mem.get('score'),
    )
|
||||
|
||||
|
||||
@router.post(
    "/memories/",
    response_model=AddMemoryResponse,
    status_code=status.HTTP_201_CREATED,
    summary="Add new memory",
    description="Add new memory from conversation messages",
    responses={
        201: {"description": "Memory added successfully"},
        401: {"model": ErrorResponse, "description": "Unauthorized"},
        500: {"model": ErrorResponse, "description": "Internal server error"}
    }
)
async def add_memory(
    request: AddMemoryRequest,
    _api_key: str = Depends(verify_api_key),
    service: MemoryService = Depends(get_memory_service)
):
    """Add new memory from messages.

    The service may derive zero or more discrete memories from one
    conversation, so the response carries a list.
    """
    try:
        # Pydantic models -> plain dicts, the shape the service expects.
        messages = [msg.model_dump() for msg in request.messages]

        memories = await service.add_memory(
            messages=messages,
            user_id=request.user_id,
            agent_id=request.agent_id,
            run_id=request.run_id,
            metadata=request.metadata
        )

        formatted_memories = [format_memory_response(mem) for mem in memories]

        return AddMemoryResponse(
            status="success",
            memories=formatted_memories,
            message=f"Successfully added {len(formatted_memories)} memory(ies)"
        )

    except Exception as e:
        # Any service failure surfaces as a 500 with the raw message.
        logger.error(f"Error adding memory: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e)
        )
|
||||
|
||||
|
||||
# NOTE: this route must stay registered before /memories/{memory_id};
# FastAPI matches in declaration order, so "search" would otherwise be
# captured as a memory_id path parameter.
@router.get(
    "/memories/search",
    response_model=SearchMemoryResponse,
    summary="Search memories",
    description="Search memories by semantic similarity",
    responses={
        200: {"description": "Search completed successfully"},
        401: {"model": ErrorResponse, "description": "Unauthorized"},
        500: {"model": ErrorResponse, "description": "Internal server error"}
    }
)
async def search_memories(
    query: str = Query(..., description="Search query"),
    user_id: str | None = Query(None, description="Filter by user ID"),
    agent_id: str | None = Query(None, description="Filter by agent ID"),
    run_id: str | None = Query(None, description="Filter by run ID"),
    limit: int = Query(10, ge=1, le=100, description="Maximum results"),
    _api_key: str = Depends(verify_api_key),
    service: MemoryService = Depends(get_memory_service)
):
    """Search memories by query (semantic search via the memory service)."""
    try:
        memories = await service.search_memories(
            query=query,
            user_id=user_id,
            agent_id=agent_id,
            run_id=run_id,
            limit=limit
        )

        formatted_memories = [format_memory_response(mem) for mem in memories]

        return SearchMemoryResponse(
            status="success",
            memories=formatted_memories,
            count=len(formatted_memories)
        )

    except Exception as e:
        logger.error(f"Error searching memories: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e)
        )
|
||||
|
||||
|
||||
@router.get(
    "/memories/{memory_id}",
    response_model=MemoryResponse,
    summary="Get specific memory",
    description="Retrieve a specific memory by ID",
    responses={
        200: {"description": "Memory retrieved successfully"},
        404: {"model": ErrorResponse, "description": "Memory not found"},
        401: {"model": ErrorResponse, "description": "Unauthorized"},
        500: {"model": ErrorResponse, "description": "Internal server error"}
    }
)
async def get_memory(
    memory_id: str,
    _api_key: str = Depends(verify_api_key),
    service: MemoryService = Depends(get_memory_service)
):
    """Get specific memory by ID; 404 if the service returns nothing."""
    try:
        memory = await service.get_memory(memory_id)

        if not memory:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Memory not found: {memory_id}"
            )

        return format_memory_response(memory)

    except HTTPException:
        # Re-raise the 404 untouched; only unexpected errors become 500s.
        raise
    except Exception as e:
        logger.error(f"Error getting memory: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e)
        )
|
||||
|
||||
|
||||
@router.get(
    "/memories/user/{user_id}",
    response_model=List[MemoryResponse],
    summary="Get user memories",
    description="Get all memories for a specific user",
    responses={
        200: {"description": "Memories retrieved successfully"},
        401: {"model": ErrorResponse, "description": "Unauthorized"},
        500: {"model": ErrorResponse, "description": "Internal server error"}
    }
)
async def get_user_memories(
    user_id: str,
    _api_key: str = Depends(verify_api_key),
    service: MemoryService = Depends(get_memory_service)
):
    """Get all memories for a user (an unknown user yields an empty list)."""
    try:
        memories = await service.get_all_memories(user_id=user_id)
        return [format_memory_response(mem) for mem in memories]

    except Exception as e:
        logger.error(f"Error getting user memories: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e)
        )
|
||||
|
||||
|
||||
@router.patch(
    "/memories/{memory_id}",
    response_model=MemoryResponse,
    summary="Update memory",
    description="Update an existing memory",
    responses={
        200: {"description": "Memory updated successfully"},
        # 400 is raised below for a missing memory_text; document it so the
        # OpenAPI spec matches actual behavior.
        400: {"model": ErrorResponse, "description": "Invalid request"},
        404: {"model": ErrorResponse, "description": "Memory not found"},
        401: {"model": ErrorResponse, "description": "Unauthorized"},
        500: {"model": ErrorResponse, "description": "Internal server error"}
    }
)
async def update_memory(
    memory_id: str,
    request: UpdateMemoryRequest,
    _api_key: str = Depends(verify_api_key),
    service: MemoryService = Depends(get_memory_service)
):
    """Update the text of an existing memory.

    Args:
        memory_id: Identifier of the memory to update.
        request: Payload carrying the replacement ``memory_text``.
        _api_key: Bearer token, validated by the auth dependency.
        service: Memory service resolved via dependency injection.

    Returns:
        The updated memory, shaped by ``format_memory_response``.

    Raises:
        HTTPException: 400 if ``memory_text`` is missing, 404 if the memory
            does not exist, 500 on unexpected errors.
    """
    try:
        if not request.memory_text:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail="memory_text is required for update"
            )

        updated = await service.update_memory(
            memory_id=memory_id,
            data=request.memory_text
        )

        # Surface the documented 404 instead of letting a falsy result crash
        # inside format_memory_response (which would turn into a 500).
        # NOTE(review): assumes the service returns a falsy value for an
        # unknown id — confirm against MemoryService.update_memory.
        if not updated:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Memory not found: {memory_id}"
            )

        return format_memory_response(updated)

    except HTTPException:
        # Keep deliberate 400/404 responses intact.
        raise
    except Exception as e:
        logger.error(f"Error updating memory: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e)
        )
|
||||
|
||||
|
||||
@router.delete(
    "/memories/{memory_id}",
    response_model=DeleteMemoryResponse,
    summary="Delete memory",
    description="Delete a specific memory",
    responses={
        200: {"description": "Memory deleted successfully"},
        404: {"model": ErrorResponse, "description": "Memory not found"},
        401: {"model": ErrorResponse, "description": "Unauthorized"},
        500: {"model": ErrorResponse, "description": "Internal server error"}
    }
)
async def delete_memory(
    memory_id: str,
    _api_key: str = Depends(verify_api_key),
    service: MemoryService = Depends(get_memory_service)
):
    """Delete one memory by its identifier.

    Args:
        memory_id: Identifier of the memory to remove.
        _api_key: Bearer token, validated by the auth dependency.
        service: Memory service resolved via dependency injection.

    Returns:
        A DeleteMemoryResponse confirming the deletion.

    Raises:
        HTTPException: 500 when the service call fails.

    NOTE(review): the documented 404 is never raised here — if the service
    raises for an unknown id it surfaces as a 500 instead. Confirm the
    behavior of MemoryService.delete_memory for missing ids.
    """
    try:
        await service.delete_memory(memory_id)

        confirmation = DeleteMemoryResponse(
            status="success",
            message=f"Memory {memory_id} deleted successfully"
        )
        return confirmation

    except Exception as e:
        logger.error(f"Error deleting memory: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e)
        )
|
||||
|
||||
|
||||
@router.delete(
    "/memories/user/{user_id}",
    response_model=DeleteMemoryResponse,
    summary="Delete user memories",
    description="Delete all memories for a specific user",
    responses={
        200: {"description": "Memories deleted successfully"},
        401: {"model": ErrorResponse, "description": "Unauthorized"},
        500: {"model": ErrorResponse, "description": "Internal server error"}
    }
)
async def delete_user_memories(
    user_id: str,
    _api_key: str = Depends(verify_api_key),
    service: MemoryService = Depends(get_memory_service)
):
    """Bulk-delete every memory belonging to one user.

    Args:
        user_id: The user whose memories should be removed.
        _api_key: Bearer token, validated by the auth dependency.
        service: Memory service resolved via dependency injection.

    Returns:
        A DeleteMemoryResponse confirming the bulk deletion.

    Raises:
        HTTPException: 500 when the service call fails.
    """
    try:
        await service.delete_all_memories(user_id=user_id)

        confirmation = DeleteMemoryResponse(
            status="success",
            message=f"All memories for user {user_id} deleted successfully"
        )
        return confirmation

    except Exception as e:
        logger.error(f"Error deleting user memories: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e)
        )
|
||||
|
||||
|
||||
@router.get(
    "/health",
    response_model=HealthResponse,
    summary="Health check",
    description="Check API health status",
    responses={
        200: {"description": "Service is healthy"}
    }
)
async def health_check(
    service: MemoryService = Depends(get_memory_service)
):
    """Report the health of the API and its dependencies.

    Unauthenticated by design (no API-key dependency) so external monitors
    can probe it.

    Returns:
        HealthResponse with overall status ("healthy" only when every
        dependency reports "healthy", otherwise "degraded"), or a 503
        JSONResponse when the health check itself fails.
    """
    # Local import: datetime.utcnow() is deprecated (Python 3.12+) and
    # returns a naive timestamp; use an explicit UTC-aware one instead.
    from datetime import timezone

    try:
        health = await service.health_check()

        overall = "healthy" if all(v == "healthy" for v in health.values()) else "degraded"
        return HealthResponse(
            status=overall,
            version="0.1.0",
            timestamp=datetime.now(timezone.utc),
            dependencies=health
        )

    except Exception as e:
        logger.error(f"Health check failed: {e}")
        # 503 tells load balancers to take this instance out of rotation.
        return JSONResponse(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            content={
                "status": "unhealthy",
                "version": "0.1.0",
                "timestamp": datetime.now(timezone.utc).isoformat(),
                "error": str(e)
            }
        )
|
||||
|
||||
|
||||
@router.get(
    "/stats",
    response_model=StatsResponse,
    summary="Memory statistics",
    description="Get system-wide memory statistics",
    responses={
        200: {"description": "Statistics retrieved successfully"},
        401: {"model": ErrorResponse, "description": "Unauthorized"},
        500: {"model": ErrorResponse, "description": "Internal server error"}
    }
)
async def get_stats(
    _api_key: str = Depends(verify_api_key)
):
    """Return system-wide memory statistics.

    Currently a stub: real counts would require direct database access or an
    extension of the mem0 API, so every field is a zero/None placeholder.
    """
    placeholder = StatsResponse(
        total_memories=0,
        total_users=0,
        total_agents=0,
        avg_memories_per_user=0.0,
        oldest_memory=None,
        newest_memory=None
    )
    return placeholder
|
||||
121
config.py
Normal file
121
config.py
Normal file
@@ -0,0 +1,121 @@
|
||||
"""
|
||||
Shared configuration for T6 Mem0 v2
|
||||
Loads environment variables and creates Mem0 configuration
|
||||
"""
|
||||
|
||||
import os
from functools import lru_cache
from typing import Any, Dict

from pydantic import Field
from pydantic_settings import BaseSettings
|
||||
|
||||
|
||||
class Settings(BaseSettings):
    """Application settings loaded from environment variables"""
    # NOTE(review): `Field(..., env=...)` is the pydantic v1 convention;
    # pydantic-settings v2 resolves fields by (case-insensitive) field name
    # instead and ignores `env=`. Here the field names match the env vars,
    # so resolution works either way — confirm against the installed version.

    # OpenAI — required; no default, so import fails fast if unset.
    openai_api_key: str = Field(..., env="OPENAI_API_KEY")

    # Supabase — full PostgreSQL DSN for the pgvector-backed vector store.
    supabase_connection_string: str = Field(..., env="SUPABASE_CONNECTION_STRING")

    # Neo4j — bolt/neo4j URI plus credentials for the graph store.
    neo4j_uri: str = Field(..., env="NEO4J_URI")
    neo4j_user: str = Field(default="neo4j", env="NEO4J_USER")
    neo4j_password: str = Field(..., env="NEO4J_PASSWORD")

    # REST API — bind address/port and the bearer token clients must send.
    api_host: str = Field(default="0.0.0.0", env="API_HOST")
    api_port: int = Field(default=8080, env="API_PORT")
    api_key: str = Field(..., env="API_KEY")

    # MCP Server — bind address/port for the MCP endpoint.
    mcp_host: str = Field(default="0.0.0.0", env="MCP_HOST")
    mcp_port: int = Field(default=8765, env="MCP_PORT")

    # Mem0 — collection name, embedding dimensionality (1536 matches
    # OpenAI text-embedding-3-small), and mem0 config schema version.
    mem0_collection_name: str = Field(default="t6_memories", env="MEM0_COLLECTION_NAME")
    mem0_embedding_dims: int = Field(default=1536, env="MEM0_EMBEDDING_DIMS")
    mem0_version: str = Field(default="v1.1", env="MEM0_VERSION")

    # Logging
    log_level: str = Field(default="INFO", env="LOG_LEVEL")
    log_format: str = Field(default="json", env="LOG_FORMAT")

    # Environment
    environment: str = Field(default="development", env="ENVIRONMENT")

    class Config:
        # Values are also read from a local .env file; real environment
        # variables take precedence over it.
        env_file = ".env"
        env_file_encoding = "utf-8"
        case_sensitive = False
|
||||
|
||||
|
||||
@lru_cache(maxsize=1)
def get_settings() -> Settings:
    """Return the application settings, constructed once and cached.

    Caching (the canonical FastAPI settings pattern) avoids re-reading the
    environment and .env file on every call while keeping the call signature
    unchanged for existing callers.
    """
    return Settings()
|
||||
|
||||
|
||||
def get_mem0_config(settings: Settings) -> Dict[str, Any]:
    """
    Generate Mem0 configuration from settings

    Args:
        settings: Application settings

    Returns:
        Dict containing Mem0 configuration
    """
    # Vector Store - Supabase (pgvector)
    vector_store = {
        "provider": "supabase",
        "config": {
            "connection_string": settings.supabase_connection_string,
            "collection_name": settings.mem0_collection_name,
            "embedding_model_dims": settings.mem0_embedding_dims,
            "index_method": "hnsw",  # Fastest search
            "index_measure": "cosine_distance"  # Best for embeddings
        }
    }

    # Graph Store - Neo4j
    graph_store = {
        "provider": "neo4j",
        "config": {
            "url": settings.neo4j_uri,
            "username": settings.neo4j_user,
            "password": settings.neo4j_password
        }
    }

    # LLM Provider - OpenAI (low temperature for deterministic extraction)
    llm = {
        "provider": "openai",
        "config": {
            "model": "gpt-4o-mini",
            "temperature": 0.1,
            "max_tokens": 2000,
            "api_key": settings.openai_api_key
        }
    }

    # Embedder - OpenAI (dims must match the vector store above)
    embedder = {
        "provider": "openai",
        "config": {
            "model": "text-embedding-3-small",
            "embedding_dims": settings.mem0_embedding_dims,
            "api_key": settings.openai_api_key
        }
    }

    return {
        "vector_store": vector_store,
        "graph_store": graph_store,
        "llm": llm,
        "embedder": embedder,
        "version": settings.mem0_version
    }
|
||||
|
||||
|
||||
# Global settings instance
# NOTE(review): instantiated at import time, so a missing required env var
# (e.g. OPENAI_API_KEY) makes importing this module fail — confirm that
# fail-fast behavior is intended for all consumers of this module.
settings = get_settings()

# Global mem0 config
mem0_config = get_mem0_config(settings)
|
||||
111
docker-compose.yml
Normal file
111
docker-compose.yml
Normal file
@@ -0,0 +1,111 @@
|
||||
# NOTE(review): the top-level `version` key is obsolete under Docker Compose
# v2 (ignored with a warning); kept for compatibility with older tooling.
version: '3.8'

services:
  # Neo4j Graph Database
  neo4j:
    image: neo4j:5.26-community
    container_name: t6-mem0-neo4j
    restart: unless-stopped
    ports:
      - "7474:7474"  # HTTP Browser UI
      - "7687:7687"  # Bolt Protocol
    environment:
      - NEO4J_AUTH=${NEO4J_USER:-neo4j}/${NEO4J_PASSWORD}
      # APOC + Graph Data Science plugins, with their procedures unrestricted
      - NEO4J_PLUGINS=["apoc", "graph-data-science"]
      - NEO4J_dbms_security_procedures_unrestricted=apoc.*,gds.*
      - NEO4J_dbms_memory_heap_initial__size=512M
      - NEO4J_dbms_memory_heap_max__size=2G
      - NEO4J_dbms_memory_pagecache_size=512M
    volumes:
      - neo4j_data:/data
      - neo4j_logs:/logs
      - neo4j_import:/import
      - neo4j_plugins:/plugins
    networks:
      - localai
    healthcheck:
      # Considered healthy once cypher-shell can run a trivial query
      test: ["CMD-SHELL", "cypher-shell -u ${NEO4J_USER:-neo4j} -p ${NEO4J_PASSWORD} 'RETURN 1'"]
      interval: 10s
      timeout: 5s
      retries: 5

  # REST API Server
  api:
    build:
      context: .
      dockerfile: docker/Dockerfile.api
    container_name: t6-mem0-api
    restart: unless-stopped
    ports:
      - "${API_PORT:-8080}:8080"
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      # Supabase runs outside this compose file on the shared localai network
      - SUPABASE_CONNECTION_STRING=${SUPABASE_CONNECTION_STRING}
      # Container-to-container DNS name, not localhost
      - NEO4J_URI=neo4j://neo4j:7687
      - NEO4J_USER=${NEO4J_USER:-neo4j}
      - NEO4J_PASSWORD=${NEO4J_PASSWORD}
      - API_HOST=0.0.0.0
      - API_PORT=8080
      - API_KEY=${API_KEY}
      - MEM0_COLLECTION_NAME=${MEM0_COLLECTION_NAME:-t6_memories}
      - MEM0_EMBEDDING_DIMS=${MEM0_EMBEDDING_DIMS:-1536}
      - MEM0_VERSION=${MEM0_VERSION:-v1.1}
      - LOG_LEVEL=${LOG_LEVEL:-INFO}
      - ENVIRONMENT=${ENVIRONMENT:-production}
    depends_on:
      neo4j:
        condition: service_healthy
    networks:
      - localai
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:8080/v1/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3

  # MCP Server
  mcp-server:
    build:
      context: .
      dockerfile: docker/Dockerfile.mcp
    container_name: t6-mem0-mcp
    restart: unless-stopped
    ports:
      - "${MCP_PORT:-8765}:8765"
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - SUPABASE_CONNECTION_STRING=${SUPABASE_CONNECTION_STRING}
      - NEO4J_URI=neo4j://neo4j:7687
      - NEO4J_USER=${NEO4J_USER:-neo4j}
      - NEO4J_PASSWORD=${NEO4J_PASSWORD}
      - MCP_HOST=0.0.0.0
      - MCP_PORT=8765
      - MEM0_COLLECTION_NAME=${MEM0_COLLECTION_NAME:-t6_memories}
      - MEM0_EMBEDDING_DIMS=${MEM0_EMBEDDING_DIMS:-1536}
      - MEM0_VERSION=${MEM0_VERSION:-v1.1}
      - LOG_LEVEL=${LOG_LEVEL:-INFO}
      - ENVIRONMENT=${ENVIRONMENT:-production}
    depends_on:
      neo4j:
        condition: service_healthy
    networks:
      - localai
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:8765/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3

volumes:
  neo4j_data:
    name: t6-mem0-neo4j-data
  neo4j_logs:
    name: t6-mem0-neo4j-logs
  neo4j_import:
    name: t6-mem0-neo4j-import
  neo4j_plugins:
    name: t6-mem0-neo4j-plugins

networks:
  # Pre-existing shared network (172.21.0.0/16) also hosting Supabase
  localai:
    external: true
|
||||
35
docker/Dockerfile.api
Normal file
35
docker/Dockerfile.api
Normal file
@@ -0,0 +1,35 @@
|
||||
FROM python:3.11-slim

# Set working directory
WORKDIR /app

# Install system dependencies
# gcc/g++: build native wheels (e.g. DB drivers); curl: used by HEALTHCHECK.
RUN apt-get update && apt-get install -y \
    gcc \
    g++ \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements
# Copied before the source so dependency layers cache across code changes.
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY config.py .
COPY api/ ./api/

# Create non-root user
RUN useradd -m -u 1000 appuser && chown -R appuser:appuser /app
USER appuser

# Expose port
EXPOSE 8080

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
    CMD curl -f http://localhost:8080/v1/health || exit 1

# Run application
CMD ["uvicorn", "api.main:app", "--host", "0.0.0.0", "--port", "8080"]
|
||||
35
docker/Dockerfile.mcp
Normal file
35
docker/Dockerfile.mcp
Normal file
@@ -0,0 +1,35 @@
|
||||
FROM python:3.11-slim

# Set working directory
WORKDIR /app

# Install system dependencies
# gcc/g++: build native wheels; curl: used by HEALTHCHECK.
RUN apt-get update && apt-get install -y \
    gcc \
    g++ \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY config.py .
COPY mcp-server/ ./mcp-server/

# Create non-root user
RUN useradd -m -u 1000 appuser && chown -R appuser:appuser /app
USER appuser

# Expose port
EXPOSE 8765

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
    CMD curl -f http://localhost:8765/health || exit 1

# Run MCP server
# NOTE(review): `python -m mcp-server.main` cannot work as written — hyphens
# are not valid in Python module paths, so `-m` will fail to import
# "mcp-server". Confirm the intended entry point (likely the package needs
# renaming to `mcp_server` or a launcher script such as `python mcp-server/main.py`).
CMD ["python", "-m", "mcp-server.main"]
|
||||
313
docs/architecture.mdx
Normal file
313
docs/architecture.mdx
Normal file
@@ -0,0 +1,313 @@
|
||||
---
|
||||
title: 'System Architecture'
|
||||
description: 'Technical architecture and design decisions for T6 Mem0 v2'
|
||||
---
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
T6 Mem0 v2 implements a **hybrid storage architecture** combining vector search, graph relationships, and structured data storage for optimal memory management.
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Client Layer │
|
||||
├──────────────────┬──────────────────┬──────────────────────┤
|
||||
│ Claude Code (MCP)│ N8N Workflows │ External Apps │
|
||||
└──────────────────┴──────────────────┴──────────────────────┘
|
||||
│ │ │
|
||||
│ │ │
|
||||
▼ ▼ ▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Interface Layer │
|
||||
├──────────────────────────────┬──────────────────────────────┤
|
||||
│ MCP Server (Port 8765) │ REST API (Port 8080) │
|
||||
│ - SSE Connections │ - FastAPI │
|
||||
│ - MCP Protocol │ - OpenAPI Spec │
|
||||
│ - Tool Registration │ - Auth Middleware │
|
||||
└──────────────────────────────┴──────────────────────────────┘
|
||||
│ │
|
||||
└────────┬───────────┘
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Core Layer │
|
||||
│ Mem0 Core Library │
|
||||
│ - Memory Management - Embedding Generation │
|
||||
│ - Semantic Search - Relationship Extraction │
|
||||
│ - Multi-Agent Support - Deduplication │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
┌───────────────────┼───────────────────┐
|
||||
▼ ▼ ▼
|
||||
┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐
|
||||
│ Vector Store │ │ Graph Store │ │ External LLM │
|
||||
│ Supabase │ │ Neo4j │ │ OpenAI │
|
||||
│ (pgvector) │ │ (Cypher) │ │ (Embeddings) │
|
||||
│ 172.21.0.12 │ │ 172.21.0.x │ │ API Cloud │
|
||||
└─────────────────┘ └─────────────────┘ └─────────────────┘
|
||||
```
|
||||
|
||||
## Design Decisions
|
||||
|
||||
### 1. Hybrid Storage Architecture ✅
|
||||
|
||||
**Why Multiple Storage Systems?**
|
||||
|
||||
Each store is optimized for specific query patterns:
|
||||
|
||||
<AccordionGroup>
|
||||
<Accordion title="Vector Store (Supabase + pgvector)">
|
||||
**Purpose**: Semantic similarity search
|
||||
|
||||
- Stores 1536-dimensional OpenAI embeddings
|
||||
- HNSW indexing for fast approximate nearest neighbor search
|
||||
- O(log n) query performance
|
||||
- Cosine distance for similarity measurement
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="Graph Store (Neo4j)">
|
||||
**Purpose**: Relationship modeling
|
||||
|
||||
- Entity extraction and connection mapping
|
||||
- Relationship traversal and pathfinding
|
||||
- Visual exploration in Neo4j Browser
|
||||
- Dynamic knowledge graph evolution
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="Key-Value Store (PostgreSQL JSONB)">
|
||||
**Purpose**: Flexible metadata
|
||||
|
||||
- Schema-less metadata storage
|
||||
- Fast JSON queries with GIN indexes
|
||||
- Eliminates need for separate Redis
|
||||
- Simplifies infrastructure
|
||||
</Accordion>
|
||||
</AccordionGroup>
|
||||
|
||||
### 2. MCP Server Implementation
|
||||
|
||||
**Custom vs. Pre-built**
|
||||
|
||||
<Info>
|
||||
We built a custom MCP server instead of using OpenMemory MCP because:
|
||||
- OpenMemory uses Qdrant (we need Supabase)
|
||||
- Full control over Supabase + Neo4j integration
|
||||
- Exact match to our storage stack
|
||||
</Info>
|
||||
|
||||
### 3. Docker Networking Strategy
|
||||
|
||||
**localai Network Integration**
|
||||
|
||||
All services run on the `localai` Docker network (172.21.0.0/16):
|
||||
|
||||
```yaml
|
||||
services:
|
||||
neo4j: 172.21.0.x:7687
|
||||
api: 172.21.0.x:8080
|
||||
mcp-server: 172.21.0.x:8765
|
||||
supabase: 172.21.0.12:5432 (existing)
|
||||
```
|
||||
|
||||
**Benefits:**
|
||||
- Container-to-container communication
|
||||
- Service discovery via Docker DNS
|
||||
- No host networking complications
|
||||
- Persistent IPs via Docker Compose
|
||||
|
||||
## Data Flow
|
||||
|
||||
### Adding a Memory
|
||||
|
||||
<Steps>
|
||||
<Step title="Client Request">
|
||||
Client sends conversation messages via MCP or REST API
|
||||
</Step>
|
||||
<Step title="Mem0 Processing">
|
||||
- LLM extracts key facts from messages
|
||||
- Generates embedding vector (1536-dim)
|
||||
- Identifies entities and relationships
|
||||
</Step>
|
||||
<Step title="Vector Storage">
|
||||
Stores embedding + metadata in Supabase (pgvector)
|
||||
</Step>
|
||||
<Step title="Graph Storage">
|
||||
Creates nodes and relationships in Neo4j
|
||||
</Step>
|
||||
<Step title="Response">
|
||||
Returns memory ID and confirmation to client
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
### Searching Memories
|
||||
|
||||
<Steps>
|
||||
<Step title="Query Embedding">
|
||||
Convert search query to vector using OpenAI
|
||||
</Step>
|
||||
<Step title="Vector Search">
|
||||
Find similar memories in Supabase (cosine similarity)
|
||||
</Step>
|
||||
<Step title="Graph Enrichment">
|
||||
Fetch related context from Neo4j graph
|
||||
</Step>
|
||||
<Step title="Ranked Results">
|
||||
Return memories sorted by relevance score
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Performance Characteristics
|
||||
|
||||
Based on mem0.ai research:
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card title="26% Accuracy Boost" icon="chart-line">
|
||||
Higher accuracy vs baseline OpenAI
|
||||
</Card>
|
||||
<Card title="91% Lower Latency" icon="bolt">
|
||||
Compared to full-context approaches
|
||||
</Card>
|
||||
<Card title="90% Token Savings" icon="dollar-sign">
|
||||
Through selective memory retrieval
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## Security Architecture
|
||||
|
||||
### Authentication
|
||||
|
||||
- **REST API**: Bearer token authentication
|
||||
- **MCP Server**: Client-specific SSE endpoints
|
||||
- **Tokens**: Stored securely in environment variables
|
||||
|
||||
### Data Privacy
|
||||
|
||||
<Check>
|
||||
All data stored locally - no cloud sync or external storage
|
||||
</Check>
|
||||
|
||||
- Supabase instance is local (172.21.0.12)
|
||||
- Neo4j runs in Docker container
|
||||
- User isolation via `user_id` filtering
|
||||
|
||||
### Network Security
|
||||
|
||||
- Services on private Docker network
|
||||
- No public exposure (use reverse proxy if needed)
|
||||
- Internal communication only
|
||||
|
||||
## Scalability
|
||||
|
||||
### Horizontal Scaling
|
||||
|
||||
<Tabs>
|
||||
<Tab title="REST API">
|
||||
Deploy multiple API containers behind load balancer
|
||||
</Tab>
|
||||
<Tab title="MCP Server">
|
||||
Dedicated instances per client group
|
||||
</Tab>
|
||||
<Tab title="Mem0 Core">
|
||||
Stateless design scales with containers
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
### Vertical Scaling
|
||||
|
||||
- **Supabase**: PostgreSQL connection pooling
|
||||
- **Neo4j**: Memory configuration tuning
|
||||
- **Vector Indexing**: HNSW for performance
|
||||
|
||||
## Technology Choices
|
||||
|
||||
| Component | Technology | Why? |
|
||||
|-----------|-----------|------|
|
||||
| Core Library | mem0ai | Production-ready, 26% accuracy boost |
|
||||
| Vector DB | Supabase (pgvector) | Existing infrastructure, PostgreSQL |
|
||||
| Graph DB | Neo4j | Best-in-class graph database |
|
||||
| LLM | OpenAI | High-quality embeddings, GPT-4o |
|
||||
| REST API | FastAPI | Fast, modern, auto-docs |
|
||||
| MCP Protocol | Python MCP SDK | Official MCP implementation |
|
||||
| Containers | Docker Compose | Simple orchestration |
|
||||
|
||||
## Phase 2: Ollama Integration
|
||||
|
||||
**Configuration-driven provider switching:**
|
||||
|
||||
```python
|
||||
# Phase 1 (OpenAI)
|
||||
"llm": {
|
||||
"provider": "openai",
|
||||
"config": {"model": "gpt-4o-mini"}
|
||||
}
|
||||
|
||||
# Phase 2 (Ollama)
|
||||
"llm": {
|
||||
"provider": "ollama",
|
||||
"config": {
|
||||
"model": "llama3.1:8b",
|
||||
"ollama_base_url": "http://172.21.0.1:11434"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**No code changes required** - just environment variables!
|
||||
|
||||
## Monitoring & Observability
|
||||
|
||||
### Metrics to Track
|
||||
|
||||
- Memory operations per second
|
||||
- Average response time
|
||||
- Vector search latency
|
||||
- Graph query complexity
|
||||
- OpenAI token usage
|
||||
|
||||
### Logging
|
||||
|
||||
- Structured JSON logs
|
||||
- Request/response tracking
|
||||
- Error aggregation
|
||||
- Performance profiling
|
||||
|
||||
<Tip>
|
||||
Use Prometheus + Grafana for production monitoring
|
||||
</Tip>
|
||||
|
||||
## Deep Dive Resources
|
||||
|
||||
For complete architectural details, see:
|
||||
|
||||
- [ARCHITECTURE.md](https://git.colsys.tech/klas/t6_mem0_v2/blob/main/ARCHITECTURE.md)
|
||||
- [PROJECT_REQUIREMENTS.md](https://git.colsys.tech/klas/t6_mem0_v2/blob/main/PROJECT_REQUIREMENTS.md)
|
||||
|
||||
## Next Steps
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card
|
||||
title="Setup Supabase"
|
||||
icon="database"
|
||||
href="/setup/supabase"
|
||||
>
|
||||
Configure vector store
|
||||
</Card>
|
||||
<Card
|
||||
title="Setup Neo4j"
|
||||
icon="diagram-project"
|
||||
href="/setup/neo4j"
|
||||
>
|
||||
Configure graph database
|
||||
</Card>
|
||||
<Card
|
||||
title="API Reference"
|
||||
icon="code"
|
||||
href="/api-reference/introduction"
|
||||
>
|
||||
Explore endpoints
|
||||
</Card>
|
||||
<Card
|
||||
title="MCP Integration"
|
||||
icon="plug"
|
||||
href="/mcp/introduction"
|
||||
>
|
||||
Connect with Claude Code
|
||||
</Card>
|
||||
</CardGroup>
|
||||
168
docs/introduction.mdx
Normal file
168
docs/introduction.mdx
Normal file
@@ -0,0 +1,168 @@
|
||||
---
|
||||
title: Introduction
|
||||
description: 'Welcome to T6 Mem0 v2 - Memory System for LLM Applications'
|
||||
---
|
||||
|
||||
<img
|
||||
className="block dark:hidden"
|
||||
src="/images/hero-light.svg"
|
||||
alt="Hero Light"
|
||||
/>
|
||||
<img
|
||||
className="hidden dark:block"
|
||||
src="/images/hero-dark.svg"
|
||||
alt="Hero Dark"
|
||||
/>
|
||||
|
||||
## What is T6 Mem0 v2?
|
||||
|
||||
T6 Mem0 v2 is a comprehensive memory system for LLM applications built on **mem0.ai**, featuring:
|
||||
|
||||
- 🔌 **MCP Server Integration** - Native Model Context Protocol support for Claude Code and AI tools
|
||||
- 🌐 **REST API** - Full HTTP API for memory operations
|
||||
- 🗄️ **Hybrid Storage** - Supabase (vector) + Neo4j (graph) for optimal performance
|
||||
- 🤖 **AI-Powered** - OpenAI embeddings with 26% accuracy improvement
|
||||
- 📊 **Graph Visualization** - Explore memory relationships in Neo4j Browser
|
||||
- 🐳 **Docker-Native** - Fully containerized deployment
|
||||
|
||||
## Key Features
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card
|
||||
title="Semantic Memory Search"
|
||||
icon="magnifying-glass"
|
||||
href="/api-reference/memories/search"
|
||||
>
|
||||
Find relevant memories using AI-powered semantic similarity
|
||||
</Card>
|
||||
<Card
|
||||
title="MCP Integration"
|
||||
icon="plug"
|
||||
href="/mcp/introduction"
|
||||
>
|
||||
Use as MCP server with Claude Code, Cursor, and other AI tools
|
||||
</Card>
|
||||
<Card
|
||||
title="Graph Relationships"
|
||||
icon="diagram-project"
|
||||
href="/setup/neo4j"
|
||||
>
|
||||
Visualize and explore memory connections with Neo4j
|
||||
</Card>
|
||||
<Card
|
||||
title="Multi-Agent Support"
|
||||
icon="users"
|
||||
href="/api-reference/introduction"
|
||||
>
|
||||
Isolate memories by user, agent, or run identifiers
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## Architecture
|
||||
|
||||
T6 Mem0 v2 uses a **hybrid storage architecture** for optimal performance:
|
||||
|
||||
```
|
||||
┌──────────────────────────────────┐
|
||||
│ Clients (Claude, N8N, Apps) │
|
||||
└──────────────┬───────────────────┘
|
||||
│
|
||||
┌──────────────┴───────────────────┐
|
||||
│ MCP Server (8765) + REST (8080) │
|
||||
└──────────────┬───────────────────┘
|
||||
│
|
||||
┌──────────────┴───────────────────┐
|
||||
│ Mem0 Core Library │
|
||||
└──────────────┬───────────────────┘
|
||||
│
|
||||
┌──────────┴──────────┐
|
||||
│ │
|
||||
┌───┴──────┐ ┌──────┴─────┐
|
||||
│ Supabase │ │ Neo4j │
|
||||
│ (Vector) │ │ (Graph) │
|
||||
└──────────┘ └────────────┘
|
||||
```
|
||||
|
||||
### Storage Layers
|
||||
|
||||
- **Vector Store (Supabase)**: Semantic similarity search with pgvector
|
||||
- **Graph Store (Neo4j)**: Relationship modeling between memories
|
||||
- **Key-Value Store (PostgreSQL JSONB)**: Flexible metadata storage
|
||||
|
||||
## Performance
|
||||
|
||||
Based on mem0.ai research:
|
||||
|
||||
- **26% higher accuracy** compared to baseline OpenAI
|
||||
- **91% lower latency** than full-context approaches
|
||||
- **90% token cost savings** through selective retrieval
|
||||
|
||||
## Use Cases
|
||||
|
||||
<AccordionGroup>
|
||||
<Accordion icon="comment" title="Conversational AI">
|
||||
Maintain context across conversations, remember user preferences, and provide personalized responses
|
||||
</Accordion>
|
||||
<Accordion icon="robot" title="AI Agents">
|
||||
Give agents long-term memory, enable learning from past interactions, and improve decision-making
|
||||
</Accordion>
|
||||
<Accordion icon="headset" title="Customer Support">
|
||||
Remember customer history, track issues across sessions, and provide consistent support
|
||||
</Accordion>
|
||||
<Accordion icon="graduation-cap" title="Educational Tools">
|
||||
Track learning progress, adapt to user knowledge level, and personalize content delivery
|
||||
</Accordion>
|
||||
</AccordionGroup>
|
||||
|
||||
## Quick Links
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card
|
||||
title="Quickstart"
|
||||
icon="rocket"
|
||||
href="/quickstart"
|
||||
>
|
||||
Get up and running in 5 minutes
|
||||
</Card>
|
||||
<Card
|
||||
title="Architecture Deep Dive"
|
||||
icon="sitemap"
|
||||
href="/architecture"
|
||||
>
|
||||
Understand the system design
|
||||
</Card>
|
||||
<Card
|
||||
title="API Reference"
|
||||
icon="code"
|
||||
href="/api-reference/introduction"
|
||||
>
|
||||
Explore the REST API
|
||||
</Card>
|
||||
<Card
|
||||
title="MCP Integration"
|
||||
icon="link"
|
||||
href="/mcp/introduction"
|
||||
>
|
||||
Connect with Claude Code
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## Technology Stack
|
||||
|
||||
- **Core**: mem0ai library
|
||||
- **Vector DB**: Supabase with pgvector
|
||||
- **Graph DB**: Neo4j 5.x
|
||||
- **LLM**: OpenAI API (Phase 1), Ollama (Phase 2)
|
||||
- **REST API**: FastAPI + Pydantic
|
||||
- **MCP**: Python MCP SDK
|
||||
- **Container**: Docker & Docker Compose
|
||||
|
||||
## Support & Community
|
||||
|
||||
- **Repository**: [git.colsys.tech/klas/t6_mem0_v2](https://git.colsys.tech/klas/t6_mem0_v2)
|
||||
- **mem0.ai**: [Official mem0 website](https://mem0.ai)
|
||||
- **Issues**: Contact maintainer
|
||||
|
||||
---
|
||||
|
||||
Ready to get started? Continue to the [Quickstart Guide](/quickstart).
|
||||
111
docs/mint.json
Normal file
111
docs/mint.json
Normal file
@@ -0,0 +1,111 @@
|
||||
{
|
||||
"name": "T6 Mem0 v2",
|
||||
"logo": {
|
||||
"dark": "/logo/dark.svg",
|
||||
"light": "/logo/light.svg"
|
||||
},
|
||||
"favicon": "/favicon.svg",
|
||||
"colors": {
|
||||
"primary": "#0D9373",
|
||||
"light": "#07C983",
|
||||
"dark": "#0D9373",
|
||||
"anchors": {
|
||||
"from": "#0D9373",
|
||||
"to": "#07C983"
|
||||
}
|
||||
},
|
||||
"topbarLinks": [
|
||||
{
|
||||
"name": "Support",
|
||||
"url": "mailto:support@example.com"
|
||||
}
|
||||
],
|
||||
"topbarCtaButton": {
|
||||
"name": "Dashboard",
|
||||
"url": "https://git.colsys.tech/klas/t6_mem0_v2"
|
||||
},
|
||||
"tabs": [
|
||||
{
|
||||
"name": "API Reference",
|
||||
"url": "api-reference"
|
||||
},
|
||||
{
|
||||
"name": "MCP Integration",
|
||||
"url": "mcp"
|
||||
}
|
||||
],
|
||||
"anchors": [
|
||||
{
|
||||
"name": "GitHub",
|
||||
"icon": "github",
|
||||
"url": "https://git.colsys.tech/klas/t6_mem0_v2"
|
||||
},
|
||||
{
|
||||
"name": "mem0.ai",
|
||||
"icon": "link",
|
||||
"url": "https://mem0.ai"
|
||||
}
|
||||
],
|
||||
"navigation": [
|
||||
{
|
||||
"group": "Get Started",
|
||||
"pages": [
|
||||
"introduction",
|
||||
"quickstart",
|
||||
"architecture"
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "Setup",
|
||||
"pages": [
|
||||
"setup/installation",
|
||||
"setup/configuration",
|
||||
"setup/supabase",
|
||||
"setup/neo4j"
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "API Documentation",
|
||||
"pages": [
|
||||
"api-reference/introduction",
|
||||
"api-reference/authentication"
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "Memory Operations",
|
||||
"pages": [
|
||||
"api-reference/memories/add",
|
||||
"api-reference/memories/search",
|
||||
"api-reference/memories/get",
|
||||
"api-reference/memories/update",
|
||||
"api-reference/memories/delete"
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "System",
|
||||
"pages": [
|
||||
"api-reference/health",
|
||||
"api-reference/stats"
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "MCP Server",
|
||||
"pages": [
|
||||
"mcp/introduction",
|
||||
"mcp/installation",
|
||||
"mcp/tools"
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "Examples",
|
||||
"pages": [
|
||||
"examples/claude-code",
|
||||
"examples/n8n",
|
||||
"examples/python"
|
||||
]
|
||||
}
|
||||
],
|
||||
"footerSocials": {
|
||||
"github": "https://git.colsys.tech/klas/t6_mem0_v2"
|
||||
}
|
||||
}
|
||||
259
docs/quickstart.mdx
Normal file
259
docs/quickstart.mdx
Normal file
@@ -0,0 +1,259 @@
|
||||
---
|
||||
title: 'Quickstart'
|
||||
description: 'Get T6 Mem0 v2 running in 5 minutes'
|
||||
---
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before you begin, ensure you have:
|
||||
|
||||
- Docker and Docker Compose installed
|
||||
- Existing Supabase instance (PostgreSQL with pgvector)
|
||||
- OpenAI API key
|
||||
- Git access to the repository
|
||||
|
||||
<Check>
|
||||
**Ready to go?** Let's set up your memory system!
|
||||
</Check>
|
||||
|
||||
## Step 1: Clone Repository
|
||||
|
||||
```bash
|
||||
git clone https://git.colsys.tech/klas/t6_mem0_v2
|
||||
cd t6_mem0_v2
|
||||
```
|
||||
|
||||
## Step 2: Configure Environment
|
||||
|
||||
Create `.env` file from template:
|
||||
|
||||
```bash
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
Edit `.env` with your credentials:
|
||||
|
||||
```bash
|
||||
# OpenAI Configuration
|
||||
OPENAI_API_KEY=sk-your-openai-api-key-here
|
||||
|
||||
# Supabase Configuration (your existing instance)
|
||||
SUPABASE_CONNECTION_STRING=postgresql://supabase_admin:password@172.21.0.12:5432/postgres
|
||||
|
||||
# Neo4j Configuration
|
||||
NEO4J_PASSWORD=your-secure-neo4j-password
|
||||
|
||||
# API Configuration
|
||||
API_KEY=your-secure-api-key-here
|
||||
```
|
||||
|
||||
<Warning>
|
||||
**Important**: Replace all placeholder values with your actual credentials. Never commit the `.env` file to version control!
|
||||
</Warning>
|
||||
|
||||
## Step 3: Apply Database Migrations
|
||||
|
||||
Run the Supabase migration to set up the vector store:
|
||||
|
||||
### Option A: Using Supabase SQL Editor (Recommended)
|
||||
|
||||
1. Open your Supabase dashboard
|
||||
2. Navigate to **SQL Editor**
|
||||
3. Copy contents from `migrations/supabase/001_init_vector_store.sql`
|
||||
4. Paste and execute
|
||||
|
||||
### Option B: Using psql
|
||||
|
||||
```bash
|
||||
psql "$SUPABASE_CONNECTION_STRING" -f migrations/supabase/001_init_vector_store.sql
|
||||
```
|
||||
|
||||
<Tip>
|
||||
The migration creates tables, indexes, and functions needed for vector similarity search. See [Supabase Setup](/setup/supabase) for details.
|
||||
</Tip>
|
||||
|
||||
## Step 4: Start Services
|
||||
|
||||
Launch all services with Docker Compose:
|
||||
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
This starts:
|
||||
- **Neo4j** (ports 7474, 7687)
|
||||
- **REST API** (port 8080)
|
||||
- **MCP Server** (port 8765)
|
||||
|
||||
## Step 5: Verify Installation
|
||||
|
||||
Check service health:
|
||||
|
||||
```bash
|
||||
# Check API health
|
||||
curl http://localhost:8080/v1/health
|
||||
|
||||
# Expected response:
|
||||
# {
|
||||
# "status": "healthy",
|
||||
# "version": "0.1.0",
|
||||
# "dependencies": {
|
||||
# "mem0": "healthy"
|
||||
# }
|
||||
# }
|
||||
```
|
||||
|
||||
<Check>
|
||||
**Success!** All services are running. Let's try using the memory system.
|
||||
</Check>
|
||||
|
||||
## Step 6: Add Your First Memory
|
||||
|
||||
### Using REST API
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:8080/v1/memories/ \
|
||||
-H "Authorization: Bearer YOUR_API_KEY" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"messages": [
|
||||
{"role": "user", "content": "I love pizza with mushrooms and olives"}
|
||||
],
|
||||
"user_id": "alice"
|
||||
}'
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "success",
|
||||
"memories": [
|
||||
{
|
||||
"id": "mem_abc123",
|
||||
"memory": "User loves pizza with mushrooms and olives",
|
||||
"user_id": "alice",
|
||||
"created_at": "2025-10-13T12:00:00Z"
|
||||
}
|
||||
],
|
||||
"message": "Successfully added 1 memory(ies)"
|
||||
}
|
||||
```
|
||||
|
||||
## Step 7: Search Memories
|
||||
|
||||
```bash
curl -G "http://localhost:8080/v1/memories/search" \
  --data-urlencode "query=What food does Alice like?" \
  --data-urlencode "user_id=alice" \
  -H "Authorization: Bearer YOUR_API_KEY"
```
|
||||
|
||||
### Response
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "success",
|
||||
"memories": [
|
||||
{
|
||||
"id": "mem_abc123",
|
||||
"memory": "User loves pizza with mushrooms and olives",
|
||||
"user_id": "alice",
|
||||
"score": 0.95,
|
||||
"created_at": "2025-10-13T12:00:00Z"
|
||||
}
|
||||
],
|
||||
"count": 1
|
||||
}
|
||||
```
|
||||
|
||||
<Success>
|
||||
**Congratulations!** Your memory system is working. The AI remembered Alice's food preferences and retrieved them semantically.
|
||||
</Success>
|
||||
|
||||
## Step 8: Explore Neo4j (Optional)
|
||||
|
||||
View memory relationships in Neo4j Browser:
|
||||
|
||||
1. Open http://localhost:7474 in your browser
|
||||
2. Login with:
|
||||
- **Username**: `neo4j`
|
||||
- **Password**: (from your `.env` NEO4J_PASSWORD)
|
||||
3. Run query:
|
||||
|
||||
```cypher
|
||||
MATCH (n) RETURN n LIMIT 25
|
||||
```
|
||||
|
||||
<Tip>
|
||||
Neo4j visualizes relationships between memories, entities, and concepts extracted by mem0.
|
||||
</Tip>
|
||||
|
||||
## Next Steps
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card
|
||||
title="Configure MCP Server"
|
||||
icon="plug"
|
||||
href="/mcp/installation"
|
||||
>
|
||||
Connect with Claude Code for AI assistant integration
|
||||
</Card>
|
||||
<Card
|
||||
title="API Reference"
|
||||
icon="book"
|
||||
href="/api-reference/introduction"
|
||||
>
|
||||
Explore all available endpoints
|
||||
</Card>
|
||||
<Card
|
||||
title="Architecture"
|
||||
icon="sitemap"
|
||||
href="/architecture"
|
||||
>
|
||||
Understand the system design
|
||||
</Card>
|
||||
<Card
|
||||
title="Examples"
|
||||
icon="code"
|
||||
href="/examples/n8n"
|
||||
>
|
||||
See integration examples
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## Common Issues
|
||||
|
||||
<AccordionGroup>
|
||||
<Accordion title="Connection to Supabase fails">
|
||||
- Verify `SUPABASE_CONNECTION_STRING` is correct
|
||||
- Ensure Supabase is accessible from Docker network
|
||||
- Check if pgvector extension is enabled
|
||||
- Run migration script if tables don't exist
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="Neo4j won't start">
|
||||
- Check if ports 7474 and 7687 are available
|
||||
- Verify `NEO4J_PASSWORD` is set in `.env`
|
||||
- Check Docker logs: `docker logs t6-mem0-neo4j`
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="API returns authentication error">
|
||||
- Verify `API_KEY` in `.env` matches request header
|
||||
- Ensure Authorization header format: `Bearer YOUR_API_KEY`
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="OpenAI errors">
|
||||
- Check `OPENAI_API_KEY` is valid
|
||||
- Verify API key has sufficient credits
|
||||
- Check internet connectivity from containers
|
||||
</Accordion>
|
||||
</AccordionGroup>
|
||||
|
||||
## Getting Help
|
||||
|
||||
- Review [Architecture documentation](/architecture)
|
||||
- Check [API Reference](/api-reference/introduction)
|
||||
- See [Setup guides](/setup/installation)
|
||||
|
||||
---
|
||||
|
||||
**Ready for production?** Continue to [Configuration](/setup/configuration) for advanced settings.
|
||||
0
mcp-server/__init__.py
Normal file
0
mcp-server/__init__.py
Normal file
165
mcp-server/main.py
Normal file
165
mcp-server/main.py
Normal file
@@ -0,0 +1,165 @@
|
||||
"""
|
||||
T6 Mem0 v2 MCP Server
|
||||
Model Context Protocol server for memory operations
|
||||
"""
|
||||
|
||||
import logging
|
||||
import sys
|
||||
import asyncio
|
||||
from mcp.server import Server
|
||||
from mcp.server.stdio import stdio_server
|
||||
from mcp.types import (
|
||||
Resource,
|
||||
Tool,
|
||||
TextContent,
|
||||
ImageContent,
|
||||
EmbeddedResource
|
||||
)
|
||||
from mem0 import Memory
|
||||
|
||||
from config import mem0_config, settings
|
||||
from mcp_server.tools import MemoryTools
|
||||
|
||||
# Configure logging.
# IMPORTANT: the MCP stdio transport uses stdout for protocol frames, so all
# log output is routed to stderr — writing logs to stdout would corrupt the
# JSON-RPC stream between client and server.
logging.basicConfig(
    level=getattr(logging, settings.log_level.upper()),  # e.g. "info" -> logging.INFO
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler(sys.stderr)]  # MCP uses stderr for logs
)

# Module-level logger for this file.
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class T6Mem0Server:
    """T6 Mem0 v2 MCP Server.

    Wraps an MCP ``Server`` and exposes mem0 memory operations as MCP tools
    over the stdio transport. Lifecycle: construct (registers protocol
    handlers) -> ``run()`` (initializes the mem0 backend, then serves until
    the transport closes).
    """

    def __init__(self):
        """Create the MCP server object and register protocol handlers.

        Note: the mem0 backend and the tool handlers are NOT created here —
        they are built in ``initialize()`` (called from ``run()``), so the
        handlers below must tolerate being invoked before initialization.
        """
        self.server = Server("t6-mem0-v2")
        self.memory: Memory | None = None  # mem0 backend, set in initialize()
        self.tools: MemoryTools | None = None  # tool handlers, set in initialize()

        # Setup handlers
        self.setup_handlers()

    def setup_handlers(self):
        """Register MCP protocol handlers on the wrapped ``Server``.

        The decorated closures capture ``self`` so they can reach the
        lazily-initialized ``self.tools`` at call time.
        """

        @self.server.list_resources()
        async def list_resources() -> list[Resource]:
            """
            List available resources (none yet; reserved for future extension).
            """
            return []

        @self.server.read_resource()
        async def read_resource(uri: str) -> str:
            """
            Read resource by URI (not implemented; logs and returns empty).
            """
            logger.warning(f"Resource read not implemented: {uri}")
            return ""

        @self.server.list_tools()
        async def list_tools() -> list[Tool]:
            """List available memory tools (requires prior initialize())."""
            logger.info("Listing tools")
            if not self.tools:
                raise RuntimeError("Tools not initialized")
            return self.tools.get_tool_definitions()

        @self.server.call_tool()
        async def call_tool(name: str, arguments: dict) -> list[TextContent | ImageContent | EmbeddedResource]:
            """
            Handle tool calls by dispatching to the matching MemoryTools handler.

            Args:
                name: Tool name
                arguments: Tool arguments

            Returns:
                Tool response content blocks. Unknown tools and handler
                exceptions are reported as text rather than raised, so a
                failing tool call never tears down the MCP session.
            """
            logger.info(f"Tool called: {name}")
            logger.debug(f"Arguments: {arguments}")

            if not self.tools:
                raise RuntimeError("Tools not initialized")

            # Route to appropriate handler
            handlers = {
                "add_memory": self.tools.handle_add_memory,
                "search_memories": self.tools.handle_search_memories,
                "get_memory": self.tools.handle_get_memory,
                "get_all_memories": self.tools.handle_get_all_memories,
                "update_memory": self.tools.handle_update_memory,
                "delete_memory": self.tools.handle_delete_memory,
                "delete_all_memories": self.tools.handle_delete_all_memories,
            }

            handler = handlers.get(name)
            if not handler:
                logger.error(f"Unknown tool: {name}")
                return [TextContent(type="text", text=f"Error: Unknown tool '{name}'")]

            try:
                return await handler(arguments)
            except Exception as e:
                logger.error(f"Tool execution failed: {e}", exc_info=True)
                return [TextContent(type="text", text=f"Error executing tool: {str(e)}")]

    async def initialize(self):
        """Build the mem0 backend and tool handlers.

        Raises:
            Exception: re-raised after logging if mem0 construction fails
                (e.g. bad config or unreachable stores).
        """
        logger.info("Initializing T6 Mem0 v2 MCP Server")
        logger.info(f"Environment: {settings.environment}")

        try:
            # Initialize Mem0
            logger.info("Initializing Mem0...")
            self.memory = Memory.from_config(config_dict=mem0_config)
            logger.info("Mem0 initialized successfully")

            # Initialize tools
            self.tools = MemoryTools(self.memory)
            logger.info("Tools initialized successfully")

            logger.info("T6 Mem0 v2 MCP Server ready")

        except Exception as e:
            logger.error(f"Failed to initialize server: {e}", exc_info=True)
            raise

    async def run(self):
        """Initialize and serve over stdio until the transport closes.

        Raises:
            Exception: re-raised after logging so the caller can decide
                the process exit behavior.
        """
        try:
            # Initialize before running
            await self.initialize()

            # Run server with stdio transport
            logger.info("Starting MCP server with stdio transport")
            async with stdio_server() as (read_stream, write_stream):
                await self.server.run(
                    read_stream,
                    write_stream,
                    self.server.create_initialization_options()
                )

        except Exception as e:
            logger.error(f"Server error: {e}", exc_info=True)
            raise
|
||||
|
||||
|
||||
async def main():
    """Program entry point.

    Builds the MCP server and drives it to completion. Ctrl-C is treated
    as a clean shutdown; any other failure is logged with a traceback and
    the process exits with status 1.
    """
    try:
        app = T6Mem0Server()
        await app.run()
    except KeyboardInterrupt:
        logger.info("Server stopped by user")
    except Exception as exc:
        logger.error(f"Fatal error: {exc}", exc_info=True)
        sys.exit(1)


# Script entry: run the async main loop.
if __name__ == "__main__":
    asyncio.run(main())
|
||||
13
mcp-server/run.sh
Executable file
13
mcp-server/run.sh
Executable file
@@ -0,0 +1,13 @@
|
||||
#!/bin/bash
# Run T6 Mem0 v2 MCP Server

set -e

# Load environment variables from the repo-root .env, if present.
# `set -a` auto-exports every variable assigned while sourcing, which is
# robust to values containing spaces or quotes — the previous
# `export $(cat ../.env | grep -v '^#' | xargs)` word-split such values.
if [ -f ../.env ]; then
    set -a
    # shellcheck disable=SC1091
    source ../.env
    set +a
fi

# Run MCP server
echo "Starting T6 Mem0 v2 MCP Server..."
python -m mcp_server.main
|
||||
343
mcp-server/tools.py
Normal file
343
mcp-server/tools.py
Normal file
@@ -0,0 +1,343 @@
|
||||
"""
|
||||
MCP tools for T6 Mem0 v2
|
||||
Tool definitions and handlers
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Any, Dict, List
|
||||
from mcp.types import Tool, TextContent
|
||||
from mem0 import Memory
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MemoryTools:
    """MCP tool definitions and handlers for mem0 memory operations.

    Each ``handle_*`` coroutine receives the raw ``arguments`` dict from an
    MCP ``call_tool`` request and returns a list of ``TextContent`` blocks.
    Errors are reported as text rather than raised, so a failing tool call
    never tears down the MCP session.
    """

    def __init__(self, memory: Memory):
        """
        Initialize memory tools

        Args:
            memory: Initialized Mem0 instance used for all operations
        """
        self.memory = memory

    @staticmethod
    def _extract_results(result: Any) -> List[Dict[str, Any]]:
        """Normalize a mem0 return value to a plain list of memory dicts.

        Depending on the mem0 API version, ``add``/``search``/``get_all``
        return either a bare list or a ``{"results": [...]}`` envelope.
        Previously only ``handle_add_memory`` unwrapped the envelope, so
        search/get_all would iterate dict keys on newer mem0 versions;
        this helper applies the same normalization everywhere.
        """
        if isinstance(result, dict):
            return result.get("results", [])
        return result or []

    def get_tool_definitions(self) -> List[Tool]:
        """
        Get MCP tool definitions

        Returns:
            List of Tool definitions (JSON Schema for each tool's input)
        """
        return [
            Tool(
                name="add_memory",
                description="Add new memory from messages. Extracts and stores important information from conversation.",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "messages": {
                            "type": "array",
                            "items": {
                                "type": "object",
                                "properties": {
                                    "role": {"type": "string", "enum": ["user", "assistant", "system"]},
                                    "content": {"type": "string"}
                                },
                                "required": ["role", "content"]
                            },
                            "description": "Conversation messages to extract memory from"
                        },
                        "user_id": {
                            "type": "string",
                            "description": "User identifier (optional)"
                        },
                        "agent_id": {
                            "type": "string",
                            "description": "Agent identifier (optional)"
                        },
                        "metadata": {
                            "type": "object",
                            "description": "Additional metadata (optional)"
                        }
                    },
                    "required": ["messages"]
                }
            ),
            Tool(
                name="search_memories",
                description="Search memories by semantic similarity. Find relevant memories based on a query.",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "query": {
                            "type": "string",
                            "description": "Search query"
                        },
                        "user_id": {
                            "type": "string",
                            "description": "Filter by user ID (optional)"
                        },
                        "agent_id": {
                            "type": "string",
                            "description": "Filter by agent ID (optional)"
                        },
                        "limit": {
                            "type": "integer",
                            "minimum": 1,
                            "maximum": 50,
                            "default": 10,
                            "description": "Maximum number of results"
                        }
                    },
                    "required": ["query"]
                }
            ),
            Tool(
                name="get_memory",
                description="Get a specific memory by its ID",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "memory_id": {
                            "type": "string",
                            "description": "Memory identifier"
                        }
                    },
                    "required": ["memory_id"]
                }
            ),
            Tool(
                name="get_all_memories",
                description="Get all memories for a user or agent",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "user_id": {
                            "type": "string",
                            "description": "User identifier (optional)"
                        },
                        "agent_id": {
                            "type": "string",
                            "description": "Agent identifier (optional)"
                        }
                    }
                }
            ),
            Tool(
                name="update_memory",
                description="Update an existing memory's content",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "memory_id": {
                            "type": "string",
                            "description": "Memory identifier"
                        },
                        "data": {
                            "type": "string",
                            "description": "New memory content"
                        }
                    },
                    "required": ["memory_id", "data"]
                }
            ),
            Tool(
                name="delete_memory",
                description="Delete a specific memory by ID",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "memory_id": {
                            "type": "string",
                            "description": "Memory identifier"
                        }
                    },
                    "required": ["memory_id"]
                }
            ),
            Tool(
                name="delete_all_memories",
                description="Delete all memories for a user or agent. Use with caution!",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "user_id": {
                            "type": "string",
                            "description": "User identifier (optional)"
                        },
                        "agent_id": {
                            "type": "string",
                            "description": "Agent identifier (optional)"
                        }
                    }
                }
            )
        ]

    async def handle_add_memory(self, arguments: Dict[str, Any]) -> List[TextContent]:
        """Handle add_memory tool call: extract and store memories from messages."""
        try:
            messages = arguments.get("messages", [])
            user_id = arguments.get("user_id")
            agent_id = arguments.get("agent_id")
            metadata = arguments.get("metadata", {})

            result = self.memory.add(
                messages=messages,
                user_id=user_id,
                agent_id=agent_id,
                metadata=metadata
            )

            memories = self._extract_results(result)
            response = f"Successfully added {len(memories)} memory(ies):\n\n"

            # Memories may carry the text under 'memory' or (older mem0) 'data'.
            for mem in memories:
                response += f"- {mem.get('memory', mem.get('data', 'N/A'))}\n"
                response += f"  ID: {mem.get('id', 'N/A')}\n\n"

            return [TextContent(type="text", text=response)]

        except Exception as e:
            logger.error(f"Error adding memory: {e}")
            return [TextContent(type="text", text=f"Error: {str(e)}")]

    async def handle_search_memories(self, arguments: Dict[str, Any]) -> List[TextContent]:
        """Handle search_memories tool call: semantic similarity search."""
        try:
            query = arguments.get("query")
            user_id = arguments.get("user_id")
            agent_id = arguments.get("agent_id")
            limit = arguments.get("limit", 10)

            # Normalize the return shape — newer mem0 wraps results in a dict.
            memories = self._extract_results(self.memory.search(
                query=query,
                user_id=user_id,
                agent_id=agent_id,
                limit=limit
            ))

            if not memories:
                return [TextContent(type="text", text="No memories found matching your query.")]

            response = f"Found {len(memories)} relevant memory(ies):\n\n"

            for i, mem in enumerate(memories, 1):
                response += f"{i}. {mem.get('memory', mem.get('data', 'N/A'))}\n"
                response += f"  ID: {mem.get('id', 'N/A')}\n"
                if 'score' in mem:
                    response += f"  Relevance: {mem['score']:.2%}\n"
                response += "\n"

            return [TextContent(type="text", text=response)]

        except Exception as e:
            logger.error(f"Error searching memories: {e}")
            return [TextContent(type="text", text=f"Error: {str(e)}")]

    async def handle_get_memory(self, arguments: Dict[str, Any]) -> List[TextContent]:
        """Handle get_memory tool call: fetch a single memory by ID."""
        try:
            memory_id = arguments.get("memory_id")

            memory = self.memory.get(memory_id=memory_id)

            if not memory:
                return [TextContent(type="text", text=f"Memory not found: {memory_id}")]

            response = "Memory Details:\n\n"
            response += f"ID: {memory.get('id', 'N/A')}\n"
            response += f"Content: {memory.get('memory', memory.get('data', 'N/A'))}\n"

            # Only show scoping/metadata fields that are actually present.
            if memory.get('user_id'):
                response += f"User ID: {memory['user_id']}\n"
            if memory.get('agent_id'):
                response += f"Agent ID: {memory['agent_id']}\n"
            if memory.get('metadata'):
                response += f"Metadata: {memory['metadata']}\n"

            return [TextContent(type="text", text=response)]

        except Exception as e:
            logger.error(f"Error getting memory: {e}")
            return [TextContent(type="text", text=f"Error: {str(e)}")]

    async def handle_get_all_memories(self, arguments: Dict[str, Any]) -> List[TextContent]:
        """Handle get_all_memories tool call: list memories for a user/agent."""
        try:
            user_id = arguments.get("user_id")
            agent_id = arguments.get("agent_id")

            # Normalize the return shape — newer mem0 wraps results in a dict.
            memories = self._extract_results(self.memory.get_all(
                user_id=user_id,
                agent_id=agent_id
            ))

            if not memories:
                return [TextContent(type="text", text="No memories found.")]

            response = f"Retrieved {len(memories)} memory(ies):\n\n"

            for i, mem in enumerate(memories, 1):
                response += f"{i}. {mem.get('memory', mem.get('data', 'N/A'))}\n"
                response += f"  ID: {mem.get('id', 'N/A')}\n\n"

            return [TextContent(type="text", text=response)]

        except Exception as e:
            logger.error(f"Error getting all memories: {e}")
            return [TextContent(type="text", text=f"Error: {str(e)}")]

    async def handle_update_memory(self, arguments: Dict[str, Any]) -> List[TextContent]:
        """Handle update_memory tool call: replace a memory's content."""
        try:
            memory_id = arguments.get("memory_id")
            data = arguments.get("data")

            result = self.memory.update(
                memory_id=memory_id,
                data=data
            )

            response = "Memory updated successfully:\n\n"
            # mem0's update result may not echo the id; fall back to the input.
            response += f"ID: {result.get('id', memory_id)}\n"
            response += f"New Content: {data}\n"

            return [TextContent(type="text", text=response)]

        except Exception as e:
            logger.error(f"Error updating memory: {e}")
            return [TextContent(type="text", text=f"Error: {str(e)}")]

    async def handle_delete_memory(self, arguments: Dict[str, Any]) -> List[TextContent]:
        """Handle delete_memory tool call: remove one memory by ID."""
        try:
            memory_id = arguments.get("memory_id")

            self.memory.delete(memory_id=memory_id)

            return [TextContent(type="text", text=f"Memory {memory_id} deleted successfully.")]

        except Exception as e:
            logger.error(f"Error deleting memory: {e}")
            return [TextContent(type="text", text=f"Error: {str(e)}")]

    async def handle_delete_all_memories(self, arguments: Dict[str, Any]) -> List[TextContent]:
        """Handle delete_all_memories tool call: bulk delete for a user/agent."""
        try:
            user_id = arguments.get("user_id")
            agent_id = arguments.get("agent_id")

            self.memory.delete_all(
                user_id=user_id,
                agent_id=agent_id
            )

            filter_str = f"user_id={user_id}" if user_id else f"agent_id={agent_id}" if agent_id else "all filters"
            return [TextContent(type="text", text=f"All memories deleted for {filter_str}.")]

        except Exception as e:
            logger.error(f"Error deleting all memories: {e}")
            return [TextContent(type="text", text=f"Error: {str(e)}")]
|
||||
172
migrations/supabase/001_init_vector_store.sql
Normal file
172
migrations/supabase/001_init_vector_store.sql
Normal file
@@ -0,0 +1,172 @@
|
||||
-- T6 Mem0 v2 - Initial Vector Store Setup
-- This migration creates the necessary tables and functions for Mem0 vector storage

-- Enable pgvector extension (provides the vector type and distance operators)
CREATE EXTENSION IF NOT EXISTS vector;

-- Create memories table
-- One row per extracted memory; the embedding column holds the semantic
-- vector and metadata holds arbitrary per-memory JSON.
CREATE TABLE IF NOT EXISTS t6_memories (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    embedding vector(1536),  -- OpenAI text-embedding-3-small dimension
    metadata JSONB NOT NULL DEFAULT '{}'::JSONB,
    user_id TEXT,   -- optional scoping: owning user
    agent_id TEXT,  -- optional scoping: owning agent
    run_id TEXT,    -- optional scoping: session/run
    memory_text TEXT NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),  -- kept current by trigger below
    hash TEXT,  -- For deduplication

    -- Indexes
    -- Unique hash rejects inserting byte-identical memories twice.
    CONSTRAINT t6_memories_hash_unique UNIQUE (hash)
);
|
||||
|
||||
-- Create index on embedding using HNSW for fast similarity search.
-- vector_cosine_ops matches the <=> (cosine distance) operator used by
-- match_t6_memories below.
CREATE INDEX IF NOT EXISTS t6_memories_embedding_idx
    ON t6_memories
    USING hnsw (embedding vector_cosine_ops);

-- Create indexes on metadata fields for filtering
CREATE INDEX IF NOT EXISTS t6_memories_user_id_idx
    ON t6_memories (user_id);

CREATE INDEX IF NOT EXISTS t6_memories_agent_id_idx
    ON t6_memories (agent_id);

CREATE INDEX IF NOT EXISTS t6_memories_run_id_idx
    ON t6_memories (run_id);

-- DESC ordering matches the "most recent first" access pattern
-- (see the t6_recent_memories view).
CREATE INDEX IF NOT EXISTS t6_memories_created_at_idx
    ON t6_memories (created_at DESC);

-- Create GIN index on metadata for JSON queries (containment/@> lookups)
CREATE INDEX IF NOT EXISTS t6_memories_metadata_idx
    ON t6_memories
    USING GIN (metadata);

-- Create full-text search index on memory_text
CREATE INDEX IF NOT EXISTS t6_memories_text_search_idx
    ON t6_memories
    USING GIN (to_tsvector('english', memory_text));
|
||||
|
||||
-- Function to update the updated_at timestamp on every row UPDATE.
CREATE OR REPLACE FUNCTION update_t6_memories_updated_at()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = NOW();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Trigger to automatically update updated_at.
-- Drop first so the migration stays idempotent: every other statement in
-- this file uses IF NOT EXISTS / OR REPLACE, but CREATE TRIGGER has no
-- IF NOT EXISTS clause and would error on a re-run.
DROP TRIGGER IF EXISTS t6_memories_updated_at_trigger ON t6_memories;
CREATE TRIGGER t6_memories_updated_at_trigger
    BEFORE UPDATE ON t6_memories
    FOR EACH ROW
    EXECUTE FUNCTION update_t6_memories_updated_at();
|
||||
|
||||
-- Function for vector similarity search with filters.
-- The <=> operator is pgvector cosine distance (nearest-first ordering);
-- similarity is reported as 1 - distance so higher means more similar.
-- A NULL filter argument means "do not filter on that column".
CREATE OR REPLACE FUNCTION match_t6_memories(
    query_embedding vector(1536),
    match_count INT DEFAULT 10,
    filter_user_id TEXT DEFAULT NULL,
    filter_agent_id TEXT DEFAULT NULL,
    filter_run_id TEXT DEFAULT NULL
)
RETURNS TABLE (
    id UUID,
    memory_text TEXT,
    metadata JSONB,
    user_id TEXT,
    agent_id TEXT,
    run_id TEXT,
    similarity FLOAT,
    created_at TIMESTAMP WITH TIME ZONE
)
LANGUAGE plpgsql
AS $$
BEGIN
    RETURN QUERY
    SELECT
        t6_memories.id,
        t6_memories.memory_text,
        t6_memories.metadata,
        t6_memories.user_id,
        t6_memories.agent_id,
        t6_memories.run_id,
        1 - (t6_memories.embedding <=> query_embedding) AS similarity,
        t6_memories.created_at
    FROM t6_memories
    WHERE
        -- Each filter is optional; NULL disables that predicate.
        (filter_user_id IS NULL OR t6_memories.user_id = filter_user_id) AND
        (filter_agent_id IS NULL OR t6_memories.agent_id = filter_agent_id) AND
        (filter_run_id IS NULL OR t6_memories.run_id = filter_run_id)
    -- Ordering by raw distance (not the computed similarity) lets the
    -- HNSW index on embedding drive the scan.
    ORDER BY t6_memories.embedding <=> query_embedding
    LIMIT match_count;
END;
$$;
|
||||
|
||||
-- Function to get memory statistics.
-- Returns a single row of aggregate counts over the whole t6_memories table.
CREATE OR REPLACE FUNCTION get_t6_memory_stats()
RETURNS TABLE (
    total_memories BIGINT,
    total_users BIGINT,
    total_agents BIGINT,
    avg_memories_per_user NUMERIC,
    oldest_memory TIMESTAMP WITH TIME ZONE,
    newest_memory TIMESTAMP WITH TIME ZONE
)
LANGUAGE plpgsql
AS $$
BEGIN
    RETURN QUERY
    SELECT
        COUNT(*)::BIGINT AS total_memories,
        COUNT(DISTINCT user_id)::BIGINT AS total_users,
        COUNT(DISTINCT agent_id)::BIGINT AS total_agents,
        -- Guard against division by zero when no user-scoped memories exist.
        CASE
            WHEN COUNT(DISTINCT user_id) > 0
            THEN ROUND(COUNT(*)::NUMERIC / COUNT(DISTINCT user_id), 2)
            ELSE 0
        END AS avg_memories_per_user,
        MIN(created_at) AS oldest_memory,
        MAX(created_at) AS newest_memory
    FROM t6_memories;
END;
$$;
|
||||
|
||||
-- Create a view for recent memories (newest first, capped at 100 rows;
-- the embedding column is deliberately excluded to keep results light).
CREATE OR REPLACE VIEW t6_recent_memories AS
SELECT
    id,
    user_id,
    agent_id,
    run_id,
    memory_text,
    metadata,
    created_at,
    updated_at
FROM t6_memories
ORDER BY created_at DESC
LIMIT 100;

-- Grant necessary permissions (adjust as needed for your setup)
-- GRANT SELECT, INSERT, UPDATE, DELETE ON t6_memories TO authenticated;
-- GRANT EXECUTE ON FUNCTION match_t6_memories TO authenticated;
-- GRANT EXECUTE ON FUNCTION get_t6_memory_stats TO authenticated;

-- Comments for documentation (visible via \d+ and information_schema)
COMMENT ON TABLE t6_memories IS 'Storage for T6 Mem0 v2 memory vectors and metadata';
COMMENT ON COLUMN t6_memories.embedding IS 'OpenAI text-embedding-3-small vector (1536 dimensions)';
COMMENT ON COLUMN t6_memories.metadata IS 'Flexible JSON metadata for additional memory properties';
COMMENT ON COLUMN t6_memories.hash IS 'Hash for deduplication of identical memories';
COMMENT ON FUNCTION match_t6_memories IS 'Performs cosine similarity search on memory embeddings with optional filters';
COMMENT ON FUNCTION get_t6_memory_stats IS 'Returns statistics about stored memories';

-- Success message emitted when the migration runs to completion.
DO $$
BEGIN
    RAISE NOTICE 'T6 Mem0 v2 vector store initialized successfully!';
    RAISE NOTICE 'Table: t6_memories';
    RAISE NOTICE 'Functions: match_t6_memories, get_t6_memory_stats';
    RAISE NOTICE 'View: t6_recent_memories';
END $$;
|
||||
153
migrations/supabase/README.md
Normal file
153
migrations/supabase/README.md
Normal file
@@ -0,0 +1,153 @@
|
||||
# Supabase Migrations for T6 Mem0 v2
|
||||
|
||||
## Overview
|
||||
|
||||
This directory contains SQL migrations for setting up the Supabase vector store used by T6 Mem0 v2.
|
||||
|
||||
## Migrations
|
||||
|
||||
### 001_init_vector_store.sql
|
||||
|
||||
Initial setup migration that creates:
|
||||
|
||||
- **pgvector extension**: Enables vector similarity search
|
||||
- **t6_memories table**: Main storage for memory vectors and metadata
|
||||
- **Indexes**: HNSW for vectors, B-tree for filters, GIN for JSONB
|
||||
- **Functions**:
|
||||
- `match_t6_memories()`: Vector similarity search with filters
|
||||
- `get_t6_memory_stats()`: Memory statistics
|
||||
- `update_t6_memories_updated_at()`: Auto-update timestamp
|
||||
- **View**: `t6_recent_memories` for quick access to recent entries
|
||||
|
||||
## Applying Migrations
|
||||
|
||||
### Method 1: Supabase SQL Editor (Recommended)
|
||||
|
||||
1. Open your Supabase project dashboard
|
||||
2. Navigate to SQL Editor
|
||||
3. Create a new query
|
||||
4. Copy and paste the contents of `001_init_vector_store.sql`
|
||||
5. Click "Run" to execute
|
||||
|
||||
### Method 2: psql Command Line
|
||||
|
||||
```bash
|
||||
# Connect to your Supabase database
|
||||
psql "postgresql://supabase_admin:PASSWORD@172.21.0.12:5432/postgres"
|
||||
|
||||
# Run the migration
|
||||
\i migrations/supabase/001_init_vector_store.sql
|
||||
```
|
||||
|
||||
### Method 3: Programmatic Application
|
||||
|
||||
```python
|
||||
import psycopg2
|
||||
|
||||
# Connect to Supabase
|
||||
conn = psycopg2.connect(
|
||||
"postgresql://supabase_admin:PASSWORD@172.21.0.12:5432/postgres"
|
||||
)
|
||||
|
||||
# Read and execute migration
|
||||
with open('migrations/supabase/001_init_vector_store.sql', 'r') as f:
|
||||
migration_sql = f.read()
|
||||
|
||||
with conn.cursor() as cur:
|
||||
cur.execute(migration_sql)
|
||||
conn.commit()
|
||||
|
||||
conn.close()
|
||||
```
|
||||
|
||||
## Verification
|
||||
|
||||
After applying the migration, verify the setup:
|
||||
|
||||
```sql
|
||||
-- Check if pgvector extension is enabled
|
||||
SELECT * FROM pg_extension WHERE extname = 'vector';
|
||||
|
||||
-- Check if table exists
|
||||
\d t6_memories
|
||||
|
||||
-- Verify indexes
|
||||
\di t6_memories*
|
||||
|
||||
-- Test the similarity search function
|
||||
SELECT * FROM match_t6_memories(
|
||||
'[0.1, 0.2, ...]'::vector(1536), -- Sample embedding
|
||||
10, -- Match count
|
||||
'test_user', -- User ID filter
|
||||
NULL, -- Agent ID filter
|
||||
NULL -- Run ID filter
|
||||
);
|
||||
|
||||
-- Get memory statistics
|
||||
SELECT * FROM get_t6_memory_stats();
|
||||
```
|
||||
|
||||
## Rollback
|
||||
|
||||
If you need to rollback the migration:
|
||||
|
||||
```sql
|
||||
-- Drop view
|
||||
DROP VIEW IF EXISTS t6_recent_memories;
|
||||
|
||||
-- Drop functions
|
||||
DROP FUNCTION IF EXISTS get_t6_memory_stats();
|
||||
DROP FUNCTION IF EXISTS match_t6_memories(vector, INT, TEXT, TEXT, TEXT);
|
||||
DROP FUNCTION IF EXISTS update_t6_memories_updated_at();
|
||||
|
||||
-- Drop trigger
|
||||
DROP TRIGGER IF EXISTS t6_memories_updated_at_trigger ON t6_memories;
|
||||
|
||||
-- Drop table (WARNING: This will delete all data!)
|
||||
DROP TABLE IF EXISTS t6_memories CASCADE;
|
||||
|
||||
-- Optionally remove extension (only if not used elsewhere)
|
||||
-- DROP EXTENSION IF EXISTS vector CASCADE;
|
||||
```
|
||||
|
||||
## Schema
|
||||
|
||||
### t6_memories Table
|
||||
|
||||
| Column | Type | Description |
|
||||
|--------|------|-------------|
|
||||
| id | UUID | Primary key |
|
||||
| embedding | vector(1536) | OpenAI embedding vector |
|
||||
| metadata | JSONB | Flexible metadata |
|
||||
| user_id | TEXT | User identifier |
|
||||
| agent_id | TEXT | Agent identifier |
|
||||
| run_id | TEXT | Run identifier |
|
||||
| memory_text | TEXT | Original memory text |
|
||||
| created_at | TIMESTAMPTZ | Creation timestamp |
|
||||
| updated_at | TIMESTAMPTZ | Last update timestamp |
|
||||
| hash | TEXT | Deduplication hash (unique) |
|
||||
|
||||
### Indexes
|
||||
|
||||
- **t6_memories_embedding_idx**: HNSW index for fast vector search
|
||||
- **t6_memories_user_id_idx**: B-tree for user filtering
|
||||
- **t6_memories_agent_id_idx**: B-tree for agent filtering
|
||||
- **t6_memories_run_id_idx**: B-tree for run filtering
|
||||
- **t6_memories_created_at_idx**: B-tree for time-based queries
|
||||
- **t6_memories_metadata_idx**: GIN for JSON queries
|
||||
- **t6_memories_text_search_idx**: GIN for full-text search
|
||||
|
||||
## Notes
|
||||
|
||||
- The HNSW index provides O(log n) approximate nearest neighbor search
|
||||
- Cosine distance is used for similarity (1 - cosine similarity)
|
||||
- All timestamps are stored in UTC
|
||||
- The hash column ensures deduplication of identical memories
|
||||
- Metadata is stored as JSONB for flexible schema evolution
|
||||
|
||||
## Support
|
||||
|
||||
For issues or questions about migrations, refer to:
|
||||
- [Supabase Vector Documentation](https://supabase.com/docs/guides/database/extensions/pgvector)
|
||||
- [pgvector Documentation](https://github.com/pgvector/pgvector)
|
||||
- Project Architecture: `../../ARCHITECTURE.md`
|
||||
34
requirements.txt
Normal file
34
requirements.txt
Normal file
@@ -0,0 +1,34 @@
|
||||
# Core Memory System
|
||||
mem0ai[graph]==0.1.*
|
||||
|
||||
# Web Framework
|
||||
fastapi==0.115.*
|
||||
uvicorn[standard]==0.32.*
|
||||
pydantic==2.9.*
|
||||
pydantic-settings==2.6.*
|
||||
|
||||
# MCP Server
|
||||
mcp==1.3.*
|
||||
|
||||
# Database Drivers
|
||||
psycopg2-binary==2.9.*
|
||||
neo4j==5.26.*
|
||||
|
||||
# OpenAI
|
||||
openai==1.58.*
|
||||
|
||||
# Utilities
|
||||
python-dotenv==1.0.*
|
||||
httpx==0.28.*
|
||||
pyyaml==6.0.*
|
||||
|
||||
# Development
|
||||
pytest==8.3.*
|
||||
pytest-asyncio==0.24.*
|
||||
pytest-cov==6.0.*
|
||||
black==24.10.*
|
||||
ruff==0.8.*
|
||||
mypy==1.13.*
|
||||
|
||||
# Monitoring
|
||||
prometheus-client==0.21.*
|
||||
0
tests/__init__.py
Normal file
0
tests/__init__.py
Normal file
0
tests/api/__init__.py
Normal file
0
tests/api/__init__.py
Normal file
0
tests/integration/__init__.py
Normal file
0
tests/integration/__init__.py
Normal file
0
tests/mcp-server/__init__.py
Normal file
0
tests/mcp-server/__init__.py
Normal file
Reference in New Issue
Block a user