Major Changes:
- Added Ollama as an alternative LLM provider to OpenAI
- Implemented flexible provider switching via environment variables
- Support for multiple embedding models (OpenAI and Ollama)
- Created comprehensive Ollama setup guide

Configuration Changes (config.py):
- Added LLM_PROVIDER and EMBEDDER_PROVIDER settings
- Added Ollama configuration: base URL, LLM model, embedding model
- Modified get_mem0_config() to dynamically switch providers
- OpenAI API key now optional when using Ollama
- Added validation to ensure the required keys are present for the chosen provider

Supported Configurations:
1. Full OpenAI (default):
   - LLM_PROVIDER=openai
   - EMBEDDER_PROVIDER=openai
2. Full Ollama (local):
   - LLM_PROVIDER=ollama
   - EMBEDDER_PROVIDER=ollama
3. Hybrid configurations:
   - Ollama LLM + OpenAI embeddings
   - OpenAI LLM + Ollama embeddings

Ollama Models Supported:
- LLM: llama3.1:8b, llama3.1:70b, mistral:7b, codellama:7b, phi3:3.8b
- Embeddings: nomic-embed-text, mxbai-embed-large, all-minilm

Documentation:
- Created docs/setup/ollama.mdx - complete Ollama setup guide:
  - Installation methods (host and Docker)
  - Model selection and comparison
  - Docker Compose configuration
  - Performance tuning and GPU acceleration
  - Migration guide from OpenAI
  - Troubleshooting section
- Updated README.md with Ollama features
- Updated .env.example with provider selection
- Marked Phase 2 as complete in roadmap

Environment Variables:
- LLM_PROVIDER: Select LLM provider (openai/ollama)
- EMBEDDER_PROVIDER: Select embedding provider (openai/ollama)
- OLLAMA_BASE_URL: Ollama API endpoint (default: http://localhost:11434)
- OLLAMA_LLM_MODEL: Ollama model for text generation
- OLLAMA_EMBEDDING_MODEL: Ollama model for embeddings
- MEM0_EMBEDDING_DIMS: Must match the embedding model's output dimensions

Breaking Changes:
- None - defaults to OpenAI for backward compatibility

Migration Notes:
- When switching from OpenAI to Ollama embeddings, existing embeddings must be cleared due to the dimension change (1536 → 768 for nomic-embed-text)
- Update MEM0_EMBEDDING_DIMS to match the chosen embedding model

Benefits:
✅ Cost savings - no API costs with local models
✅ Privacy - all data stays local
✅ Offline capability - works without internet
✅ Model variety - access to many open-source models
✅ Flexibility - easy switching between providers

Version: 1.1.0
Status: Phase 2 Complete - Production Ready with Ollama Support

🤖 Generated with Claude Code

Co-Authored-By: Claude <noreply@anthropic.com>
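Example .env for a fully local setup (illustrative sketch; the values mirror the defaults in config.py below, and MEM0_EMBEDDING_DIMS must match the chosen embedding model):

    LLM_PROVIDER=ollama
    EMBEDDER_PROVIDER=ollama
    OLLAMA_BASE_URL=http://localhost:11434
    OLLAMA_LLM_MODEL=llama3.1:8b
    OLLAMA_EMBEDDING_MODEL=nomic-embed-text
    MEM0_EMBEDDING_DIMS=768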
"""
|
|
Shared configuration for T6 Mem0 v2
|
|
Loads environment variables and creates Mem0 configuration
|
|
"""
|
|
|
|
import os
|
|
from typing import Dict, Any
|
|
from pydantic_settings import BaseSettings
|
|
from pydantic import Field
|
|
|
|
|
|


class Settings(BaseSettings):
    """Application settings loaded from environment variables"""

    # LLM Provider Selection
    llm_provider: str = Field(default="openai", env="LLM_PROVIDER")  # openai or ollama
    embedder_provider: str = Field(default="openai", env="EMBEDDER_PROVIDER")  # openai or ollama

    # OpenAI
    openai_api_key: str = Field(default="", env="OPENAI_API_KEY")  # Optional if using Ollama

    # Ollama
    ollama_base_url: str = Field(default="http://localhost:11434", env="OLLAMA_BASE_URL")
    ollama_llm_model: str = Field(default="llama3.1:8b", env="OLLAMA_LLM_MODEL")
    ollama_embedding_model: str = Field(default="nomic-embed-text", env="OLLAMA_EMBEDDING_MODEL")

    # Supabase
    supabase_connection_string: str = Field(..., env="SUPABASE_CONNECTION_STRING")

    # Neo4j
    neo4j_uri: str = Field(..., env="NEO4J_URI")
    neo4j_user: str = Field(default="neo4j", env="NEO4J_USER")
    neo4j_password: str = Field(..., env="NEO4J_PASSWORD")

    # API
    api_host: str = Field(default="0.0.0.0", env="API_HOST")
    api_port: int = Field(default=8080, env="API_PORT")
    api_key: str = Field(..., env="API_KEY")

    # MCP Server
    mcp_host: str = Field(default="0.0.0.0", env="MCP_HOST")
    mcp_port: int = Field(default=8765, env="MCP_PORT")

    # Mem0
    mem0_collection_name: str = Field(default="t6_memories", env="MEM0_COLLECTION_NAME")
    mem0_embedding_dims: int = Field(default=1536, env="MEM0_EMBEDDING_DIMS")
    mem0_version: str = Field(default="v1.1", env="MEM0_VERSION")

    # Logging
    log_level: str = Field(default="INFO", env="LOG_LEVEL")
    log_format: str = Field(default="json", env="LOG_FORMAT")

    # Environment
    environment: str = Field(default="development", env="ENVIRONMENT")

    # Docker (optional, for container deployments)
    docker_network: str = Field(default="bridge", env="DOCKER_NETWORK")

    class Config:
        env_file = ".env"
        env_file_encoding = "utf-8"
        case_sensitive = False


def get_settings() -> Settings:
    """Get application settings"""
    return Settings()


def get_mem0_config(settings: Settings) -> Dict[str, Any]:
    """
    Generate Mem0 configuration from settings, with support for OpenAI and Ollama.

    Args:
        settings: Application settings

    Returns:
        Dict containing the Mem0 configuration
    """
    # LLM configuration - switch between OpenAI and Ollama
    if settings.llm_provider.lower() == "ollama":
        llm_config = {
            "provider": "ollama",
            "config": {
                "model": settings.ollama_llm_model,
                "temperature": 0.1,
                "max_tokens": 2000,
                "ollama_base_url": settings.ollama_base_url,
            },
        }
    else:  # Default to OpenAI
        if not settings.openai_api_key:
            raise ValueError("OPENAI_API_KEY is required when LLM_PROVIDER=openai")
        llm_config = {
            "provider": "openai",
            "config": {
                "model": "gpt-4o-mini",
                "temperature": 0.1,
                "max_tokens": 2000,
                "api_key": settings.openai_api_key,
            },
        }

    # Embedder configuration - switch between OpenAI and Ollama
    if settings.embedder_provider.lower() == "ollama":
        embedder_config = {
            "provider": "ollama",
            "config": {
                "model": settings.ollama_embedding_model,
                "ollama_base_url": settings.ollama_base_url,
            },
        }
    else:  # Default to OpenAI
        if not settings.openai_api_key:
            raise ValueError("OPENAI_API_KEY is required when EMBEDDER_PROVIDER=openai")
        embedder_config = {
            "provider": "openai",
            "config": {
                "model": "text-embedding-3-small",
                "embedding_dims": settings.mem0_embedding_dims,
                "api_key": settings.openai_api_key,
            },
        }

    return {
        # Vector store - Supabase (pgvector)
        "vector_store": {
            "provider": "supabase",
            "config": {
                "connection_string": settings.supabase_connection_string,
                "collection_name": settings.mem0_collection_name,
                "embedding_model_dims": settings.mem0_embedding_dims,
                "index_method": "hnsw",  # Fast approximate nearest-neighbor search
                "index_measure": "cosine_distance",  # Standard similarity measure for embeddings
            },
        },
        # Graph store - Neo4j
        "graph_store": {
            "provider": "neo4j",
            "config": {
                "url": settings.neo4j_uri,
                "username": settings.neo4j_user,
                "password": settings.neo4j_password,
            },
        },
        # LLM provider - dynamic (OpenAI or Ollama)
        "llm": llm_config,
        # Embedder - dynamic (OpenAI or Ollama)
        "embedder": embedder_config,
        # Version
        "version": settings.mem0_version,
    }
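

# Hypothetical helper (a sketch, not part of the original module): cross-checks
# MEM0_EMBEDDING_DIMS against the dimensions the selected embedding model
# actually produces, since a mismatch breaks the pgvector collection (see the
# migration notes: 1536 -> 768 when moving from text-embedding-3-small to
# nomic-embed-text). The dimension table is an assumption worth verifying
# against each model's documentation.
_KNOWN_EMBEDDING_DIMS = {
    "text-embedding-3-small": 1536,
    "nomic-embed-text": 768,
    "mxbai-embed-large": 1024,
    "all-minilm": 384,
}


def check_embedding_dims(settings: Settings) -> None:
    """Raise early if MEM0_EMBEDDING_DIMS disagrees with the embedding model."""
    if settings.embedder_provider.lower() == "ollama":
        model = settings.ollama_embedding_model
    else:
        model = "text-embedding-3-small"
    expected = _KNOWN_EMBEDDING_DIMS.get(model)
    if expected is not None and expected != settings.mem0_embedding_dims:
        raise ValueError(
            f"MEM0_EMBEDDING_DIMS={settings.mem0_embedding_dims} does not match "
            f"{model} ({expected} dims); clear existing embeddings and update the setting"
        )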


# Global settings instance
settings = get_settings()

# Global Mem0 config
mem0_config = get_mem0_config(settings)
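

# Example usage (sketch): hand the generated config to Mem0. Assumes the
# mem0ai package, whose Memory.from_config() accepts a config dict shaped
# like the one built above; the user_id and texts are placeholders.
if __name__ == "__main__":
    from mem0 import Memory

    memory = Memory.from_config(mem0_config)
    memory.add("Prefers local models for privacy", user_id="demo-user")
    print(memory.search("model preferences", user_id="demo-user"))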