Feature (OpenMemory): Add support for LLM and Embedding Providers in OpenMemory (#2794)
@@ -7,6 +7,7 @@ WORKDIR /usr/src/openmemory
 COPY requirements.txt .
 RUN pip install -r requirements.txt
 
+COPY config.json .
 COPY . .
 
 EXPOSE 8765
openmemory/api/alembic/versions/add_config_table.py (new file, 40 lines)
@@ -0,0 +1,40 @@
"""add_config_table

Revision ID: add_config_table
Revises: 0b53c747049a
Create Date: 2023-06-01 10:00:00.000000

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
import uuid

# revision identifiers, used by Alembic.
revision = 'add_config_table'
down_revision = '0b53c747049a'
branch_labels = None
depends_on = None


def upgrade():
    # Create configs table if it doesn't exist
    op.create_table(
        'configs',
        sa.Column('id', sa.UUID(), nullable=False, default=lambda: uuid.uuid4()),
        sa.Column('key', sa.String(), nullable=False),
        sa.Column('value', sa.JSON(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('key')
    )

    # Create index for key lookups
    op.create_index('idx_configs_key', 'configs', ['key'])


def downgrade():
    # Drop the configs table
    op.drop_index('idx_configs_key', 'configs')
    op.drop_table('configs')
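The migration keeps the whole configuration tree in a single JSON value column, addressed by a unique string key (the API below uses "main"). A minimal sketch of applying it programmatically and confirming the table exists; the alembic.ini path and database URL are assumptions, not part of this commit:

# Hedged sketch: run the migration above and verify the `configs` table.
from alembic import command
from alembic.config import Config as AlembicConfig
import sqlalchemy as sa

alembic_cfg = AlembicConfig("alembic.ini")            # assumed config location
command.upgrade(alembic_cfg, "head")                  # executes upgrade() above

engine = sa.create_engine("sqlite:///openmemory.db")  # assumed database URL
assert "configs" in sa.inspect(engine).get_table_names()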
@@ -1,3 +1,20 @@
+"""
+MCP Server for OpenMemory with resilient memory client handling.
+
+This module implements an MCP (Model Context Protocol) server that provides
+memory operations for OpenMemory. The memory client is initialized lazily
+to prevent server crashes when external dependencies (like Ollama) are
+unavailable. If the memory client cannot be initialized, the server will
+continue running with limited functionality and appropriate error messages.
+
+Key features:
+- Lazy memory client initialization
+- Graceful error handling for unavailable dependencies
+- Fallback to database-only mode when vector store is unavailable
+- Proper logging for debugging connection issues
+- Environment variable parsing for API keys
+"""
+
 import logging
 import json
 from mcp.server.fastmcp import FastMCP
@@ -19,14 +36,17 @@ from qdrant_client import models as qdrant_models
 # Load environment variables
 load_dotenv()
 
-# Initialize MCP and memory client
+# Initialize MCP
 mcp = FastMCP("mem0-mcp-server")
 
-# Check if OpenAI API key is set
-if not os.getenv("OPENAI_API_KEY"):
-    raise Exception("OPENAI_API_KEY is not set in .env file")
-
-memory_client = get_memory_client()
+# Don't initialize memory client at import time - do it lazily when needed
+def get_memory_client_safe():
+    """Get memory client with error handling. Returns None if client cannot be initialized."""
+    try:
+        return get_memory_client()
+    except Exception as e:
+        logging.warning(f"Failed to get memory client: {e}")
+        return None
 
 # Context variables for user_id and client_name
 user_id_var: contextvars.ContextVar[str] = contextvars.ContextVar("user_id")
@@ -48,6 +68,11 @@ async def add_memories(text: str) -> str:
     if not client_name:
         return "Error: client_name not provided"
 
+    # Get memory client safely
+    memory_client = get_memory_client_safe()
+    if not memory_client:
+        return "Error: Memory system is currently unavailable. Please try again later."
+
     try:
         db = SessionLocal()
         try:
@@ -113,6 +138,7 @@ async def add_memories(text: str) -> str:
         finally:
             db.close()
     except Exception as e:
+        logging.exception(f"Error adding to memory: {e}")
         return f"Error adding to memory: {e}"
 
 
@@ -124,6 +150,12 @@ async def search_memory(query: str) -> str:
         return "Error: user_id not provided"
     if not client_name:
         return "Error: client_name not provided"
+
+    # Get memory client safely
+    memory_client = get_memory_client_safe()
+    if not memory_client:
+        return "Error: Memory system is currently unavailable. Please try again later."
+
     try:
         db = SessionLocal()
         try:
@@ -216,6 +248,12 @@ async def list_memories() -> str:
         return "Error: user_id not provided"
     if not client_name:
         return "Error: client_name not provided"
+
+    # Get memory client safely
+    memory_client = get_memory_client_safe()
+    if not memory_client:
+        return "Error: Memory system is currently unavailable. Please try again later."
+
     try:
         db = SessionLocal()
         try:
@@ -267,6 +305,7 @@ async def list_memories() -> str:
         finally:
             db.close()
     except Exception as e:
+        logging.exception(f"Error getting memories: {e}")
         return f"Error getting memories: {e}"
 
 
@@ -278,6 +317,12 @@ async def delete_all_memories() -> str:
         return "Error: user_id not provided"
     if not client_name:
         return "Error: client_name not provided"
+
+    # Get memory client safely
+    memory_client = get_memory_client_safe()
+    if not memory_client:
+        return "Error: Memory system is currently unavailable. Please try again later."
+
     try:
         db = SessionLocal()
         try:
@@ -289,7 +334,10 @@ async def delete_all_memories() -> str:
 
             # delete the accessible memories only
             for memory_id in accessible_memory_ids:
-                memory_client.delete(memory_id)
+                try:
+                    memory_client.delete(memory_id)
+                except Exception as delete_error:
+                    logging.warning(f"Failed to delete memory {memory_id} from vector store: {delete_error}")
 
             # Update each memory's state and create history entries
             now = datetime.datetime.now(datetime.UTC)
@@ -322,6 +370,7 @@ async def delete_all_memories() -> str:
         finally:
             db.close()
     except Exception as e:
+        logging.exception(f"Error deleting memories: {e}")
         return f"Error deleting memories: {e}"
 
 
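Each tool handler now follows the same guard: fetch the client through get_memory_client_safe() and return a readable error string instead of crashing the MCP server when Ollama or Qdrant is unreachable. A condensed sketch of that pattern; do_work is a hypothetical stand-in for the per-tool logic:

# Condensed sketch of the guard shared by the tools above.
import logging

async def tool_template() -> str:
    memory_client = get_memory_client_safe()  # defined in this module
    if not memory_client:
        return "Error: Memory system is currently unavailable. Please try again later."
    try:
        return do_work(memory_client)  # hypothetical per-tool body
    except Exception as e:
        logging.exception(f"Tool failed: {e}")
        return f"Error: {e}"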
@@ -56,6 +56,17 @@ class App(Base):
     memories = relationship("Memory", back_populates="app")
 
 
+class Config(Base):
+    __tablename__ = "configs"
+    id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
+    key = Column(String, unique=True, nullable=False, index=True)
+    value = Column(JSON, nullable=False)
+    created_at = Column(DateTime, default=get_current_utc_time)
+    updated_at = Column(DateTime,
+                        default=get_current_utc_time,
+                        onupdate=get_current_utc_time)
+
+
 class Memory(Base):
     __tablename__ = "memories"
     id = Column(UUID, primary_key=True, default=lambda: uuid.uuid4())
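Config is a plain key-to-JSON mapping, so one row holds the entire provider tree. A minimal sketch of writing and reading the "main" row, assuming the app's SessionLocal factory:

# Hedged sketch: store and fetch the "main" config row through the new model.
from app.database import SessionLocal
from app.models import Config

db = SessionLocal()
db.add(Config(key="main", value={"mem0": {"llm": {"provider": "openai"}}}))
db.commit()

row = db.query(Config).filter(Config.key == "main").first()
print(row.value["mem0"]["llm"]["provider"])  # -> "openai"
db.close()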
@@ -1,5 +1,6 @@
 from .memories import router as memories_router
 from .apps import router as apps_router
 from .stats import router as stats_router
+from .config import router as config_router
 
-__all__ = ["memories_router", "apps_router", "stats_router"]
+__all__ = ["memories_router", "apps_router", "stats_router", "config_router"]
openmemory/api/app/routers/config.py (new file, 240 lines)
@@ -0,0 +1,240 @@
import os
import json
from typing import Dict, Any, Optional
from fastapi import APIRouter, HTTPException, Depends
from pydantic import BaseModel, Field
from sqlalchemy.orm import Session
from app.database import get_db
from app.models import Config as ConfigModel
from app.utils.memory import reset_memory_client

router = APIRouter(prefix="/api/v1/config", tags=["config"])

class LLMConfig(BaseModel):
    model: str = Field(..., description="LLM model name")
    temperature: float = Field(..., description="Temperature setting for the model")
    max_tokens: int = Field(..., description="Maximum tokens to generate")
    api_key: Optional[str] = Field(None, description="API key or 'env:API_KEY' to use environment variable")
    ollama_base_url: Optional[str] = Field(None, description="Base URL for Ollama server (e.g., http://host.docker.internal:11434)")

class LLMProvider(BaseModel):
    provider: str = Field(..., description="LLM provider name")
    config: LLMConfig

class EmbedderConfig(BaseModel):
    model: str = Field(..., description="Embedder model name")
    api_key: Optional[str] = Field(None, description="API key or 'env:API_KEY' to use environment variable")
    ollama_base_url: Optional[str] = Field(None, description="Base URL for Ollama server (e.g., http://host.docker.internal:11434)")

class EmbedderProvider(BaseModel):
    provider: str = Field(..., description="Embedder provider name")
    config: EmbedderConfig

class OpenMemoryConfig(BaseModel):
    custom_instructions: Optional[str] = Field(None, description="Custom instructions for memory management and fact extraction")

class Mem0Config(BaseModel):
    llm: Optional[LLMProvider] = None
    embedder: Optional[EmbedderProvider] = None

class ConfigSchema(BaseModel):
    openmemory: Optional[OpenMemoryConfig] = None
    mem0: Mem0Config

def get_default_configuration():
    """Get the default configuration with sensible defaults for LLM and embedder."""
    return {
        "openmemory": {
            "custom_instructions": None
        },
        "mem0": {
            "llm": {
                "provider": "openai",
                "config": {
                    "model": "gpt-4o-mini",
                    "temperature": 0.1,
                    "max_tokens": 2000,
                    "api_key": "env:OPENAI_API_KEY"
                }
            },
            "embedder": {
                "provider": "openai",
                "config": {
                    "model": "text-embedding-3-small",
                    "api_key": "env:OPENAI_API_KEY"
                }
            }
        }
    }

def get_config_from_db(db: Session, key: str = "main"):
    """Get configuration from database."""
    config = db.query(ConfigModel).filter(ConfigModel.key == key).first()

    if not config:
        # Create default config with proper provider configurations
        default_config = get_default_configuration()
        db_config = ConfigModel(key=key, value=default_config)
        db.add(db_config)
        db.commit()
        db.refresh(db_config)
        return default_config

    # Ensure the config has all required sections with defaults
    config_value = config.value
    default_config = get_default_configuration()

    # Merge with defaults to ensure all required fields exist
    if "openmemory" not in config_value:
        config_value["openmemory"] = default_config["openmemory"]

    if "mem0" not in config_value:
        config_value["mem0"] = default_config["mem0"]
    else:
        # Ensure LLM config exists with defaults
        if "llm" not in config_value["mem0"] or config_value["mem0"]["llm"] is None:
            config_value["mem0"]["llm"] = default_config["mem0"]["llm"]

        # Ensure embedder config exists with defaults
        if "embedder" not in config_value["mem0"] or config_value["mem0"]["embedder"] is None:
            config_value["mem0"]["embedder"] = default_config["mem0"]["embedder"]

    # Save the updated config back to database if it was modified
    if config_value != config.value:
        config.value = config_value
        db.commit()
        db.refresh(config)

    return config_value

def save_config_to_db(db: Session, config: Dict[str, Any], key: str = "main"):
    """Save configuration to database."""
    db_config = db.query(ConfigModel).filter(ConfigModel.key == key).first()

    if db_config:
        db_config.value = config
        db_config.updated_at = None  # Will trigger the onupdate to set current time
    else:
        db_config = ConfigModel(key=key, value=config)
        db.add(db_config)

    db.commit()
    db.refresh(db_config)
    return db_config.value

@router.get("/", response_model=ConfigSchema)
async def get_configuration(db: Session = Depends(get_db)):
    """Get the current configuration."""
    config = get_config_from_db(db)
    return config

@router.put("/", response_model=ConfigSchema)
async def update_configuration(config: ConfigSchema, db: Session = Depends(get_db)):
    """Update the configuration."""
    current_config = get_config_from_db(db)

    # Convert to dict for processing
    updated_config = current_config.copy()

    # Update openmemory settings if provided
    if config.openmemory is not None:
        if "openmemory" not in updated_config:
            updated_config["openmemory"] = {}
        updated_config["openmemory"].update(config.openmemory.dict(exclude_none=True))

    # Update mem0 settings
    updated_config["mem0"] = config.mem0.dict(exclude_none=True)

    # Save the configuration to database
    save_config_to_db(db, updated_config)
    reset_memory_client()
    return updated_config

@router.post("/reset", response_model=ConfigSchema)
async def reset_configuration(db: Session = Depends(get_db)):
    """Reset the configuration to default values."""
    try:
        # Get the default configuration with proper provider setups
        default_config = get_default_configuration()

        # Save it as the current configuration in the database
        save_config_to_db(db, default_config)
        reset_memory_client()
        return default_config
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Failed to reset configuration: {str(e)}"
        )

@router.get("/mem0/llm", response_model=LLMProvider)
async def get_llm_configuration(db: Session = Depends(get_db)):
    """Get only the LLM configuration."""
    config = get_config_from_db(db)
    llm_config = config.get("mem0", {}).get("llm", {})
    return llm_config

@router.put("/mem0/llm", response_model=LLMProvider)
async def update_llm_configuration(llm_config: LLMProvider, db: Session = Depends(get_db)):
    """Update only the LLM configuration."""
    current_config = get_config_from_db(db)

    # Ensure mem0 key exists
    if "mem0" not in current_config:
        current_config["mem0"] = {}

    # Update the LLM configuration
    current_config["mem0"]["llm"] = llm_config.dict(exclude_none=True)

    # Save the configuration to database
    save_config_to_db(db, current_config)
    reset_memory_client()
    return current_config["mem0"]["llm"]

@router.get("/mem0/embedder", response_model=EmbedderProvider)
async def get_embedder_configuration(db: Session = Depends(get_db)):
    """Get only the Embedder configuration."""
    config = get_config_from_db(db)
    embedder_config = config.get("mem0", {}).get("embedder", {})
    return embedder_config

@router.put("/mem0/embedder", response_model=EmbedderProvider)
async def update_embedder_configuration(embedder_config: EmbedderProvider, db: Session = Depends(get_db)):
    """Update only the Embedder configuration."""
    current_config = get_config_from_db(db)

    # Ensure mem0 key exists
    if "mem0" not in current_config:
        current_config["mem0"] = {}

    # Update the Embedder configuration
    current_config["mem0"]["embedder"] = embedder_config.dict(exclude_none=True)

    # Save the configuration to database
    save_config_to_db(db, current_config)
    reset_memory_client()
    return current_config["mem0"]["embedder"]

@router.get("/openmemory", response_model=OpenMemoryConfig)
async def get_openmemory_configuration(db: Session = Depends(get_db)):
    """Get only the OpenMemory configuration."""
    config = get_config_from_db(db)
    openmemory_config = config.get("openmemory", {})
    return openmemory_config

@router.put("/openmemory", response_model=OpenMemoryConfig)
async def update_openmemory_configuration(openmemory_config: OpenMemoryConfig, db: Session = Depends(get_db)):
    """Update only the OpenMemory configuration."""
    current_config = get_config_from_db(db)

    # Ensure openmemory key exists
    if "openmemory" not in current_config:
        current_config["openmemory"] = {}

    # Update the OpenMemory configuration
    current_config["openmemory"].update(openmemory_config.dict(exclude_none=True))

    # Save the configuration to database
    save_config_to_db(db, current_config)
    reset_memory_client()
    return current_config["openmemory"]
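The router exposes both whole-document and per-section endpoints, and every write path calls reset_memory_client() so the next memory operation rebuilds the client from the new settings. A sketch of switching the LLM to Ollama over HTTP; the localhost:8765 base URL matches the default used by the UI hook later in this commit:

# Hedged sketch: point the LLM section at a local Ollama server.
import requests

payload = {
    "provider": "ollama",
    "config": {
        "model": "llama3.1:latest",
        "temperature": 0.1,
        "max_tokens": 2000,
        "ollama_base_url": "http://host.docker.internal:11434",
    },
}
r = requests.put("http://localhost:8765/api/v1/config/mem0/llm", json=payload)
r.raise_for_status()
print(r.json())  # echoes the stored LLM section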
@@ -2,6 +2,7 @@ from datetime import datetime, UTC
 from typing import List, Optional, Set
 from uuid import UUID, uuid4
 import logging
+import os
 from fastapi import APIRouter, Depends, HTTPException, Query
 from sqlalchemy.orm import Session, joinedload
 from fastapi_pagination import Page, Params
@@ -13,13 +14,11 @@ from app.utils.memory import get_memory_client
 from app.database import get_db
 from app.models import (
     Memory, MemoryState, MemoryAccessLog, App,
-    MemoryStatusHistory, User, Category, AccessControl
+    MemoryStatusHistory, User, Category, AccessControl, Config as ConfigModel
 )
 from app.schemas import MemoryResponse, PaginatedMemoryResponse
 from app.utils.permissions import check_memory_access_permissions
 
-memory_client = get_memory_client()
-
 router = APIRouter(prefix="/api/v1/memories", tags=["memories"])
 
 
@@ -227,100 +226,79 @@ async def create_memory(
     # Log what we're about to do
     logging.info(f"Creating memory for user_id: {request.user_id} with app: {request.app}")
 
-    # Save to Qdrant via memory_client
-    qdrant_response = memory_client.add(
-        request.text,
-        user_id=request.user_id,  # Use string user_id to match search
-        metadata={
-            "source_app": "openmemory",
-            "mcp_client": request.app,
-        }
-    )
-
-    # Log the response for debugging
-    logging.info(f"Qdrant response: {qdrant_response}")
-
-    # Process Qdrant response
-    if isinstance(qdrant_response, dict) and 'results' in qdrant_response:
-        for result in qdrant_response['results']:
-            if result['event'] == 'ADD':
-                # Get the Qdrant-generated ID
-                memory_id = UUID(result['id'])
-
-                # Check if memory already exists
-                existing_memory = db.query(Memory).filter(Memory.id == memory_id).first()
-
-                if existing_memory:
-                    # Update existing memory
-                    existing_memory.state = MemoryState.active
-                    existing_memory.content = result['memory']
-                    memory = existing_memory
-                else:
-                    # Create memory with the EXACT SAME ID from Qdrant
-                    memory = Memory(
-                        id=memory_id,  # Use the same ID that Qdrant generated
-                        user_id=user.id,
-                        app_id=app_obj.id,
-                        content=result['memory'],
-                        metadata_=request.metadata,
-                        state=MemoryState.active
-                    )
-                    db.add(memory)
-
-                # Create history entry
-                history = MemoryStatusHistory(
-                    memory_id=memory_id,
-                    changed_by=user.id,
-                    old_state=MemoryState.deleted if existing_memory else MemoryState.deleted,
-                    new_state=MemoryState.active
-                )
-                db.add(history)
-
-                db.commit()
-                db.refresh(memory)
-                return memory
-
-    # Fallback to traditional DB-only approach if Qdrant integration fails
-    # Generate a random UUID for the memory
-    memory_id = uuid4()
-    memory = Memory(
-        id=memory_id,
-        user_id=user.id,
-        app_id=app_obj.id,
-        content=request.text,
-        metadata_=request.metadata
-    )
-    db.add(memory)
-
-    # Create history entry
-    history = MemoryStatusHistory(
-        memory_id=memory_id,
-        changed_by=user.id,
-        old_state=MemoryState.deleted,
-        new_state=MemoryState.active
-    )
-    db.add(history)
-
-    db.commit()
-    db.refresh(memory)
-
-    # Attempt to add to Qdrant with the same ID we just created
+    # Try to get memory client safely
     try:
-        # Try to add with our specific ID
-        memory_client.add(
+        memory_client = get_memory_client()
+        if not memory_client:
+            raise Exception("Memory client is not available")
+    except Exception as client_error:
+        logging.warning(f"Memory client unavailable: {client_error}. Creating memory in database only.")
+        # Return a json response with the error
+        return {
+            "error": str(client_error)
+        }
+
+    # Try to save to Qdrant via memory_client
+    try:
+        qdrant_response = memory_client.add(
             request.text,
-            memory_id=str(memory_id),  # Specify the ID
-            user_id=request.user_id,
+            user_id=request.user_id,  # Use string user_id to match search
             metadata={
                 "source_app": "openmemory",
                 "mcp_client": request.app,
            }
         )
-    except Exception as e:
-        logging.error(f"Failed to add to Qdrant in fallback path: {e}")
-        # Continue anyway, the DB record is created
 
-    return memory
+        # Log the response for debugging
+        logging.info(f"Qdrant response: {qdrant_response}")
+
+        # Process Qdrant response
+        if isinstance(qdrant_response, dict) and 'results' in qdrant_response:
+            for result in qdrant_response['results']:
+                if result['event'] == 'ADD':
+                    # Get the Qdrant-generated ID
+                    memory_id = UUID(result['id'])
+
+                    # Check if memory already exists
+                    existing_memory = db.query(Memory).filter(Memory.id == memory_id).first()
+
+                    if existing_memory:
+                        # Update existing memory
+                        existing_memory.state = MemoryState.active
+                        existing_memory.content = result['memory']
+                        memory = existing_memory
+                    else:
+                        # Create memory with the EXACT SAME ID from Qdrant
+                        memory = Memory(
+                            id=memory_id,  # Use the same ID that Qdrant generated
+                            user_id=user.id,
+                            app_id=app_obj.id,
+                            content=result['memory'],
+                            metadata_=request.metadata,
+                            state=MemoryState.active
+                        )
+                        db.add(memory)
+
+                    # Create history entry
+                    history = MemoryStatusHistory(
+                        memory_id=memory_id,
+                        changed_by=user.id,
+                        old_state=MemoryState.deleted if existing_memory else MemoryState.deleted,
+                        new_state=MemoryState.active
+                    )
+                    db.add(history)
+
+                    db.commit()
+                    db.refresh(memory)
+                    return memory
+    except Exception as qdrant_error:
+        logging.warning(f"Qdrant operation failed: {qdrant_error}.")
+        # Return a json response with the error
+        return {
+            "error": str(qdrant_error)
+        }
 
 
 # Get memory by ID
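The branching above hinges on the shape mem0's add() returns under config version v1.1: a dict with a results list of event records. An illustration of the shape being matched; the ID and text are made up:

# Illustrative only: the response shape create_memory expects from add().
qdrant_response = {
    "results": [
        {
            "event": "ADD",
            "id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",  # made-up UUID
            "memory": "User prefers dark mode",            # made-up text
        }
    ]
}
for result in qdrant_response["results"]:
    if result["event"] == "ADD":
        print(result["id"], result["memory"])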
@@ -1,9 +1,193 @@
 """
 Memory client utilities for OpenMemory.
+
+This module provides functionality to initialize and manage the Mem0 memory client
+with automatic configuration management and Docker environment support.
+
+Docker Ollama Configuration:
+When running inside a Docker container and using Ollama as the LLM or embedder provider,
+the system automatically detects the Docker environment and adjusts localhost URLs
+to properly reach the host machine where Ollama is running.
+
+Supported Docker host resolution (in order of preference):
+1. OLLAMA_HOST environment variable (if set)
+2. host.docker.internal (Docker Desktop for Mac/Windows)
+3. Docker bridge gateway IP (typically 172.17.0.1 on Linux)
+4. Fallback to 172.17.0.1
+
+Example configuration that will be automatically adjusted:
+{
+    "llm": {
+        "provider": "ollama",
+        "config": {
+            "model": "llama3.1:latest",
+            "ollama_base_url": "http://localhost:11434"  # Auto-adjusted in Docker
+        }
+    }
+}
 """
 
 import os
+import json
+import hashlib
+import socket
+import platform
+
 from mem0 import Memory
+from app.database import SessionLocal
+from app.models import Config as ConfigModel
 
-memory_client = None
+
+_memory_client = None
+_config_hash = None
+
+
+def _get_config_hash(config_dict):
+    """Generate a hash of the config to detect changes."""
+    config_str = json.dumps(config_dict, sort_keys=True)
+    return hashlib.md5(config_str.encode()).hexdigest()
+
+
+def _get_docker_host_url():
+    """
+    Determine the appropriate host URL to reach host machine from inside Docker container.
+    Returns the best available option for reaching the host from inside a container.
+    """
+    # Check for custom environment variable first
+    custom_host = os.environ.get('OLLAMA_HOST')
+    if custom_host:
+        print(f"Using custom Ollama host from OLLAMA_HOST: {custom_host}")
+        return custom_host.replace('http://', '').replace('https://', '').split(':')[0]
+
+    # Check if we're running inside Docker
+    if not os.path.exists('/.dockerenv'):
+        # Not in Docker, return localhost as-is
+        return "localhost"
+
+    print("Detected Docker environment, adjusting host URL for Ollama...")
+
+    # Try different host resolution strategies
+    host_candidates = []
+
+    # 1. host.docker.internal (works on Docker Desktop for Mac/Windows)
+    try:
+        socket.gethostbyname('host.docker.internal')
+        host_candidates.append('host.docker.internal')
+        print("Found host.docker.internal")
+    except socket.gaierror:
+        pass
+
+    # 2. Docker bridge gateway (typically 172.17.0.1 on Linux)
+    try:
+        with open('/proc/net/route', 'r') as f:
+            for line in f:
+                fields = line.strip().split()
+                if fields[1] == '00000000':  # Default route
+                    gateway_hex = fields[2]
+                    gateway_ip = socket.inet_ntoa(bytes.fromhex(gateway_hex)[::-1])
+                    host_candidates.append(gateway_ip)
+                    print(f"Found Docker gateway: {gateway_ip}")
+                    break
+    except (FileNotFoundError, IndexError, ValueError):
+        pass
+
+    # 3. Fallback to common Docker bridge IP
+    if not host_candidates:
+        host_candidates.append('172.17.0.1')
+        print("Using fallback Docker bridge IP: 172.17.0.1")
+
+    # Return the first available candidate
+    return host_candidates[0]
+
+
+def _fix_ollama_urls(config_section):
+    """
+    Fix Ollama URLs for Docker environment.
+    Replaces localhost URLs with appropriate Docker host URLs.
+    Sets default ollama_base_url if not provided.
+    """
+    if not config_section or "config" not in config_section:
+        return config_section
+
+    ollama_config = config_section["config"]
+
+    # Set default ollama_base_url if not provided
+    if "ollama_base_url" not in ollama_config:
+        ollama_config["ollama_base_url"] = "http://host.docker.internal:11434"
+    else:
+        # Check for ollama_base_url and fix if it's localhost
+        url = ollama_config["ollama_base_url"]
+        if "localhost" in url or "127.0.0.1" in url:
+            docker_host = _get_docker_host_url()
+            if docker_host != "localhost":
+                new_url = url.replace("localhost", docker_host).replace("127.0.0.1", docker_host)
+                ollama_config["ollama_base_url"] = new_url
+                print(f"Adjusted Ollama URL from {url} to {new_url}")
+
+    return config_section
+
+
+def reset_memory_client():
+    """Reset the global memory client to force reinitialization with new config."""
+    global _memory_client, _config_hash
+    _memory_client = None
+    _config_hash = None
+
+
+def get_default_memory_config():
+    """Get default memory client configuration with sensible defaults."""
+    return {
+        "vector_store": {
+            "provider": "qdrant",
+            "config": {
+                "collection_name": "openmemory",
+                "host": "mem0_store",
+                "port": 6333,
+            }
+        },
+        "llm": {
+            "provider": "openai",
+            "config": {
+                "model": "gpt-4o-mini",
+                "temperature": 0.1,
+                "max_tokens": 2000,
+                "api_key": "env:OPENAI_API_KEY"
+            }
+        },
+        "embedder": {
+            "provider": "openai",
+            "config": {
+                "model": "text-embedding-3-small",
+                "api_key": "env:OPENAI_API_KEY"
+            }
+        },
+        "version": "v1.1"
+    }
+
+
+def _parse_environment_variables(config_dict):
+    """
+    Parse environment variables in config values.
+    Converts 'env:VARIABLE_NAME' to actual environment variable values.
+    """
+    if isinstance(config_dict, dict):
+        parsed_config = {}
+        for key, value in config_dict.items():
+            if isinstance(value, str) and value.startswith("env:"):
+                env_var = value.split(":", 1)[1]
+                env_value = os.environ.get(env_var)
+                if env_value:
+                    parsed_config[key] = env_value
+                    print(f"Loaded {env_var} from environment for {key}")
+                else:
+                    print(f"Warning: Environment variable {env_var} not found, keeping original value")
+                    parsed_config[key] = value
+            elif isinstance(value, dict):
+                parsed_config[key] = _parse_environment_variables(value)
+            else:
+                parsed_config[key] = value
+        return parsed_config
+    return config_dict
+
+
 def get_memory_client(custom_instructions: str = None):
@@ -14,37 +198,94 @@ def get_memory_client(custom_instructions: str = None):
         custom_instructions: Optional instructions for the memory project.
 
     Returns:
-        Initialized Mem0 client instance.
+        Initialized Mem0 client instance or None if initialization fails.
 
     Raises:
-        Exception: If required API keys are not set.
+        Exception: If required API keys are not set or critical configuration is missing.
     """
-    global memory_client
-
-    if memory_client is not None:
-        return memory_client
+    global _memory_client, _config_hash
 
     try:
-        config = {
-            "vector_store": {
-                "provider": "qdrant",
-                "config": {
-                    "collection_name": "openmemory",
-                    "host": "mem0_store",
-                    "port": 6333,
-                }
-            }
-        }
+        # Start with default configuration
+        config = get_default_memory_config()
+
+        # Variable to track custom instructions
+        db_custom_instructions = None
+
+        # Load configuration from database
+        try:
+            db = SessionLocal()
+            db_config = db.query(ConfigModel).filter(ConfigModel.key == "main").first()
+
+            if db_config:
+                json_config = db_config.value
+
+                # Extract custom instructions from openmemory settings
+                if "openmemory" in json_config and "custom_instructions" in json_config["openmemory"]:
+                    db_custom_instructions = json_config["openmemory"]["custom_instructions"]
+
+                # Override defaults with configurations from the database
+                if "mem0" in json_config:
+                    mem0_config = json_config["mem0"]
+
+                    # Update LLM configuration if available
+                    if "llm" in mem0_config and mem0_config["llm"] is not None:
+                        config["llm"] = mem0_config["llm"]
+
+                        # Fix Ollama URLs for Docker if needed
+                        if config["llm"].get("provider") == "ollama":
+                            config["llm"] = _fix_ollama_urls(config["llm"])
+
+                    # Update Embedder configuration if available
+                    if "embedder" in mem0_config and mem0_config["embedder"] is not None:
+                        config["embedder"] = mem0_config["embedder"]
+
+                        # Fix Ollama URLs for Docker if needed
+                        if config["embedder"].get("provider") == "ollama":
+                            config["embedder"] = _fix_ollama_urls(config["embedder"])
+            else:
+                print("No configuration found in database, using defaults")
+
+            db.close()
+
+        except Exception as e:
+            print(f"Warning: Error loading configuration from database: {e}")
+            print("Using default configuration")
+            # Continue with default configuration if database config can't be loaded
 
-        memory_client = Memory.from_config(config_dict=config)
-    except Exception:
-        raise Exception("Exception occurred while initializing memory client")
+        # Use custom_instructions parameter first, then fall back to database value
+        instructions_to_use = custom_instructions or db_custom_instructions
+        if instructions_to_use:
+            config["custom_fact_extraction_prompt"] = instructions_to_use
 
-    # Update project with custom instructions if provided
-    if custom_instructions:
-        memory_client.update_project(custom_instructions=custom_instructions)
+        # ALWAYS parse environment variables in the final config
+        # This ensures that even default config values like "env:OPENAI_API_KEY" get parsed
+        print("Parsing environment variables in final config...")
+        config = _parse_environment_variables(config)
 
-    return memory_client
+        # Check if config has changed by comparing hashes
+        current_config_hash = _get_config_hash(config)
+
+        # Only reinitialize if config changed or client doesn't exist
+        if _memory_client is None or _config_hash != current_config_hash:
+            print(f"Initializing memory client with config hash: {current_config_hash}")
+            try:
+                _memory_client = Memory.from_config(config_dict=config)
+                _config_hash = current_config_hash
+                print("Memory client initialized successfully")
+            except Exception as init_error:
+                print(f"Warning: Failed to initialize memory client: {init_error}")
+                print("Server will continue running with limited memory functionality")
+                _memory_client = None
+                _config_hash = None
+                return None
+
+        return _memory_client
+
+    except Exception as e:
+        print(f"Warning: Exception occurred while initializing memory client: {e}")
+        print("Server will continue running with limited memory functionality")
+        return None
 
 
 def get_default_user_id():
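Because _parse_environment_variables runs on the final merged config, an env: placeholder anywhere in the tree resolves when the client initializes, and the raw placeholder (never the secret) is what stays in the database. A quick standalone check; the variable name is demo-only:

# Hedged demo of the env: placeholder convention implemented above.
import os

os.environ["DEMO_KEY"] = "sk-demo"          # demo-only variable
cfg = {"llm": {"config": {"api_key": "env:DEMO_KEY"}}}
parsed = _parse_environment_variables(cfg)  # defined in this file
assert parsed["llm"]["config"]["api_key"] == "sk-demo"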
openmemory/api/config.json (new file, 20 lines)
@@ -0,0 +1,20 @@
{
    "mem0": {
        "llm": {
            "provider": "openai",
            "config": {
                "model": "gpt-4o-mini",
                "temperature": 0.1,
                "max_tokens": 2000,
                "api_key": "env:API_KEY"
            }
        },
        "embedder": {
            "provider": "openai",
            "config": {
                "model": "text-embedding-3-small",
                "api_key": "env:API_KEY"
            }
        }
    }
}
openmemory/api/default_config.json (new file, 20 lines)
@@ -0,0 +1,20 @@
{
    "mem0": {
        "llm": {
            "provider": "openai",
            "config": {
                "model": "gpt-4o-mini",
                "temperature": 0.1,
                "max_tokens": 2000,
                "api_key": "env:OPENAI_API_KEY"
            }
        },
        "embedder": {
            "provider": "openai",
            "config": {
                "model": "text-embedding-3-small",
                "api_key": "env:OPENAI_API_KEY"
            }
        }
    }
}
@@ -2,7 +2,7 @@ import datetime
 from fastapi import FastAPI
 from app.database import engine, Base, SessionLocal
 from app.mcp_server import setup_mcp_server
-from app.routers import memories_router, apps_router, stats_router
+from app.routers import memories_router, apps_router, stats_router, config_router
 from fastapi_pagination import add_pagination
 from fastapi.middleware.cors import CORSMiddleware
 from app.models import User, App
@@ -81,6 +81,7 @@ setup_mcp_server(app)
 app.include_router(memories_router)
 app.include_router(apps_router)
 app.include_router(stats_router)
+app.include_router(config_router)
 
 # Add pagination support
 add_pagination(app)
@@ -13,3 +13,5 @@ pytest-asyncio>=0.21.0
 httpx>=0.24.0
 pytest-cov>=4.0.0
 tenacity==9.1.2
+anthropic==0.51.0
+ollama==0.4.8
@@ -10,6 +10,7 @@ services:
     build: api/
     environment:
       - USER
+      - API_KEY
    env_file:
      - api/.env
    depends_on:
openmemory/ui/app/settings/page.tsx (new file, 165 lines)
@@ -0,0 +1,165 @@
"use client";

import { useState, useEffect } from "react"
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card"
import { Button } from "@/components/ui/button"
import { SaveIcon, RotateCcw } from "lucide-react"
import { FormView } from "@/components/form-view"
import { JsonEditor } from "@/components/json-editor"
import { useConfig } from "@/hooks/useConfig"
import { useSelector } from "react-redux"
import { RootState } from "@/store/store"
import { useToast } from "@/components/ui/use-toast"
import {
  AlertDialog,
  AlertDialogAction,
  AlertDialogCancel,
  AlertDialogContent,
  AlertDialogDescription,
  AlertDialogFooter,
  AlertDialogHeader,
  AlertDialogTitle,
  AlertDialogTrigger,
} from "@/components/ui/alert-dialog"

export default function SettingsPage() {
  const { toast } = useToast()
  const configState = useSelector((state: RootState) => state.config)
  const [settings, setSettings] = useState({
    openmemory: configState.openmemory || {
      custom_instructions: null
    },
    mem0: configState.mem0
  })
  const [viewMode, setViewMode] = useState<"form" | "json">("form")
  const { fetchConfig, saveConfig, resetConfig, isLoading, error } = useConfig()

  useEffect(() => {
    // Load config from API on component mount
    const loadConfig = async () => {
      try {
        await fetchConfig()
      } catch (error) {
        toast({
          title: "Error",
          description: "Failed to load configuration",
          variant: "destructive",
        })
      }
    }

    loadConfig()
  }, [])

  // Update local state when redux state changes
  useEffect(() => {
    setSettings(prev => ({
      ...prev,
      openmemory: configState.openmemory || { custom_instructions: null },
      mem0: configState.mem0
    }))
  }, [configState.openmemory, configState.mem0])

  const handleSave = async () => {
    try {
      await saveConfig({
        openmemory: settings.openmemory,
        mem0: settings.mem0
      })
      toast({
        title: "Settings saved",
        description: "Your configuration has been updated successfully.",
      })
    } catch (error) {
      toast({
        title: "Error",
        description: "Failed to save configuration",
        variant: "destructive",
      })
    }
  }

  const handleReset = async () => {
    try {
      await resetConfig()
      toast({
        title: "Settings reset",
        description: "Configuration has been reset to default values.",
      })
      await fetchConfig()
    } catch (error) {
      toast({
        title: "Error",
        description: "Failed to reset configuration",
        variant: "destructive",
      })
    }
  }

  return (
    <div className="text-white py-6">
      <div className="container mx-auto py-10 max-w-4xl">
        <div className="flex justify-between items-center mb-8">
          <div className="animate-fade-slide-down">
            <h1 className="text-3xl font-bold tracking-tight">Settings</h1>
            <p className="text-muted-foreground mt-1">Manage your OpenMemory and Mem0 configuration</p>
          </div>
          <div className="flex space-x-2">
            <AlertDialog>
              <AlertDialogTrigger asChild>
                <Button variant="outline" className="border-zinc-800 text-zinc-200 hover:bg-zinc-700 hover:text-zinc-50 animate-fade-slide-down" disabled={isLoading}>
                  <RotateCcw className="mr-2 h-4 w-4" />
                  Reset Defaults
                </Button>
              </AlertDialogTrigger>
              <AlertDialogContent>
                <AlertDialogHeader>
                  <AlertDialogTitle>Reset Configuration?</AlertDialogTitle>
                  <AlertDialogDescription>
                    This will reset all settings to the system defaults. Any custom configuration will be lost.
                    API keys will be set to use environment variables.
                  </AlertDialogDescription>
                </AlertDialogHeader>
                <AlertDialogFooter>
                  <AlertDialogCancel>Cancel</AlertDialogCancel>
                  <AlertDialogAction onClick={handleReset} className="bg-red-600 hover:bg-red-700">
                    Reset
                  </AlertDialogAction>
                </AlertDialogFooter>
              </AlertDialogContent>
            </AlertDialog>

            <Button onClick={handleSave} className="bg-primary hover:bg-primary/90 animate-fade-slide-down" disabled={isLoading}>
              <SaveIcon className="mr-2 h-4 w-4" />
              {isLoading ? "Saving..." : "Save Configuration"}
            </Button>
          </div>
        </div>

        <Tabs value={viewMode} onValueChange={(value) => setViewMode(value as "form" | "json")} className="w-full animate-fade-slide-down delay-1">
          <TabsList className="grid w-full grid-cols-2 mb-8">
            <TabsTrigger value="form">Form View</TabsTrigger>
            <TabsTrigger value="json">JSON Editor</TabsTrigger>
          </TabsList>

          <TabsContent value="form">
            <FormView settings={settings} onChange={setSettings} />
          </TabsContent>

          <TabsContent value="json">
            <Card>
              <CardHeader>
                <CardTitle>JSON Configuration</CardTitle>
                <CardDescription>Edit the entire configuration directly as JSON</CardDescription>
              </CardHeader>
              <CardContent>
                <JsonEditor value={settings} onChange={setSettings} />
              </CardContent>
            </Card>
          </TabsContent>
        </Tabs>
      </div>
    </div>
  )
}
@@ -11,6 +11,8 @@ import { useMemoriesApi } from "@/hooks/useMemoriesApi";
 import Image from "next/image";
 import { useStats } from "@/hooks/useStats";
 import { useAppsApi } from "@/hooks/useAppsApi";
+import { Settings } from "lucide-react";
+import { useConfig } from "@/hooks/useConfig";
 
 export function Navbar() {
   const pathname = usePathname();
@@ -18,6 +20,7 @@ export function Navbar() {
   const memoriesApi = useMemoriesApi();
   const appsApi = useAppsApi();
   const statsApi = useStats();
+  const configApi = useConfig();
 
   // Define route matchers with typed parameter extraction
   const routeBasedFetchMapping: {
@@ -52,6 +55,10 @@ export function Navbar() {
       match: /^\/$/,
       getFetchers: () => [statsApi.fetchStats, memoriesApi.fetchMemories],
     },
+    {
+      match: /^\/settings$/,
+      getFetchers: () => [configApi.fetchConfig],
+    },
   ];
 
   const getFetchersForPath = (path: string) => {
@@ -127,6 +134,18 @@ export function Navbar() {
               Apps
             </Button>
           </Link>
+          <Link href="/settings">
+            <Button
+              variant="outline"
+              size="sm"
+              className={`flex items-center gap-2 border-none ${
+                isActive("/settings") ? activeClass : inactiveClass
+              }`}
+            >
+              <Settings />
+              Settings
+            </Button>
+          </Link>
         </div>
         <div className="flex items-center gap-4">
           <Button
openmemory/ui/components/form-view.tsx (new file, 348 lines)
@@ -0,0 +1,348 @@
"use client"

import { useState } from "react"
import { Eye, EyeOff } from "lucide-react"
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "./ui/card"
import { Input } from "./ui/input"
import { Label } from "./ui/label"
import { Slider } from "./ui/slider"
import { Switch } from "./ui/switch"
import { Button } from "./ui/button"
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "./ui/select"
import { Textarea } from "./ui/textarea"

interface FormViewProps {
  settings: any
  onChange: (settings: any) => void
}

export function FormView({ settings, onChange }: FormViewProps) {
  const [showLlmAdvanced, setShowLlmAdvanced] = useState(false)
  const [showLlmApiKey, setShowLlmApiKey] = useState(false)
  const [showEmbedderApiKey, setShowEmbedderApiKey] = useState(false)

  const handleOpenMemoryChange = (key: string, value: any) => {
    onChange({
      ...settings,
      openmemory: {
        ...settings.openmemory,
        [key]: value,
      },
    })
  }

  const handleLlmProviderChange = (value: string) => {
    onChange({
      ...settings,
      mem0: {
        ...settings.mem0,
        llm: {
          ...settings.mem0.llm,
          provider: value,
        },
      },
    })
  }

  const handleLlmConfigChange = (key: string, value: any) => {
    onChange({
      ...settings,
      mem0: {
        ...settings.mem0,
        llm: {
          ...settings.mem0.llm,
          config: {
            ...settings.mem0.llm.config,
            [key]: value,
          },
        },
      },
    })
  }

  const handleEmbedderProviderChange = (value: string) => {
    onChange({
      ...settings,
      mem0: {
        ...settings.mem0,
        embedder: {
          ...settings.mem0.embedder,
          provider: value,
        },
      },
    })
  }

  const handleEmbedderConfigChange = (key: string, value: any) => {
    onChange({
      ...settings,
      mem0: {
        ...settings.mem0,
        embedder: {
          ...settings.mem0.embedder,
          config: {
            ...settings.mem0.embedder.config,
            [key]: value,
          },
        },
      },
    })
  }

  const needsLlmApiKey = settings.mem0?.llm?.provider?.toLowerCase() !== "ollama"
  const needsEmbedderApiKey = settings.mem0?.embedder?.provider?.toLowerCase() !== "ollama"
  const isLlmOllama = settings.mem0?.llm?.provider?.toLowerCase() === "ollama"
  const isEmbedderOllama = settings.mem0?.embedder?.provider?.toLowerCase() === "ollama"

  const LLM_PROVIDERS = [
    "OpenAI",
    "Anthropic",
    "Azure OpenAI",
    "Ollama",
    "Together",
    "Groq",
    "Litellm",
    "Mistral AI",
    "Google AI",
    "AWS Bedrock",
    "Gemini",
    "DeepSeek",
    "xAI",
    "LM Studio",
    "LangChain",
  ]

  const EMBEDDER_PROVIDERS = [
    "OpenAI",
    "Azure OpenAI",
    "Ollama",
    "Hugging Face",
    "Vertexai",
    "Gemini",
    "Lmstudio",
    "Together",
    "LangChain",
    "AWS Bedrock",
  ]

  return (
    <div className="space-y-8">
      {/* OpenMemory Settings */}
      <Card>
        <CardHeader>
          <CardTitle>OpenMemory Settings</CardTitle>
          <CardDescription>Configure your OpenMemory instance settings</CardDescription>
        </CardHeader>
        <CardContent className="space-y-6">
          <div className="space-y-2">
            <Label htmlFor="custom-instructions">Custom Instructions</Label>
            <Textarea
              id="custom-instructions"
              placeholder="Enter custom instructions for memory management..."
              value={settings.openmemory?.custom_instructions || ""}
              onChange={(e) => handleOpenMemoryChange("custom_instructions", e.target.value)}
              className="min-h-[100px]"
            />
            <p className="text-xs text-muted-foreground mt-1">
              Custom instructions that will be used to guide memory processing and fact extraction.
            </p>
          </div>
        </CardContent>
      </Card>

      {/* LLM Settings */}
      <Card>
        <CardHeader>
          <CardTitle>LLM Settings</CardTitle>
          <CardDescription>Configure your Large Language Model provider and settings</CardDescription>
        </CardHeader>
        <CardContent className="space-y-6">
          <div className="space-y-2">
            <Label htmlFor="llm-provider">LLM Provider</Label>
            <Select
              value={settings.mem0?.llm?.provider || ""}
              onValueChange={handleLlmProviderChange}
            >
              <SelectTrigger id="llm-provider">
                <SelectValue placeholder="Select a provider" />
              </SelectTrigger>
              <SelectContent>
                {LLM_PROVIDERS.map((provider) => (
                  <SelectItem key={provider} value={provider.toLowerCase()}>
                    {provider}
                  </SelectItem>
                ))}
              </SelectContent>
            </Select>
          </div>

          <div className="space-y-2">
            <Label htmlFor="llm-model">Model</Label>
            <Input
              id="llm-model"
              placeholder="Enter model name"
              value={settings.mem0?.llm?.config?.model || ""}
              onChange={(e) => handleLlmConfigChange("model", e.target.value)}
            />
          </div>

          {isLlmOllama && (
            <div className="space-y-2">
              <Label htmlFor="llm-ollama-url">Ollama Base URL</Label>
              <Input
                id="llm-ollama-url"
                placeholder="http://host.docker.internal:11434"
                value={settings.mem0?.llm?.config?.ollama_base_url || ""}
                onChange={(e) => handleLlmConfigChange("ollama_base_url", e.target.value)}
              />
              <p className="text-xs text-muted-foreground mt-1">
                Leave empty to use default: http://host.docker.internal:11434
              </p>
            </div>
          )}

          {needsLlmApiKey && (
            <div className="space-y-2">
              <Label htmlFor="llm-api-key">API Key</Label>
              <div className="relative">
                <Input
                  id="llm-api-key"
                  type={showLlmApiKey ? "text" : "password"}
                  placeholder="env:API_KEY"
                  value={settings.mem0?.llm?.config?.api_key || ""}
                  onChange={(e) => handleLlmConfigChange("api_key", e.target.value)}
                />
                <Button
                  variant="ghost"
                  size="icon"
                  type="button"
                  className="absolute right-2 top-1/2 transform -translate-y-1/2 h-7 w-7"
                  onClick={() => setShowLlmApiKey(!showLlmApiKey)}
                >
                  {showLlmApiKey ? <EyeOff className="h-4 w-4" /> : <Eye className="h-4 w-4" />}
                </Button>
              </div>
              <p className="text-xs text-muted-foreground mt-1">
                Use "env:API_KEY" to load from environment variable, or enter directly
              </p>
            </div>
          )}

          <div className="flex items-center space-x-2 pt-2">
            <Switch id="llm-advanced-settings" checked={showLlmAdvanced} onCheckedChange={setShowLlmAdvanced} />
            <Label htmlFor="llm-advanced-settings">Show advanced settings</Label>
          </div>

          {showLlmAdvanced && (
            <div className="space-y-6 pt-2">
              <div className="space-y-2">
                <div className="flex justify-between">
                  <Label htmlFor="temperature">Temperature: {settings.mem0?.llm?.config?.temperature}</Label>
                </div>
                <Slider
                  id="temperature"
                  min={0}
                  max={1}
                  step={0.1}
                  value={[settings.mem0?.llm?.config?.temperature || 0.7]}
                  onValueChange={(value) => handleLlmConfigChange("temperature", value[0])}
                />
              </div>

              <div className="space-y-2">
                <Label htmlFor="max-tokens">Max Tokens</Label>
                <Input
                  id="max-tokens"
                  type="number"
                  placeholder="2000"
                  value={settings.mem0?.llm?.config?.max_tokens || ""}
                  onChange={(e) => handleLlmConfigChange("max_tokens", Number.parseInt(e.target.value) || "")}
                />
              </div>
            </div>
          )}
        </CardContent>
      </Card>

      {/* Embedder Settings */}
      <Card>
        <CardHeader>
          <CardTitle>Embedder Settings</CardTitle>
          <CardDescription>Configure your Embedding Model provider and settings</CardDescription>
        </CardHeader>
        <CardContent className="space-y-6">
          <div className="space-y-2">
            <Label htmlFor="embedder-provider">Embedder Provider</Label>
            <Select
              value={settings.mem0?.embedder?.provider || ""}
              onValueChange={handleEmbedderProviderChange}
            >
              <SelectTrigger id="embedder-provider">
                <SelectValue placeholder="Select a provider" />
              </SelectTrigger>
              <SelectContent>
                {EMBEDDER_PROVIDERS.map((provider) => (
                  <SelectItem key={provider} value={provider.toLowerCase()}>
                    {provider}
                  </SelectItem>
                ))}
              </SelectContent>
            </Select>
          </div>

          <div className="space-y-2">
            <Label htmlFor="embedder-model">Model</Label>
            <Input
              id="embedder-model"
              placeholder="Enter model name"
              value={settings.mem0?.embedder?.config?.model || ""}
              onChange={(e) => handleEmbedderConfigChange("model", e.target.value)}
            />
          </div>

          {isEmbedderOllama && (
            <div className="space-y-2">
              <Label htmlFor="embedder-ollama-url">Ollama Base URL</Label>
              <Input
                id="embedder-ollama-url"
                placeholder="http://host.docker.internal:11434"
                value={settings.mem0?.embedder?.config?.ollama_base_url || ""}
                onChange={(e) => handleEmbedderConfigChange("ollama_base_url", e.target.value)}
              />
              <p className="text-xs text-muted-foreground mt-1">
                Leave empty to use default: http://host.docker.internal:11434
              </p>
            </div>
          )}

          {needsEmbedderApiKey && (
            <div className="space-y-2">
              <Label htmlFor="embedder-api-key">API Key</Label>
              <div className="relative">
                <Input
                  id="embedder-api-key"
                  type={showEmbedderApiKey ? "text" : "password"}
                  placeholder="env:API_KEY"
                  value={settings.mem0?.embedder?.config?.api_key || ""}
                  onChange={(e) => handleEmbedderConfigChange("api_key", e.target.value)}
                />
                <Button
                  variant="ghost"
                  size="icon"
                  type="button"
                  className="absolute right-2 top-1/2 transform -translate-y-1/2 h-7 w-7"
                  onClick={() => setShowEmbedderApiKey(!showEmbedderApiKey)}
                >
                  {showEmbedderApiKey ? <EyeOff className="h-4 w-4" /> : <Eye className="h-4 w-4" />}
                </Button>
              </div>
              <p className="text-xs text-muted-foreground mt-1">
                Use "env:API_KEY" to load from environment variable, or enter directly
              </p>
            </div>
          )}
        </CardContent>
      </Card>
    </div>
  )
}
openmemory/ui/components/json-editor.tsx (new file, 79 lines)
@@ -0,0 +1,79 @@
"use client"

import type React from "react"

import { useState, useEffect } from "react"
import { AlertCircle, CheckCircle2 } from "lucide-react"
import { Alert, AlertDescription } from "./ui/alert"
import { Button } from "./ui/button"
import { Textarea } from "./ui/textarea"

interface JsonEditorProps {
  value: any
  onChange: (value: any) => void
}

export function JsonEditor({ value, onChange }: JsonEditorProps) {
  const [jsonString, setJsonString] = useState("")
  const [error, setError] = useState<string | null>(null)
  const [isValid, setIsValid] = useState(true)

  useEffect(() => {
    try {
      setJsonString(JSON.stringify(value, null, 2))
      setIsValid(true)
      setError(null)
    } catch (err) {
      setError("Invalid JSON object")
      setIsValid(false)
    }
  }, [value])

  const handleTextChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => {
    setJsonString(e.target.value)
    try {
      JSON.parse(e.target.value)
      setIsValid(true)
      setError(null)
    } catch (err) {
      setError("Invalid JSON syntax")
      setIsValid(false)
    }
  }

  const handleApply = () => {
    try {
      const parsed = JSON.parse(jsonString)
      onChange(parsed)
      setIsValid(true)
      setError(null)
    } catch (err) {
      setError("Failed to apply changes: Invalid JSON")
    }
  }

  return (
    <div className="space-y-4">
      <div className="relative">
        <Textarea value={jsonString} onChange={handleTextChange} className="font-mono h-[600px] resize-none" />
        <div className="absolute top-3 right-3">
          {isValid ? (
            <CheckCircle2 className="h-5 w-5 text-green-500" />
          ) : (
            <AlertCircle className="h-5 w-5 text-red-500" />
          )}
        </div>
      </div>

      {error && (
        <Alert variant="destructive">
          <AlertDescription>{error}</AlertDescription>
        </Alert>
      )}

      <Button onClick={handleApply} disabled={!isValid} className="w-full">
        Apply Changes
      </Button>
    </div>
  )
}
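Usage note: JsonEditor keeps the raw string in local state and only propagates a parsed object after "Apply Changes" succeeds, so a half-typed document never corrupts the parent's config. A minimal sketch of wiring it into a parent (the AdvancedSettings wrapper below is hypothetical, not part of this commit):

// Hypothetical parent component holding the config being edited.
"use client"

import { useState } from "react"
import { JsonEditor } from "@/components/json-editor"

export function AdvancedSettings() {
  const [config, setConfig] = useState<any>({ mem0: {} })
  // onChange only fires with valid, parsed JSON (after Apply Changes).
  return <JsonEditor value={config} onChange={setConfig} />
}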
131
openmemory/ui/hooks/useConfig.ts
Normal file
@@ -0,0 +1,131 @@
import { useState } from 'react';
import axios from 'axios';
import { useDispatch, useSelector } from 'react-redux';
import { AppDispatch, RootState } from '@/store/store';
import {
  setConfigLoading,
  setConfigSuccess,
  setConfigError,
  updateLLM,
  updateEmbedder,
  updateMem0Config,
  updateOpenMemory,
  LLMProvider,
  EmbedderProvider,
  Mem0Config,
  OpenMemoryConfig
} from '@/store/configSlice';

interface UseConfigApiReturn {
  fetchConfig: () => Promise<void>;
  saveConfig: (config: { openmemory?: OpenMemoryConfig; mem0: Mem0Config }) => Promise<void>;
  saveLLMConfig: (llmConfig: LLMProvider) => Promise<void>;
  saveEmbedderConfig: (embedderConfig: EmbedderProvider) => Promise<void>;
  resetConfig: () => Promise<void>;
  isLoading: boolean;
  error: string | null;
}

export const useConfig = (): UseConfigApiReturn => {
  const [isLoading, setIsLoading] = useState<boolean>(false);
  const [error, setError] = useState<string | null>(null);
  const dispatch = useDispatch<AppDispatch>();
  const URL = process.env.NEXT_PUBLIC_API_URL || "http://localhost:8765";

  const fetchConfig = async () => {
    setIsLoading(true);
    dispatch(setConfigLoading());

    try {
      const response = await axios.get(`${URL}/api/v1/config`);
      dispatch(setConfigSuccess(response.data));
      setIsLoading(false);
    } catch (err: any) {
      const errorMessage = err.response?.data?.detail || err.message || 'Failed to fetch configuration';
      dispatch(setConfigError(errorMessage));
      setError(errorMessage);
      setIsLoading(false);
      throw new Error(errorMessage);
    }
  };

  const saveConfig = async (config: { openmemory?: OpenMemoryConfig; mem0: Mem0Config }) => {
    setIsLoading(true);
    setError(null);

    try {
      const response = await axios.put(`${URL}/api/v1/config`, config);
      dispatch(setConfigSuccess(response.data));
      setIsLoading(false);
      return response.data;
    } catch (err: any) {
      const errorMessage = err.response?.data?.detail || err.message || 'Failed to save configuration';
      dispatch(setConfigError(errorMessage));
      setError(errorMessage);
      setIsLoading(false);
      throw new Error(errorMessage);
    }
  };

  const resetConfig = async () => {
    setIsLoading(true);
    setError(null);

    try {
      const response = await axios.post(`${URL}/api/v1/config/reset`);
      dispatch(setConfigSuccess(response.data));
      setIsLoading(false);
      return response.data;
    } catch (err: any) {
      const errorMessage = err.response?.data?.detail || err.message || 'Failed to reset configuration';
      dispatch(setConfigError(errorMessage));
      setError(errorMessage);
      setIsLoading(false);
      throw new Error(errorMessage);
    }
  };

  const saveLLMConfig = async (llmConfig: LLMProvider) => {
    setIsLoading(true);
    setError(null);

    try {
      const response = await axios.put(`${URL}/api/v1/config/mem0/llm`, llmConfig);
      dispatch(updateLLM(response.data));
      setIsLoading(false);
      return response.data;
    } catch (err: any) {
      const errorMessage = err.response?.data?.detail || err.message || 'Failed to save LLM configuration';
      setError(errorMessage);
      setIsLoading(false);
      throw new Error(errorMessage);
    }
  };

  const saveEmbedderConfig = async (embedderConfig: EmbedderProvider) => {
    setIsLoading(true);
    setError(null);

    try {
      const response = await axios.put(`${URL}/api/v1/config/mem0/embedder`, embedderConfig);
      dispatch(updateEmbedder(response.data));
      setIsLoading(false);
      return response.data;
    } catch (err: any) {
      const errorMessage = err.response?.data?.detail || err.message || 'Failed to save Embedder configuration';
      setError(errorMessage);
      setIsLoading(false);
      throw new Error(errorMessage);
    }
  };

  return {
    fetchConfig,
    saveConfig,
    saveLLMConfig,
    saveEmbedderConfig,
    resetConfig,
    isLoading,
    error
  };
};
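Usage note: every mutation in this hook both updates the Redux slice and rethrows on failure, so callers can attach their own toasts or redirects. A minimal sketch of calling it from a settings page (component and handler names are illustrative, not part of this commit):

// Hypothetical caller of useConfig.
"use client"

import { useEffect } from "react"
import { useConfig } from "@/hooks/useConfig"

export function SettingsBootstrap() {
  const { fetchConfig, saveLLMConfig, isLoading } = useConfig()

  useEffect(() => {
    // Hydrate the config slice from GET /api/v1/config on mount.
    fetchConfig().catch(() => { /* error is already stored in the slice */ })
  }, [])

  const useOllama = () =>
    saveLLMConfig({
      provider: "ollama",
      config: {
        model: "llama3.1",
        temperature: 0.1,
        max_tokens: 2000,
        ollama_base_url: "http://host.docker.internal:11434",
      },
    })

  return <button onClick={useOllama} disabled={isLoading}>Switch LLM to Ollama</button>
}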
114
openmemory/ui/store/configSlice.ts
Normal file
@@ -0,0 +1,114 @@
import { createSlice, PayloadAction } from '@reduxjs/toolkit';

export interface LLMConfig {
  model: string;
  temperature: number;
  max_tokens: number;
  api_key?: string;
  ollama_base_url?: string;
}

export interface LLMProvider {
  provider: string;
  config: LLMConfig;
}

export interface EmbedderConfig {
  model: string;
  api_key?: string;
  ollama_base_url?: string;
}

export interface EmbedderProvider {
  provider: string;
  config: EmbedderConfig;
}

export interface Mem0Config {
  llm?: LLMProvider;
  embedder?: EmbedderProvider;
}

export interface OpenMemoryConfig {
  custom_instructions?: string | null;
}

export interface ConfigState {
  openmemory: OpenMemoryConfig;
  mem0: Mem0Config;
  status: 'idle' | 'loading' | 'succeeded' | 'failed';
  error: string | null;
}

const initialState: ConfigState = {
  openmemory: {
    custom_instructions: null,
  },
  mem0: {
    llm: {
      provider: 'openai',
      config: {
        model: 'gpt-4o-mini',
        temperature: 0.1,
        max_tokens: 2000,
        api_key: 'env:OPENAI_API_KEY',
      },
    },
    embedder: {
      provider: 'openai',
      config: {
        model: 'text-embedding-3-small',
        api_key: 'env:OPENAI_API_KEY',
      },
    },
  },
  status: 'idle',
  error: null,
};

const configSlice = createSlice({
  name: 'config',
  initialState,
  reducers: {
    setConfigLoading: (state) => {
      state.status = 'loading';
      state.error = null;
    },
    setConfigSuccess: (state, action: PayloadAction<{ openmemory?: OpenMemoryConfig; mem0: Mem0Config }>) => {
      if (action.payload.openmemory) {
        state.openmemory = action.payload.openmemory;
      }
      state.mem0 = action.payload.mem0;
      state.status = 'succeeded';
      state.error = null;
    },
    setConfigError: (state, action: PayloadAction<string>) => {
      state.status = 'failed';
      state.error = action.payload;
    },
    updateOpenMemory: (state, action: PayloadAction<OpenMemoryConfig>) => {
      state.openmemory = action.payload;
    },
    updateLLM: (state, action: PayloadAction<LLMProvider>) => {
      state.mem0.llm = action.payload;
    },
    updateEmbedder: (state, action: PayloadAction<EmbedderProvider>) => {
      state.mem0.embedder = action.payload;
    },
    updateMem0Config: (state, action: PayloadAction<Mem0Config>) => {
      state.mem0 = action.payload;
    },
  },
});

export const {
  setConfigLoading,
  setConfigSuccess,
  setConfigError,
  updateOpenMemory,
  updateLLM,
  updateEmbedder,
  updateMem0Config,
} = configSlice.actions;

export default configSlice.reducer;
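Note the 'env:OPENAI_API_KEY' defaults: rather than persisting a secret, the stored value names an environment variable. A minimal sketch of how an env:-prefixed value could be resolved at runtime (illustrative helper only; per the MCP server docstring the commit's actual parsing happens in the Python API layer, and this exact function is not part of it):

// Illustrative: "env:OPENAI_API_KEY" -> process.env.OPENAI_API_KEY;
// anything without the prefix is treated as a literal key.
function resolveApiKey(value?: string): string | undefined {
  return value?.startsWith("env:") ? process.env[value.slice(4)] : value
}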
openmemory/ui/store/store.ts
@@ -4,6 +4,7 @@ import profileReducer from './profileSlice';
import appsReducer from './appsSlice';
import uiReducer from './uiSlice';
import filtersReducer from './filtersSlice';
import configReducer from './configSlice';

export const store = configureStore({
  reducer: {
@@ -12,6 +13,7 @@ export const store = configureStore({
    apps: appsReducer,
    ui: uiReducer,
    filters: filtersReducer,
    config: configReducer,
  },
});
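With configReducer registered, the new slice is reachable through the typed RootState. A minimal read-side sketch (the component below is illustrative, not part of this commit):

// Illustrative: reading the active LLM provider from the new config slice.
import { useSelector } from "react-redux"
import { RootState } from "@/store/store"

export function CurrentLLMBadge() {
  const llm = useSelector((state: RootState) => state.config.mem0.llm)
  return <span>{llm ? `${llm.provider} / ${llm.config.model}` : "not configured"}</span>
}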