Fix timezone configuration and Ollama dependencies
- Fix mem0 library hardcoded US/Pacific timezone in Docker build
- Add TZ=Europe/Prague environment variable to containers
- Add missing ollama Python library to requirements.txt
- Add Ollama environment variables to MCP container
- Include test scripts for Ollama configuration validation

This resolves timestamp issues where memories were created with an incorrect Pacific timezone offset (-07:00) instead of the local time (+02:00).

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
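The timestamp mismatch described above can be reproduced in a few lines. This is a minimal sketch (not part of the commit) contrasting mem0's old hardcoded pytz.timezone("US/Pacific") call with the patched datetime.now().astimezone(), which honors the TZ variable the compose files now set:

import os
import time
from datetime import datetime

import pytz  # mem0 uses pytz for the hardcoded zone

os.environ["TZ"] = "Europe/Prague"   # what docker-compose now sets for the containers
time.tzset()                         # POSIX-only; makes the TZ change visible to this process

hardcoded = datetime.now(pytz.timezone("US/Pacific"))  # old mem0 behaviour: -07:00 / -08:00
patched = datetime.now().astimezone()                  # behaviour after the sed patch: +02:00

print("hardcoded:", hardcoded.isoformat())
print("patched:  ", patched.isoformat())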
@@ -37,6 +37,7 @@ services:
     ports:
       - "${API_PORT:-8080}:8080"
     environment:
+      - TZ=Europe/Prague
       - OPENAI_API_KEY=${OPENAI_API_KEY}
       - SUPABASE_CONNECTION_STRING=${SUPABASE_CONNECTION_STRING}
       - NEO4J_URI=neo4j://neo4j:7687
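A quick way to confirm the new TZ variable actually reaches the API container's Python process; a sketch only, assuming Python 3.9+ and tzdata available in the image:

from datetime import datetime
from zoneinfo import ZoneInfo  # stdlib since Python 3.9; needs tzdata present in slim images

local = datetime.now().astimezone()
prague = datetime.now(ZoneInfo("Europe/Prague"))

# With TZ=Europe/Prague set, the container's local offset should match Prague's.
assert local.utcoffset() == prague.utcoffset(), "TZ=Europe/Prague is not taking effect"
print("container offset:", local.utcoffset())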
@@ -71,17 +72,31 @@ services:
     ports:
       - "${MCP_PORT:-8765}:8765"
     environment:
+      # System
+      - TZ=Europe/Prague
+      # LLM Provider Selection
+      - LLM_PROVIDER=${LLM_PROVIDER:-openai}
+      - EMBEDDER_PROVIDER=${EMBEDDER_PROVIDER:-openai}
+      # OpenAI (optional if using Ollama)
       - OPENAI_API_KEY=${OPENAI_API_KEY}
+      # Ollama Configuration
+      - OLLAMA_BASE_URL=${OLLAMA_BASE_URL:-http://localhost:11434}
+      - OLLAMA_LLM_MODEL=${OLLAMA_LLM_MODEL:-llama3.1:8b}
+      - OLLAMA_EMBEDDING_MODEL=${OLLAMA_EMBEDDING_MODEL:-nomic-embed-text}
+      # Database
       - SUPABASE_CONNECTION_STRING=${SUPABASE_CONNECTION_STRING}
       - NEO4J_URI=neo4j://neo4j:7687
       - NEO4J_USER=${NEO4J_USER:-neo4j}
       - NEO4J_PASSWORD=${NEO4J_PASSWORD}
+      # API
       - API_KEY=${API_KEY}
       - MCP_HOST=0.0.0.0
       - MCP_PORT=8765
+      # Mem0 Configuration
       - MEM0_COLLECTION_NAME=${MEM0_COLLECTION_NAME:-t6_memories}
       - MEM0_EMBEDDING_DIMS=${MEM0_EMBEDDING_DIMS:-1536}
       - MEM0_VERSION=${MEM0_VERSION:-v1.1}
+      # Logging
       - LOG_LEVEL=${LOG_LEVEL:-INFO}
       - ENVIRONMENT=${ENVIRONMENT:-production}
     depends_on:
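How the MCP container presumably consumes these variables; a sketch only, since config.py is not part of this diff. The helper below is hypothetical, and the names and defaults simply mirror the compose entries above:

import os

# Hypothetical helper mirroring the defaults declared in docker-compose.yml;
# the real resolution logic lives in config.py, which this commit does not show.
def ollama_settings() -> dict:
    return {
        "llm_provider": os.getenv("LLM_PROVIDER", "openai"),
        "embedder_provider": os.getenv("EMBEDDER_PROVIDER", "openai"),
        "base_url": os.getenv("OLLAMA_BASE_URL", "http://localhost:11434"),
        "llm_model": os.getenv("OLLAMA_LLM_MODEL", "llama3.1:8b"),
        "embedding_model": os.getenv("OLLAMA_EMBEDDING_MODEL", "nomic-embed-text"),
        "embedding_dims": int(os.getenv("MEM0_EMBEDDING_DIMS", "1536")),
    }

if __name__ == "__main__":
    print(ollama_settings())

Note that the ${OLLAMA_BASE_URL:-http://localhost:11434} default resolves to the MCP container itself; when Ollama runs on the Docker host or in another container, the variable typically has to be overridden to point at that host.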
@@ -17,6 +17,9 @@ COPY requirements.txt .
 # Install Python dependencies
 RUN pip install --no-cache-dir -r requirements.txt
 
+# Fix mem0 hardcoded US/Pacific timezone to use system timezone
+RUN sed -i 's/datetime\.now(pytz\.timezone("US\/Pacific"))/datetime.now().astimezone()/g' /usr/local/lib/python3.11/site-packages/mem0/memory/main.py
+
 # Copy application code
 COPY config.py .
 COPY memory_cleanup.py .
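A small sanity check that the sed patch was applied in the built image; a sketch, assuming the python:3.11 base image implied by the site-packages path and that every hardcoded call matched the sed pattern:

from pathlib import Path

# Path used by the RUN sed step above.
patched_file = Path("/usr/local/lib/python3.11/site-packages/mem0/memory/main.py")

source = patched_file.read_text()
assert 'pytz.timezone("US/Pacific")' not in source, "mem0 still contains the hardcoded timezone"
assert "datetime.now().astimezone()" in source, "patched call not found"
print("mem0 timezone patch is in place")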
@@ -20,6 +20,9 @@ vecs==0.4.*
 # mem0ai 0.1.118 requires openai<1.110.0,>=1.90.0
 openai>=1.90.0,<1.110.0
 
+# Ollama
+ollama>=0.4.0
+
 # Utilities
 python-dotenv==1.0.*
 httpx==0.28.*
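The newly added ollama dependency can be exercised directly against the models named in the compose defaults. A sketch using the ollama-python 0.4 client; the host and model names are the compose defaults, not guaranteed to be pulled on a given machine:

from ollama import Client  # provided by the ollama>=0.4.0 dependency added above

# Host and model names are the defaults from docker-compose.yml; adjust if overridden.
client = Client(host="http://localhost:11434")

print("available models:", client.list())

reply = client.chat(
    model="llama3.1:8b",
    messages=[{"role": "user", "content": "Reply with one word: ready?"}],
)
print("chat reply:", reply["message"]["content"])

emb = client.embed(model="nomic-embed-text", input="timezone test")
print("embedding dims:", len(emb["embeddings"][0]))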
test-ollama-config.py (new file, 97 lines)
@@ -0,0 +1,97 @@
#!/usr/bin/env python3
"""
Test Ollama configuration
"""

import os
import sys
from config import get_settings, get_mem0_config


def test_config():
    """Test configuration loading with Ollama"""

    print("=" * 60)
    print("Testing Ollama Configuration")
    print("=" * 60)

    # Load settings
    print("\n1. Loading settings from .env...")
    try:
        settings = get_settings()
        print(f" ✓ Settings loaded successfully")
    except Exception as e:
        print(f" ✗ Failed to load settings: {e}")
        return False

    # Display provider configuration
    print(f"\n2. Provider Configuration:")
    print(f" LLM Provider: {settings.llm_provider}")
    print(f" Embedder Provider: {settings.embedder_provider}")

    if settings.llm_provider.lower() == "ollama":
        print(f"\n3. Ollama LLM Settings:")
        print(f" Base URL: {settings.ollama_base_url}")
        print(f" LLM Model: {settings.ollama_llm_model}")

    if settings.embedder_provider.lower() == "ollama":
        print(f"\n4. Ollama Embedder Settings:")
        print(f" Base URL: {settings.ollama_base_url}")
        print(f" Embedding Model: {settings.ollama_embedding_model}")
        print(f" Embedding Dims: {settings.mem0_embedding_dims}")

    # Generate mem0 config
    print(f"\n5. Generating mem0 configuration...")
    try:
        mem0_config = get_mem0_config(settings)
        print(f" ✓ Mem0 config generated successfully")
    except Exception as e:
        print(f" ✗ Failed to generate mem0 config: {e}")
        return False

    # Display mem0 config
    print(f"\n6. Mem0 Configuration:")
    print(f" Vector Store: {mem0_config['vector_store']['provider']}")
    print(f" Graph Store: {mem0_config['graph_store']['provider']}")
    print(f" LLM Provider: {mem0_config['llm']['provider']}")
    print(f" LLM Model: {mem0_config['llm']['config'].get('model', 'N/A')}")
    print(f" Embedder Provider: {mem0_config['embedder']['provider']}")
    print(f" Embedder Model: {mem0_config['embedder']['config'].get('model', 'N/A')}")

    # Test Ollama connectivity
    if settings.llm_provider.lower() == "ollama" or settings.embedder_provider.lower() == "ollama":
        print(f"\n7. Testing Ollama connectivity...")
        import httpx
        try:
            response = httpx.get(f"{settings.ollama_base_url}/api/tags", timeout=5.0)
            if response.status_code == 200:
                print(f" ✓ Ollama is reachable at {settings.ollama_base_url}")
                models = response.json()
                model_names = [m['name'] for m in models.get('models', [])]

                # Check if required models are available
                if settings.llm_provider.lower() == "ollama":
                    if settings.ollama_llm_model in model_names or f"{settings.ollama_llm_model}:latest" in model_names:
                        print(f" ✓ LLM model '{settings.ollama_llm_model}' is available")
                    else:
                        print(f" ✗ LLM model '{settings.ollama_llm_model}' not found")
                        print(f" Available models: {', '.join(model_names[:5])}")

                if settings.embedder_provider.lower() == "ollama":
                    if settings.ollama_embedding_model in model_names or f"{settings.ollama_embedding_model}:latest" in model_names:
                        print(f" ✓ Embedding model '{settings.ollama_embedding_model}' is available")
                    else:
                        print(f" ✗ Embedding model '{settings.ollama_embedding_model}' not found")
            else:
                print(f" ✗ Ollama returned status code: {response.status_code}")
        except Exception as e:
            print(f" ✗ Cannot reach Ollama: {e}")

    print("\n" + "=" * 60)
    print("Configuration test completed successfully!")
    print("=" * 60)

    return True


if __name__ == "__main__":
    success = test_config()
    sys.exit(0 if success else 1)
test-ollama-memory.py (new file, 118 lines)
@@ -0,0 +1,118 @@
#!/usr/bin/env python3
"""
Test memory operations with Ollama
"""

import sys
from mem0 import Memory
from config import mem0_config


def test_memory_operations():
    """Test memory operations with Ollama LLM and embeddings"""

    print("=" * 60)
    print("Testing Memory Operations with Ollama")
    print("=" * 60)

    # Initialize mem0
    print("\n1. Initializing mem0 with Ollama...")
    try:
        memory = Memory.from_config(mem0_config)
        print(" ✓ Mem0 initialized successfully")
        print(f" LLM: {mem0_config['llm']['provider']} ({mem0_config['llm']['config']['model']})")
        print(f" Embedder: {mem0_config['embedder']['provider']} ({mem0_config['embedder']['config']['model']})")
    except Exception as e:
        print(f" ✗ Failed to initialize mem0: {e}")
        return False

    # Test 1: Add memory
    print("\n2. Test: Adding memory...")
    try:
        messages = [
            {"role": "user", "content": "I am testing Ollama integration with local LLM models"},
            {"role": "assistant", "content": "Great! I'll remember that you're testing Ollama."}
        ]
        result = memory.add(messages, user_id="ollama_test_user")
        print(f" ✓ Memory added successfully")
        print(f" Result: {result}")
    except Exception as e:
        print(f" ✗ Failed to add memory: {e}")
        return False

    # Test 2: Search memory
    print("\n3. Test: Searching memories...")
    try:
        search_results = memory.search("What am I testing?", user_id="ollama_test_user")
        print(f" ✓ Search completed successfully")
        print(f" Found {len(search_results)} results")
        if search_results:
            for i, result in enumerate(search_results, 1):
                if isinstance(result, dict):
                    print(f" Result {i}: {result.get('memory', 'N/A')}")
                else:
                    print(f" Result {i}: {result}")
    except Exception as e:
        print(f" ✗ Failed to search memories: {e}")
        import traceback
        traceback.print_exc()
        return False

    # Test 3: Get all memories
    print("\n4. Test: Getting all memories...")
    try:
        all_memories = memory.get_all(user_id="ollama_test_user")
        print(f" ✓ Retrieved all memories successfully")
        print(f" Total memories: {len(all_memories)}")
    except Exception as e:
        print(f" ✗ Failed to get all memories: {e}")
        return False

    # Test 4: Add another memory
    print("\n5. Test: Adding another memory...")
    try:
        messages = [
            {"role": "user", "content": "Ollama provides privacy and cost savings compared to OpenAI"},
            {"role": "assistant", "content": "Noted! Those are great advantages of local LLMs."}
        ]
        result = memory.add(messages, user_id="ollama_test_user")
        print(f" ✓ Second memory added successfully")
    except Exception as e:
        print(f" ✗ Failed to add second memory: {e}")
        return False

    # Test 5: Search again
    print("\n6. Test: Searching for Ollama advantages...")
    try:
        search_results = memory.search("What are the benefits of Ollama?", user_id="ollama_test_user")
        print(f" ✓ Search completed successfully")
        print(f" Found {len(search_results)} results")
        if search_results:
            for i, result in enumerate(search_results, 1):
                if isinstance(result, dict):
                    memory_text = result.get('memory', str(result))
                    score = result.get('score', 0)
                    print(f" Result {i} (score: {score:.3f}): {memory_text}")
                else:
                    print(f" Result {i}: {result}")
    except Exception as e:
        print(f" ✗ Failed to search memories: {e}")
        return False

    # Test 6: Cleanup
    print("\n7. Test: Cleaning up test data...")
    try:
        memory.delete_all(user_id="ollama_test_user")
        print(f" ✓ Test data cleaned up successfully")
    except Exception as e:
        print(f" ⚠ Warning: Failed to cleanup: {e}")

    print("\n" + "=" * 60)
    print("All memory operations completed successfully!")
    print("Ollama integration is working correctly.")
    print("=" * 60)

    return True


if __name__ == "__main__":
    success = test_memory_operations()
    sys.exit(0 if success else 1)