Files
t6_mem0_v2/test-ollama-memory.py
Claude Code 56aa8699cc Fix timezone configuration and Ollama dependencies
- Fix mem0 library hardcoded US/Pacific timezone in Docker build
- Add TZ=Europe/Prague environment variable to containers
- Add missing ollama Python library to requirements.txt
- Add Ollama environment variables to MCP container
- Include test scripts for Ollama configuration validation

This resolves timestamp issues where memories were created with
incorrect Pacific timezone (-07:00) instead of local time (+02:00).

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-10-16 07:54:41 +02:00

119 lines
4.3 KiB
Python

#!/usr/bin/env python3
"""
Test memory operations with Ollama
"""
import sys
from mem0 import Memory
from config import mem0_config
def test_memory_operations():
    """Run an end-to-end smoke test of mem0 backed by Ollama.

    Exercises the full memory lifecycle in order: initialize, add,
    search, get_all, add again, search again, then delete_all cleanup.

    Returns:
        bool: True when every operation succeeds (a cleanup failure only
        warns), False on the first hard failure.
    """
    print("=" * 60)
    print("Testing Memory Operations with Ollama")
    print("=" * 60)

    # Initialize mem0 from the project-level config dict (see config.py).
    print("\n1. Initializing mem0 with Ollama...")
    try:
        memory = Memory.from_config(mem0_config)
        print(" ✓ Mem0 initialized successfully")
        print(f" LLM: {mem0_config['llm']['provider']} ({mem0_config['llm']['config']['model']})")
        print(f" Embedder: {mem0_config['embedder']['provider']} ({mem0_config['embedder']['config']['model']})")
    except Exception as e:
        print(f" ✗ Failed to initialize mem0: {e}")
        return False

    # Test 1: Add memory
    print("\n2. Test: Adding memory...")
    try:
        messages = [
            {"role": "user", "content": "I am testing Ollama integration with local LLM models"},
            {"role": "assistant", "content": "Great! I'll remember that you're testing Ollama."}
        ]
        result = memory.add(messages, user_id="ollama_test_user")
        print(" ✓ Memory added successfully")
        print(f" Result: {result}")
    except Exception as e:
        print(f" ✗ Failed to add memory: {e}")
        return False

    # Test 2: Search memory
    print("\n3. Test: Searching memories...")
    try:
        search_results = memory.search("What am I testing?", user_id="ollama_test_user")
        print(" ✓ Search completed successfully")
        # NOTE(review): assumes search() returns a list of dicts/strings;
        # newer mem0 versions may wrap results in {"results": [...]} — verify.
        print(f" Found {len(search_results)} results")
        if search_results:
            for i, result in enumerate(search_results, 1):
                if isinstance(result, dict):
                    print(f" Result {i}: {result.get('memory', 'N/A')}")
                else:
                    print(f" Result {i}: {result}")
    except Exception as e:
        print(f" ✗ Failed to search memories: {e}")
        import traceback
        traceback.print_exc()
        return False

    # Test 3: Get all memories
    print("\n4. Test: Getting all memories...")
    try:
        all_memories = memory.get_all(user_id="ollama_test_user")
        print(" ✓ Retrieved all memories successfully")
        print(f" Total memories: {len(all_memories)}")
    except Exception as e:
        print(f" ✗ Failed to get all memories: {e}")
        return False

    # Test 4: Add another memory
    print("\n5. Test: Adding another memory...")
    try:
        messages = [
            {"role": "user", "content": "Ollama provides privacy and cost savings compared to OpenAI"},
            {"role": "assistant", "content": "Noted! Those are great advantages of local LLMs."}
        ]
        result = memory.add(messages, user_id="ollama_test_user")
        print(" ✓ Second memory added successfully")
    except Exception as e:
        print(f" ✗ Failed to add second memory: {e}")
        return False

    # Test 5: Search again
    print("\n6. Test: Searching for Ollama advantages...")
    try:
        search_results = memory.search("What are the benefits of Ollama?", user_id="ollama_test_user")
        print(" ✓ Search completed successfully")
        print(f" Found {len(search_results)} results")
        if search_results:
            for i, result in enumerate(search_results, 1):
                if isinstance(result, dict):
                    memory_text = result.get('memory', str(result))
                    score = result.get('score', 0)
                    print(f" Result {i} (score: {score:.3f}): {memory_text}")
                else:
                    print(f" Result {i}: {result}")
    except Exception as e:
        print(f" ✗ Failed to search memories: {e}")
        return False

    # Test 6: Cleanup — a failure here is non-fatal (data is test-only).
    print("\n7. Test: Cleaning up test data...")
    try:
        memory.delete_all(user_id="ollama_test_user")
        print(" ✓ Test data cleaned up successfully")
    except Exception as e:
        print(f" ⚠ Warning: Failed to cleanup: {e}")

    print("\n" + "=" * 60)
    print("All memory operations completed successfully!")
    print("Ollama integration is working correctly.")
    print("=" * 60)
    return True
if __name__ == "__main__":
    # Exit status mirrors the test outcome: 0 on success, 1 on failure.
    sys.exit(0 if test_memory_operations() else 1)