Integrate self-hosted Supabase with mem0 system
- Configure mem0 to use self-hosted Supabase instead of Qdrant for vector storage
- Update docker-compose to connect containers to the localai network
- Install the vecs library for Supabase pgvector integration
- Create a comprehensive test suite for the Supabase + mem0 integration
- Update documentation to reflect the Supabase configuration
- All containers are now connected to the shared localai network
- Successful vector storage and retrieval tests completed

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
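The switch from Qdrant to self-hosted Supabase comes down to swapping mem0's `vector_store` provider. Below is a minimal sketch of that wiring, assuming mem0's `supabase` provider (backed by the vecs library) and a local Postgres connection; the connection string, credentials, and collection name are placeholders, not values taken from this commit.

```python
# Hypothetical sketch of the mem0 + self-hosted Supabase setup described above.
# Assumes pgvector is enabled on the Supabase Postgres instance and that the
# vecs library is installed alongside mem0. All connection details are placeholders.
from mem0 import Memory

config = {
    "vector_store": {
        "provider": "supabase",  # assumption: mem0's vecs-backed Supabase provider
        "config": {
            # Placeholder connection string for a local self-hosted Supabase Postgres
            "connection_string": "postgresql://postgres:postgres@localhost:5432/postgres",
            "collection_name": "mem0_memories",  # placeholder collection name
        },
    },
}

memory = Memory.from_config(config)

# Quick smoke test: store a memory and query it back through pgvector.
memory.add("The deployment uses a self-hosted Supabase instance.", user_id="test-user")
print(memory.search("Where are vectors stored?", user_id="test-user"))
```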
test_all_connections.py (184 lines, new file)
@@ -0,0 +1,184 @@
#!/usr/bin/env python3
"""
Comprehensive test of all database and service connections for the mem0 system
"""

import os
import requests
import json
from dotenv import load_dotenv
from config import load_config, get_mem0_config

# Load environment variables
load_dotenv()


def test_qdrant_connection():
    """Test Qdrant vector database connection"""
    try:
        print("Testing Qdrant connection...")
        response = requests.get("http://localhost:6333/collections")
        if response.status_code == 200:
            print("✅ Qdrant is accessible")
            collections = response.json()
            print(f"   Current collections: {len(collections.get('result', {}).get('collections', []))}")
            return True
        else:
            print(f"❌ Qdrant error: {response.status_code}")
            return False
    except Exception as e:
        print(f"❌ Qdrant connection failed: {e}")
        return False


def test_neo4j_connection():
    """Test Neo4j graph database connection"""
    try:
        print("Testing Neo4j connection...")
        from neo4j import GraphDatabase

        config = load_config()
        driver = GraphDatabase.driver(
            config.database.neo4j_uri,
            auth=(config.database.neo4j_username, config.database.neo4j_password)
        )

        with driver.session() as session:
            result = session.run("RETURN 'Hello Neo4j!' as message")
            record = result.single()
            if record and record["message"] == "Hello Neo4j!":
                print("✅ Neo4j is accessible and working")

                # Check Neo4j version
                version_result = session.run("CALL dbms.components() YIELD versions RETURN versions")
                version_record = version_result.single()
                if version_record:
                    print(f"   Neo4j version: {version_record['versions'][0]}")

                driver.close()
                return True

        driver.close()
        return False
    except Exception as e:
        print(f"❌ Neo4j connection failed: {e}")
        return False


def test_supabase_connection():
    """Test Supabase connection"""
    try:
        print("Testing Supabase connection...")
        config = load_config()

        if not config.database.supabase_url or not config.database.supabase_key:
            print("❌ Supabase configuration missing")
            return False

        headers = {
            "apikey": config.database.supabase_key,
            "Authorization": f"Bearer {config.database.supabase_key}",
            "Content-Type": "application/json"
        }

        # Test basic API connection
        response = requests.get(f"{config.database.supabase_url}/rest/v1/", headers=headers)
        if response.status_code == 200:
            print("✅ Supabase is accessible")
            return True
        else:
            print(f"❌ Supabase error: {response.status_code} - {response.text}")
            return False
    except Exception as e:
        print(f"❌ Supabase connection failed: {e}")
        return False


def test_ollama_connection():
    """Test Ollama local LLM connection"""
    try:
        print("Testing Ollama connection...")
        response = requests.get("http://localhost:11434/api/tags")
        if response.status_code == 200:
            models = response.json()
            model_names = [model["name"] for model in models.get("models", [])]
            print("✅ Ollama is accessible")
            print(f"   Available models: {len(model_names)}")
            print(f"   Recommended models: {[m for m in model_names if 'llama3' in m or 'qwen' in m or 'nomic-embed' in m][:3]}")
            return True
        else:
            print(f"❌ Ollama error: {response.status_code}")
            return False
    except Exception as e:
        print(f"❌ Ollama connection failed: {e}")
        return False


def test_mem0_integration():
    """Test mem0 integration with available services"""
    try:
        print("\nTesting mem0 integration...")
        config = load_config()

        # Test with Qdrant (default vector store)
        print("Testing mem0 with Qdrant vector store...")
        mem0_config = {
            "vector_store": {
                "provider": "qdrant",
                "config": {
                    "host": "localhost",
                    "port": 6333
                }
            }
        }

        # Test if we can initialize (without LLM for now)
        from mem0.configs.base import MemoryConfig
        try:
            config_obj = MemoryConfig(**mem0_config)
            print("✅ Mem0 configuration validation passed")
        except Exception as e:
            print(f"❌ Mem0 configuration validation failed: {e}")
            return False

        return True
    except Exception as e:
        print(f"❌ Mem0 integration test failed: {e}")
        return False


def main():
    """Run all connection tests"""
    print("=" * 60)
    print("MEM0 SYSTEM CONNECTION TESTS")
    print("=" * 60)

    results = {}

    # Test all connections
    results["qdrant"] = test_qdrant_connection()
    results["neo4j"] = test_neo4j_connection()
    results["supabase"] = test_supabase_connection()
    results["ollama"] = test_ollama_connection()
    results["mem0"] = test_mem0_integration()

    # Summary
    print("\n" + "=" * 60)
    print("CONNECTION TEST SUMMARY")
    print("=" * 60)

    total_tests = len(results)
    passed_tests = sum(results.values())

    for service, status in results.items():
        status_symbol = "✅" if status else "❌"
        print(f"{status_symbol} {service.upper()}: {'PASS' if status else 'FAIL'}")

    print(f"\nOverall: {passed_tests}/{total_tests} tests passed")

    if passed_tests == total_tests:
        print("🎉 All systems are ready!")
        print("\nNext steps:")
        print("1. Add OpenAI API key to .env file for initial testing")
        print("2. Run test_openai.py to verify OpenAI integration")
        print("3. Start building the core memory system")
    else:
        print("💥 Some systems need attention before proceeding")

    return passed_tests == total_tests


if __name__ == "__main__":
    main()