t66_langmem/tests/test_integration.py
Commit 46faa78237 by Docker Config Backup, 2025-07-17 13:16:19 +02:00

Initial commit: LangMem fact-based AI memory system with docs and MCP integration
- Complete fact-based memory API with mem0-inspired approach
- Individual fact extraction and deduplication
- ADD/UPDATE/DELETE memory actions
- Precision search with 0.86+ similarity scores
- MCP server for Claude Code integration
- Neo4j graph relationships and PostgreSQL vector storage
- Comprehensive documentation with architecture and API docs
- Matrix communication integration
- Production-ready Docker setup with Ollama and Supabase

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

#!/usr/bin/env python3
"""
Integration tests for LangMem API with real services
"""
import asyncio

import httpx
import pytest
import pytest_asyncio
from uuid import uuid4

# Configuration
API_BASE_URL = "http://localhost:8765"
API_KEY = "langmem_api_key_2025"
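# NOTE: these defaults assume the Docker setup from this repo is running
# locally; point API_BASE_URL and API_KEY elsewhere if the API is exposed
# on a different host, port, or credential.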

class TestLangMemIntegration:
    """Integration test suite for LangMem API"""

    @pytest_asyncio.fixture(autouse=True)
    async def _client(self):
        """Set up an authenticated test client and tear it down afterwards.

        pytest never awaits an async teardown_method, so an async autouse
        fixture is used instead to guarantee the client is closed.
        """
        self.client = httpx.AsyncClient(base_url=API_BASE_URL, timeout=30.0)
        self.headers = {"Authorization": f"Bearer {API_KEY}"}
        self.test_user_id = f"integration_user_{uuid4()}"
        self.test_session_id = f"integration_session_{uuid4()}"
        yield
        await self.client.aclose()
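
    # The fixed `asyncio.sleep(...)` waits used by the tests below can be
    # flaky on slow machines. A more robust alternative is a small
    # poll-until-visible helper like this sketch (hypothetical; the original
    # tests do not use it):
    async def _wait_until_searchable(self, query: str, user_id: str,
                                     attempts: int = 10, delay: float = 0.5) -> bool:
        """Poll the search endpoint until `query` returns at least one hit."""
        for _ in range(attempts):
            response = await self.client.post(
                "/v1/memories/search",
                json={"query": query, "user_id": user_id, "limit": 1, "threshold": 0.3},
                headers=self.headers,
            )
            if response.status_code == 200 and response.json().get("total_count", 0) > 0:
                return True
            await asyncio.sleep(delay)
        return False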

    @pytest.mark.asyncio
    async def test_full_memory_workflow(self):
        """Test complete memory workflow: store -> search -> retrieve -> delete"""
        # Step 1: Store multiple memories
        memories_data = [
            {
                "content": "FastAPI is a modern web framework for building APIs with Python",
                "user_id": self.test_user_id,
                "session_id": self.test_session_id,
                "metadata": {
                    "category": "programming",
                    "framework": "fastapi",
                    "language": "python"
                }
            },
            {
                "content": "Docker containers provide isolated environments for applications",
                "user_id": self.test_user_id,
                "session_id": self.test_session_id,
                "metadata": {
                    "category": "devops",
                    "technology": "docker"
                }
            },
            {
                "content": "Vector databases are excellent for similarity search and AI applications",
                "user_id": self.test_user_id,
                "session_id": self.test_session_id,
                "metadata": {
                    "category": "ai",
                    "technology": "vector_database"
                }
            }
        ]
        stored_ids = []
        for memory_data in memories_data:
            response = await self.client.post(
                "/v1/memories/store",
                json=memory_data,
                headers=self.headers
            )
            assert response.status_code == 200
            data = response.json()
            stored_ids.append(data["id"])
            print(f"✅ Stored memory: {data['id']}")

        # Wait for indexing
        await asyncio.sleep(2)

        # Step 2: Search for memories
        search_queries = [
            "Python web framework",
            "containerization technology",
            "AI similarity search"
        ]
        for query in search_queries:
            search_data = {
                "query": query,
                "user_id": self.test_user_id,
                "limit": 5,
                "threshold": 0.5,
                "include_graph": True
            }
            response = await self.client.post(
                "/v1/memories/search",
                json=search_data,
                headers=self.headers
            )
            assert response.status_code == 200
            data = response.json()
            assert data["total_count"] > 0
            print(f"✅ Search '{query}' found {data['total_count']} memories")

        # Step 3: Test conversation-based retrieval
        retrieve_data = {
            "messages": [
                {"role": "user", "content": "I'm working on a Python API project"},
                {"role": "assistant", "content": "That's great! What framework are you using?"},
                {"role": "user", "content": "I need something fast and modern for building APIs"}
            ],
            "user_id": self.test_user_id,
            "session_id": self.test_session_id
        }
        response = await self.client.post(
            "/v1/memories/retrieve",
            json=retrieve_data,
            headers=self.headers
        )
        assert response.status_code == 200
        data = response.json()
        assert "memories" in data
        print(f"✅ Retrieved {data['total_count']} memories for conversation")

        # Step 4: Get all user memories
        response = await self.client.get(
            f"/v1/memories/users/{self.test_user_id}",
            headers=self.headers
        )
        assert response.status_code == 200
        data = response.json()
        assert data["total_count"] >= 3
        print(f"✅ User has {data['total_count']} total memories")

        # Step 5: Clean up - delete stored memories
        for memory_id in stored_ids:
            response = await self.client.delete(
                f"/v1/memories/{memory_id}",
                headers=self.headers
            )
            assert response.status_code == 200
            print(f"✅ Deleted memory: {memory_id}")

    @pytest.mark.asyncio
    async def test_similarity_search_accuracy(self):
        """Test accuracy of similarity search"""
        # Store memories with different topics
        test_memories = [
            {
                "content": "Machine learning models require large datasets for training",
                "user_id": self.test_user_id,
                "metadata": {"topic": "ml_training"}
            },
            {
                "content": "Neural networks use backpropagation for learning",
                "user_id": self.test_user_id,
                "metadata": {"topic": "neural_networks"}
            },
            {
                "content": "Database indexing improves query performance",
                "user_id": self.test_user_id,
                "metadata": {"topic": "database_performance"}
            }
        ]
        stored_ids = []
        for memory in test_memories:
            response = await self.client.post(
                "/v1/memories/store",
                json=memory,
                headers=self.headers
            )
            assert response.status_code == 200
            stored_ids.append(response.json()["id"])

        # Wait for indexing
        await asyncio.sleep(2)

        # Test search with different queries
        test_cases = [
            {
                "query": "deep learning training data",
                "expected_topic": "ml_training",
                "min_similarity": 0.6
            },
            {
                "query": "backpropagation algorithm",
                "expected_topic": "neural_networks",
                "min_similarity": 0.6
            },
            {
                "query": "database optimization",
                "expected_topic": "database_performance",
                "min_similarity": 0.6
            }
        ]
        for test_case in test_cases:
            search_data = {
                "query": test_case["query"],
                "user_id": self.test_user_id,
                "limit": 3,
                "threshold": 0.5
            }
            response = await self.client.post(
                "/v1/memories/search",
                json=search_data,
                headers=self.headers
            )
            assert response.status_code == 200
            data = response.json()
            assert data["total_count"] > 0

            # Check that the most similar result matches the expected topic
            top_result = data["memories"][0]
            assert top_result["similarity"] >= test_case["min_similarity"]
            assert top_result["metadata"]["topic"] == test_case["expected_topic"]
            print(
                f"✅ Query '{test_case['query']}' correctly matched topic "
                f"'{test_case['expected_topic']}' with similarity {top_result['similarity']:.3f}"
            )

        # Cleanup (best-effort, no status assertion)
        for memory_id in stored_ids:
            await self.client.delete(f"/v1/memories/{memory_id}", headers=self.headers)
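
    # Caveat: if an assertion fails mid-test, the cleanup loops above never
    # run and test memories are left behind; wrapping each test body in
    # try/finally (or collecting ids in a cleanup fixture) would make the
    # teardown reliable.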

    @pytest.mark.asyncio
    async def test_user_isolation(self):
        """Test that memories are properly isolated between users"""
        user1_id = f"user1_{uuid4()}"
        user2_id = f"user2_{uuid4()}"

        # Store memory for user1
        memory1_data = {
            "content": "User 1 private information about project Alpha",
            "user_id": user1_id,
            "metadata": {"privacy": "private"}
        }
        response = await self.client.post(
            "/v1/memories/store",
            json=memory1_data,
            headers=self.headers
        )
        assert response.status_code == 200
        memory1_id = response.json()["id"]

        # Store memory for user2
        memory2_data = {
            "content": "User 2 private information about project Beta",
            "user_id": user2_id,
            "metadata": {"privacy": "private"}
        }
        response = await self.client.post(
            "/v1/memories/store",
            json=memory2_data,
            headers=self.headers
        )
        assert response.status_code == 200
        memory2_id = response.json()["id"]

        # Wait for indexing
        await asyncio.sleep(1)

        # Search as user1 - should only find user1's memories
        search_data = {
            "query": "private information project",
            "user_id": user1_id,
            "limit": 10,
            "threshold": 0.3
        }
        response = await self.client.post(
            "/v1/memories/search",
            json=search_data,
            headers=self.headers
        )
        assert response.status_code == 200
        data = response.json()

        # Every hit must belong to user1; user2's content must never leak
        for memory in data["memories"]:
            assert memory["user_id"] == user1_id
            assert "Alpha" in memory["content"]
            assert "Beta" not in memory["content"]
        print(f"✅ User isolation test passed - user1 found {data['total_count']} memories")

        # Cleanup
        await self.client.delete(f"/v1/memories/{memory1_id}", headers=self.headers)
        await self.client.delete(f"/v1/memories/{memory2_id}", headers=self.headers)

    @pytest.mark.asyncio
    async def test_service_health_monitoring(self):
        """Test service health monitoring"""
        response = await self.client.get("/health")
        assert response.status_code == 200
        health_data = response.json()

        # Check overall status
        assert health_data["status"] in ["healthy", "degraded", "unhealthy"]

        # Check individual services
        services = health_data["services"]
        required_services = ["ollama", "supabase", "neo4j", "postgres"]
        for service in required_services:
            assert service in services
            service_status = services[service]
            print(f"Service {service}: {service_status}")

            # For integration tests, we expect core services to be healthy
            if service in ["ollama", "supabase", "postgres"]:
                assert service_status == "healthy", f"Required service {service} is not healthy"

        print(f"✅ Health check passed - overall status: {health_data['status']}")

if __name__ == "__main__":
    pytest.main([__file__, "-v", "-s"])