Initial commit: LangMem fact-based AI memory system with docs and MCP integration

- Complete fact-based memory API with mem0-inspired approach
- Individual fact extraction and deduplication
- ADD/UPDATE/DELETE memory actions
- Precision search with 0.86+ similarity scores
- MCP server for Claude Code integration
- Neo4j graph relationships and PostgreSQL vector storage
- Comprehensive documentation with architecture and API docs
- Matrix communication integration
- Production-ready Docker setup with Ollama and Supabase

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
Docker Config Backup
2025-07-17 13:16:19 +02:00
commit 46faa78237
43 changed files with 9086 additions and 0 deletions

112
tests/conftest.py Normal file
View File

@@ -0,0 +1,112 @@
#!/usr/bin/env python3
"""
Test configuration and fixtures for LangMem API tests
"""
import pytest
import asyncio
import httpx
from typing import AsyncGenerator
@pytest.fixture(scope="session")
def event_loop():
    """Provide one event loop shared by every async test in the session."""
    session_loop = asyncio.get_event_loop_policy().new_event_loop()
    yield session_loop
    session_loop.close()
@pytest.fixture(scope="session")
async def api_client() -> AsyncGenerator[httpx.AsyncClient, None]:
    """Yield a session-wide async HTTP client targeting the local LangMem API."""
    client = httpx.AsyncClient(base_url="http://localhost:8765", timeout=30.0)
    async with client:
        yield client
@pytest.fixture
def auth_headers():
    """Return the Authorization header expected by the LangMem API."""
    return dict(Authorization="Bearer langmem_api_key_2025")
@pytest.fixture
def test_user_id():
    """Return a fresh, collision-free user ID for a single test."""
    from uuid import uuid4
    return "test_user_{}".format(uuid4())
@pytest.fixture
def test_session_id():
    """Return a fresh, collision-free session ID for a single test."""
    from uuid import uuid4
    return "test_session_{}".format(uuid4())
@pytest.fixture
def sample_memory():
    """Return a canned memory payload for write-path tests."""
    metadata = {
        "category": "test",
        "importance": "low",
        "tags": ["sample", "test", "memory"],
    }
    return {
        "content": "This is a sample memory for testing purposes",
        "metadata": metadata,
    }
@pytest.fixture
def sample_conversation():
    """Return a short user/assistant exchange for conversation-ingestion tests."""
    turns = [
        ("user", "Hello, I need help with Python programming"),
        ("assistant", "I'd be happy to help with Python programming. What specific topic would you like to learn about?"),
        ("user", "I want to learn about web frameworks"),
    ]
    return [{"role": role, "content": text} for role, text in turns]
@pytest.fixture(scope="session")
async def wait_for_api():
    """Block test startup until the LangMem API answers its health check.

    Polls GET http://localhost:8765/health up to ``max_retries`` times,
    pausing ``retry_delay`` seconds between attempts.

    Raises:
        RuntimeError: if the API never returns HTTP 200 within the retry budget.
    """
    max_retries = 30
    retry_delay = 2
    for attempt in range(max_retries):
        try:
            async with httpx.AsyncClient() as client:
                response = await client.get("http://localhost:8765/health", timeout=5.0)
            if response.status_code == 200:
                print("✅ API is ready for testing")
                return
        except httpx.HTTPError:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # and genuine bugs still propagate; only transport/timeout errors
            # are treated as "not ready yet".
            pass
        if attempt < max_retries - 1:
            print(f"⏳ Waiting for API to be ready (attempt {attempt + 1}/{max_retries})")
            # asyncio.sleep instead of time.sleep: time.sleep would block the
            # event loop from inside this async fixture.
            await asyncio.sleep(retry_delay)
    raise RuntimeError("API failed to become ready within the timeout period")
# Configure pytest marks
# No extra plugins are loaded from this conftest; the custom markers are
# registered programmatically in pytest_configure below.
pytest_plugins = []
def pytest_configure(config):
    """Register this suite's custom markers (integration, slow, unit) with pytest."""
    for marker_spec in (
        "integration: mark test as integration test",
        "slow: mark test as slow running",
        "unit: mark test as unit test",
    ):
        config.addinivalue_line("markers", marker_spec)
def pytest_collection_modifyitems(config, items):
    """Auto-tag collected tests: integration by node path, slow by test name."""
    slow_keywords = ("full_workflow", "health_monitoring")
    for test_item in items:
        # Anything collected under an "integration" path is an integration test.
        if "integration" in test_item.nodeid:
            test_item.add_marker(pytest.mark.integration)
        # Known long-running tests are flagged slow so they can be deselected.
        if any(keyword in test_item.name for keyword in slow_keywords):
            test_item.add_marker(pytest.mark.slow)