t66_langmem/test_ai_relationships.py
Commit 46faa78237 (Docker Config Backup): Initial commit: LangMem fact-based AI memory system with docs and MCP integration
- Complete fact-based memory API with mem0-inspired approach
- Individual fact extraction and deduplication
- ADD/UPDATE/DELETE memory actions
- Precision search with 0.86+ similarity scores
- MCP server for Claude Code integration
- Neo4j graph relationships and PostgreSQL vector storage
- Comprehensive documentation with architecture and API docs
- Matrix communication integration
- Production-ready Docker setup with Ollama and Supabase

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-07-17 13:16:19 +02:00

#!/usr/bin/env python3
"""
Test AI-powered relationship extraction from various types of content
"""
import asyncio
import httpx
import json
# Configuration
API_BASE_URL = "http://localhost:8765"
API_KEY = "langmem_api_key_2025"
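# NOTE: these values assume the local Docker setup from this repo; adjust them for your own deployment.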
# Test content with different types of relationships
test_documents = [
    {
        "content": "Ondrej has a son named Cyril who is 8 years old and loves playing soccer",
        "user_id": "test_user",
        "session_id": "ai_test_session",
        "metadata": {
            "category": "family",
            "type": "personal_info"
        }
    },
    {
        "content": "Apple Inc. was founded by Steve Jobs, Steve Wozniak, and Ronald Wayne in Cupertino, California",
        "user_id": "test_user",
        "session_id": "ai_test_session",
        "metadata": {
            "category": "business",
            "type": "company_history"
        }
    },
    {
        "content": "Python is a programming language created by Guido van Rossum and is widely used for web development, data science, and machine learning",
        "user_id": "test_user",
        "session_id": "ai_test_session",
        "metadata": {
            "category": "technology",
            "type": "programming_languages"
        }
    },
    {
        "content": "The Eiffel Tower is located in Paris, France and was designed by Gustave Eiffel for the 1889 World's Fair",
        "user_id": "test_user",
        "session_id": "ai_test_session",
        "metadata": {
            "category": "architecture",
            "type": "landmarks"
        }
    },
    {
        "content": "Einstein worked at Princeton University and developed the theory of relativity which revolutionized physics",
        "user_id": "test_user",
        "session_id": "ai_test_session",
        "metadata": {
            "category": "science",
            "type": "historical_figures"
        }
    }
]
async def test_ai_relationship_extraction():
"""Test AI-powered relationship extraction"""
print("🤖 Testing AI-Powered Relationship Extraction")
print("=" * 60)
    headers = {"Authorization": f"Bearer {API_KEY}"}

    async with httpx.AsyncClient() as client:
        stored_memories = []

        for i, doc in enumerate(test_documents, 1):
            print(f"\n{i}. Processing: {doc['content'][:60]}...")
            try:
                # Store memory - AI will automatically extract relationships
                response = await client.post(
                    f"{API_BASE_URL}/v1/memories/store",
                    json=doc,
                    headers=headers,
                    timeout=60.0  # Longer timeout for AI processing
                )
                if response.status_code == 200:
                    data = response.json()
                    stored_memories.append(data)
                    print(f" ✅ Memory stored with ID: {data['id']}")
                    print(" 🔄 AI relationship extraction running in background...")
                else:
                    print(f" ❌ Failed: {response.status_code}")
                    print(f" Response: {response.text}")
            except Exception as e:
                print(f" ❌ Error: {e}")
        # Wait for AI processing to complete
        print("\n⏳ Waiting for AI relationship extraction to complete...")
        await asyncio.sleep(10)
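        # Extraction happens in the background on the server, so the fixed 10 s sleep above is only a best-effort wait before searching.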
print(f"\n✅ Successfully stored {len(stored_memories)} memories with AI-extracted relationships!")
# Test relationship-aware search
print("\n🔍 Testing relationship-aware search...")
search_tests = [
"Who is Cyril's father?",
"What companies did Steve Jobs found?",
"Who created Python programming language?",
"Where is the Eiffel Tower located?",
"What did Einstein work on?"
]
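        # threshold=0.3 is deliberately permissive for this smoke test; include_graph=True asks the search endpoint to return the graph relationships linked to each memory.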
        for query in search_tests:
            try:
                response = await client.post(
                    f"{API_BASE_URL}/v1/memories/search",
                    json={
                        "query": query,
                        "user_id": "test_user",
                        "limit": 2,
                        "threshold": 0.3,
                        "include_graph": True
                    },
                    headers=headers,
                    timeout=30.0
                )
                if response.status_code == 200:
                    data = response.json()
                    print(f"\n Query: '{query}'")
                    print(f" Results: {data['total_count']}")
                    for memory in data['memories']:
                        print(f" - {memory['content'][:50]}... (similarity: {memory['similarity']:.3f})")
                        if 'relationships' in memory and memory['relationships']:
                            print(f" Graph relationships: {len(memory['relationships'])}")
                            for rel in memory['relationships'][:3]:  # Show first 3
                                print(f"{rel['relationship']} {rel['entity_name']} ({rel['confidence']})")
                else:
                    print(f" Query: '{query}' -> Failed ({response.status_code})")
            except Exception as e:
                print(f" Query: '{query}' -> Error: {e}")
print("\n" + "=" * 60)
print("🎉 AI Relationship Extraction Test Complete!")
print("📊 Summary:")
print(f" - Memories processed: {len(stored_memories)}")
print(f" - AI model used: llama3.2")
print(f" - Relationship types: Dynamic (extracted by AI)")
print(f" - Entity types: Person, Organization, Location, Technology, Concept, etc.")
print("\n🌐 Check Neo4j Browser for dynamic relationships:")
print(" - URL: http://localhost:7474")
print(" - Query: MATCH (n)-[r]->(m) RETURN n, r, m")
print(" - Look for relationships like IS_SON_OF, FOUNDED_BY, CREATED_BY, LOCATED_IN, etc.")
if __name__ == "__main__":
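    # Assumes the LangMem API at API_BASE_URL (with its Ollama and Neo4j backends) is running locally.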
    asyncio.run(test_ai_relationship_extraction())