Clean and organize project structure
Major reorganization: - Created scripts/ directory for all utility scripts - Created config/ directory for configuration files - Moved all test files to tests/ directory - Updated all script paths to work with new structure - Updated README.md with new project structure diagram New structure: ├── src/ # Source code (API + MCP) ├── scripts/ # Utility scripts (start-*.sh, docs_server.py, etc.) ├── tests/ # All test files and debug utilities ├── config/ # Configuration files (JSON, Caddy config) ├── docs/ # Documentation website └── logs/ # Log files All scripts updated to use relative paths from project root. Documentation updated with new folder structure. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
186
tests/populate_test_data.py
Normal file
186
tests/populate_test_data.py
Normal file
@@ -0,0 +1,186 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Populate LangMem with test data for Supabase web UI viewing
|
||||
"""
|
||||
|
||||
import asyncio
import json
import os
from datetime import datetime

import httpx
|
||||
|
||||
# Configuration.
# Defaults target a local development stack; both values can be overridden
# via the environment so the script also works against remote deployments
# without editing the source. NOTE(review): the fallback API key is a demo
# credential checked into the repo — fine for test data, never for prod.
API_BASE_URL = os.environ.get("LANGMEM_API_URL", "http://localhost:8765")
API_KEY = os.environ.get("LANGMEM_API_KEY", "langmem_api_key_2025")
|
||||
|
||||
# Test memories to store.
# Eight sample records spread across four demo sessions and several metadata
# categories, so both the store endpoint and the Supabase table view have
# varied data to show. Each dict is a ready-to-post /v1/memories/store
# payload: free-text "content", "user_id"/"session_id" identifiers, and a
# "metadata" object (category, subcategory, importance, tags).
test_memories = [
    {
        "content": "Claude Code is an AI-powered CLI tool that helps with software development tasks. It can read files, search codebases, and generate code.",
        "user_id": "demo_user",
        "session_id": "demo_session_1",
        "metadata": {
            "category": "tools",
            "subcategory": "ai_development",
            "importance": "high",
            "tags": ["claude", "ai", "cli", "development"]
        }
    },
    {
        "content": "FastAPI is a modern, fast web framework for building APIs with Python. It provides automatic API documentation and type hints.",
        "user_id": "demo_user",
        "session_id": "demo_session_1",
        "metadata": {
            "category": "frameworks",
            "subcategory": "python_web",
            "importance": "medium",
            "tags": ["fastapi", "python", "web", "api"]
        }
    },
    {
        "content": "Docker containers provide lightweight virtualization for applications. They package software with all dependencies for consistent deployment.",
        "user_id": "demo_user",
        "session_id": "demo_session_2",
        "metadata": {
            "category": "devops",
            "subcategory": "containerization",
            "importance": "high",
            "tags": ["docker", "containers", "devops", "deployment"]
        }
    },
    {
        "content": "PostgreSQL with pgvector extension enables vector similarity search for embeddings. This is useful for semantic search and AI applications.",
        "user_id": "demo_user",
        "session_id": "demo_session_2",
        "metadata": {
            "category": "databases",
            "subcategory": "vector_search",
            "importance": "high",
            "tags": ["postgresql", "pgvector", "embeddings", "search"]
        }
    },
    {
        "content": "N8N is an open-source workflow automation tool that connects different services and APIs. It provides a visual interface for building workflows.",
        "user_id": "demo_user",
        "session_id": "demo_session_3",
        "metadata": {
            "category": "automation",
            "subcategory": "workflow_tools",
            "importance": "medium",
            "tags": ["n8n", "automation", "workflow", "integration"]
        }
    },
    {
        "content": "Ollama runs large language models locally on your machine. It supports models like Llama, Mistral, and provides embedding capabilities.",
        "user_id": "demo_user",
        "session_id": "demo_session_3",
        "metadata": {
            "category": "ai",
            "subcategory": "local_models",
            "importance": "high",
            "tags": ["ollama", "llm", "local", "embeddings"]
        }
    },
    {
        "content": "Supabase is an open-source Firebase alternative that provides database, authentication, and real-time subscriptions with PostgreSQL.",
        "user_id": "demo_user",
        "session_id": "demo_session_4",
        "metadata": {
            "category": "backend",
            "subcategory": "baas",
            "importance": "medium",
            "tags": ["supabase", "database", "authentication", "backend"]
        }
    },
    {
        "content": "Neo4j is a graph database that stores data as nodes and relationships. It's excellent for modeling complex relationships and network data.",
        "user_id": "demo_user",
        "session_id": "demo_session_4",
        "metadata": {
            "category": "databases",
            "subcategory": "graph_database",
            "importance": "medium",
            "tags": ["neo4j", "graph", "relationships", "cypher"]
        }
    }
]
|
||||
|
||||
async def store_test_memories():
    """Store the sample memories via the LangMem HTTP API, then smoke-test search.

    Posts each entry of ``test_memories`` to ``{API_BASE_URL}/v1/memories/store``
    with bearer-token auth, printing a per-item success/failure line, then runs
    a handful of semantic-search queries against ``/v1/memories/search`` and
    prints their top matches and similarity scores.

    Returns:
        list: the JSON payloads the API returned for each successfully stored
        memory (empty if every store call failed or the API was unreachable).
    """
    print("🧪 Populating LangMem with test data...")
    print("=" * 50)

    # Bearer-token auth expected by the API; built once and reused for all calls.
    headers = {"Authorization": f"Bearer {API_KEY}"}

    async with httpx.AsyncClient() as client:
        stored_memories = []

        for i, memory in enumerate(test_memories, 1):
            try:
                print(f"\n{i}. Storing: {memory['content'][:50]}...")

                response = await client.post(
                    f"{API_BASE_URL}/v1/memories/store",
                    json=memory,
                    headers=headers,
                    timeout=30.0
                )

                if response.status_code == 200:
                    data = response.json()
                    stored_memories.append(data)
                    print(f" ✅ Stored with ID: {data['id']}")
                else:
                    print(f" ❌ Failed: {response.status_code}")
                    print(f" Response: {response.text}")

            except Exception as e:
                # Best-effort population: one bad request (or a connection
                # error) should not abort the rest of the run.
                print(f" ❌ Error: {e}")

        print(f"\n🎉 Successfully stored {len(stored_memories)} memories!")
        print("\n📊 Summary:")
        print(f" - Total memories: {len(stored_memories)}")
        # Plain string (was an f-string with no placeholders — ruff F541).
        print(" - User: demo_user")
        print(f" - Sessions: {len(set(m['session_id'] for m in test_memories))}")
        print(f" - Categories: {len(set(m['metadata']['category'] for m in test_memories))}")

        # Smoke-test the search endpoint with queries that should each match
        # at least one of the memories stored above.
        print("\n🔍 Testing search functionality...")
        search_tests = [
            "Python web development",
            "AI and machine learning",
            "Database and storage",
            "Docker containers"
        ]

        for query in search_tests:
            try:
                response = await client.post(
                    f"{API_BASE_URL}/v1/memories/search",
                    json={
                        "query": query,
                        "user_id": "demo_user",
                        "limit": 3,
                        "threshold": 0.5
                    },
                    headers=headers,
                    timeout=30.0
                )

                if response.status_code == 200:
                    data = response.json()
                    print(f" Query: '{query}' -> {data['total_count']} results")
                    for memory in data['memories']:
                        print(f" - {memory['content'][:40]}... ({memory['similarity']:.3f})")
                else:
                    print(f" Query: '{query}' -> Failed ({response.status_code})")

            except Exception as e:
                print(f" Query: '{query}' -> Error: {e}")

    print("\n✅ Test data population complete!")
    print(" You can now view the memories in Supabase web UI:")
    print(" - Table: langmem_documents")
    print(" - URL: http://localhost:8000")
    # Return the stored payloads so callers (e.g. other tests) can inspect
    # what was created; previously they were silently discarded.
    return stored_memories
|
||||
|
||||
# Script entry point: run the async population routine on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(store_test_memories())
|
||||
Reference in New Issue
Block a user