tkb_timeshift/claude_n8n/tools/n8n_debugger.py
Docker Config Backup, commit 8793ac4f59: Add Claude N8N toolkit with Docker mock API server
- Added a comprehensive collection of N8N development tools
- Added a Docker-containerized mock API server with health checks and data endpoints for testing
- Added complete documentation and setup guides
- Tools include workflow analyzers, debuggers, and controllers

2025-06-17 21:23:46 +02:00


#!/usr/bin/env python3
"""
N8N Workflow Debugger - Real-time error detection and test data injection
"""
import json
import requests
import time
import copy
from typing import Dict, List, Optional, Any
from datetime import datetime
class N8NDebugger:
"""Advanced N8N debugging tools with test injection and real-time monitoring"""
def __init__(self, config_path: str = "n8n_api_credentials.json"):
self.config = self._load_config(config_path)
self.session = requests.Session()
self.session.headers.update(self.config['headers'])
self.api_url = self.config['api_url']
def _load_config(self, config_path: str) -> Dict:
with open(config_path, 'r') as f:
return json.load(f)
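    # The loader above expects a JSON credentials file containing at least the
    # two keys used in __init__. An illustrative shape (the values here are
    # assumptions, not taken from the source):
    #
    #   {
    #     "api_url": "http://localhost:5678/api/v1",
    #     "headers": {"X-N8N-API-KEY": "<your-api-key>"}
    #   }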
def _make_request(self, method: str, endpoint: str, data: Optional[Dict] = None, params: Optional[Dict] = None) -> Dict:
"""Enhanced request method with better error reporting"""
url = f"{self.api_url.rstrip('/')}/{endpoint.lstrip('/')}"
try:
if method.upper() == 'GET':
response = self.session.get(url, params=params)
elif method.upper() == 'POST':
response = self.session.post(url, json=data, params=params)
            elif method.upper() == 'PUT':
                response = self.session.put(url, json=data)
            elif method.upper() == 'DELETE':
                # Needed for workflow cleanup in test_information_extractor_with_samples
                response = self.session.delete(url)
            else:
                raise ValueError(f"Unsupported method: {method}")
response.raise_for_status()
return response.json() if response.content else {}
except requests.exceptions.RequestException as e:
print(f"❌ API Error: {method} {url}")
print(f" Status: {getattr(e.response, 'status_code', 'Unknown')}")
print(f" Error: {e}")
            # A failed Response is falsy (bool() delegates to .ok), so test against None
            if getattr(e, 'response', None) is not None:
try:
error_data = e.response.json()
print(f" Details: {error_data}")
                except ValueError:
print(f" Raw response: {e.response.text[:500]}")
raise
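    # Illustrative usage (endpoint and params assumed to match n8n's REST API):
    #   workflows = debugger._make_request('GET', '/workflows', params={'limit': 10})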
def create_test_workflow(self, base_workflow_id: str, test_node_name: str, test_data: Any) -> str:
"""Create a minimal test workflow focused on a specific node"""
print(f"🔧 Creating test workflow for node: {test_node_name}")
# Get the original workflow
workflow = self._make_request('GET', f'/workflows/{base_workflow_id}')
# Find the target node and its dependencies
target_node = None
nodes = workflow.get('nodes', [])
for node in nodes:
if node.get('name') == test_node_name:
target_node = node
break
if not target_node:
raise ValueError(f"Node '{test_node_name}' not found in workflow")
# Create a minimal test workflow with just the target node and a manual trigger
test_workflow = {
'name': f'TEST_{test_node_name}_{int(time.time())}',
'nodes': [
{
'id': 'manual-trigger',
'name': 'Manual Trigger',
'type': 'n8n-nodes-base.manualTrigger',
'position': [100, 100],
'parameters': {}
},
{
'id': target_node.get('id', 'test-node'),
'name': target_node['name'],
'type': target_node['type'],
'position': [300, 100],
                    'parameters': copy.deepcopy(target_node.get('parameters', {}))  # deep copy so the source workflow is never mutated
}
],
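            # n8n connection format: source node name -> output type ('main')
            # -> list of output slots -> list of {node, type, index} targets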
'connections': {
'Manual Trigger': {
'main': [
[
{
'node': target_node['name'],
'type': 'main',
'index': 0
}
]
]
}
},
'active': False,
'settings': {},
'staticData': {}
}
# Create the test workflow
created = self._make_request('POST', '/workflows', test_workflow)
test_workflow_id = created.get('id')
print(f"✅ Created test workflow: {test_workflow_id}")
return test_workflow_id
def inject_test_data_and_execute(self, workflow_id: str, test_data: Dict) -> Dict:
"""Execute workflow with specific test data and capture detailed results"""
print(f"🚀 Executing workflow with test data: {test_data}")
try:
# Execute with test data
execution_result = self._make_request('POST', f'/workflows/{workflow_id}/execute', test_data)
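            # NOTE: POST /workflows/{id}/execute mirrors the bundled mock API
            # server; a real n8n instance may not expose this route, so treat
            # it as an assumption of this toolkit's test environment.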
# Get execution ID
exec_id = None
if isinstance(execution_result, dict):
exec_id = execution_result.get('data', {}).get('id') or execution_result.get('id')
if not exec_id:
print(f"⚠️ No execution ID returned, result: {execution_result}")
return execution_result
print(f"📊 Monitoring execution: {exec_id}")
# Monitor execution with detailed logging
start_time = time.time()
while time.time() - start_time < 30: # 30 second timeout
exec_details = self._make_request('GET', f'/executions/{exec_id}')
status = exec_details.get('status')
if status in ['success', 'error', 'cancelled']:
print(f"🏁 Execution completed with status: {status}")
if status == 'error':
self._analyze_execution_error(exec_details)
return exec_details
print(f"⏳ Status: {status}")
time.sleep(1)
print("⏰ Execution timeout")
return exec_details
except Exception as e:
print(f"💥 Execution failed: {e}")
raise
def _analyze_execution_error(self, execution_details: Dict):
"""Deep analysis of execution errors"""
print("\n🔍 DETAILED ERROR ANALYSIS")
print("=" * 50)
if 'data' not in execution_details:
print("❌ No execution data available")
return
data = execution_details['data']
# Check for global errors
if 'resultData' in data:
result_data = data['resultData']
if 'error' in result_data:
global_error = result_data['error']
print(f"🚨 GLOBAL ERROR:")
print(f" Type: {global_error.get('type', 'Unknown')}")
print(f" Message: {global_error.get('message', global_error)}")
if 'stack' in global_error:
print(f" Stack trace:")
                    for line in str(global_error['stack']).split('\n')[:5]:
print(f" {line}")
# Analyze node-specific errors
if 'runData' in result_data:
print(f"\n📋 NODE EXECUTION DETAILS:")
for node_name, runs in result_data['runData'].items():
print(f"\n 📦 Node: {node_name}")
for i, run in enumerate(runs):
print(f" Run {i+1}:")
if 'error' in run:
error = run['error']
print(f" 🚨 ERROR: {error}")
if isinstance(error, dict):
if 'message' in error:
print(f" Message: {error['message']}")
if 'type' in error:
print(f" Type: {error['type']}")
if 'stack' in error:
                                    stack_lines = str(error['stack']).split('\n')[:3]
print(f" Stack: {stack_lines}")
if 'data' in run:
run_data = run['data']
if 'main' in run_data and run_data['main']:
print(f" ✅ Input data: {len(run_data['main'])} items")
for j, item in enumerate(run_data['main'][:2]): # Show first 2 items
print(f" Item {j+1}: {str(item)[:100]}...")
if 'startTime' in run:
print(f" ⏱️ Started: {run['startTime']}")
if 'executionTime' in run:
print(f" ⏱️ Duration: {run['executionTime']}ms")
def test_information_extractor_with_samples(self, workflow_id: str) -> List[Dict]:
"""Test Information Extractor with various problematic data samples"""
print("\n🧪 TESTING INFORMATION EXTRACTOR WITH SAMPLE DATA")
print("=" * 60)
# Create test data samples that might cause template errors
test_samples = [
{
"name": "simple_text",
"data": {"chunk": "This is a simple test message"}
},
{
"name": "single_quotes",
"data": {"chunk": "This message contains single quotes: it's working"}
},
{
"name": "json_like_with_quotes",
"data": {"chunk": '{"message": "it\'s a test with quotes"}'}
},
{
"name": "template_like_syntax",
"data": {"chunk": "Template syntax: {variable} with quote: that's it"}
},
{
"name": "mixed_quotes_and_braces",
"data": {"chunk": "Complex: {item: 'value'} and more {data: 'test'}"}
},
{
"name": "czech_text_with_quotes",
"data": {"chunk": "Český text s apostrofy: to je náš systém"}
},
{
"name": "empty_chunk",
"data": {"chunk": ""}
},
{
"name": "null_chunk",
"data": {"chunk": None}
},
{
"name": "unicode_and_quotes",
"data": {"chunk": "Unicode: ěščřžýáíé with quotes: that's nice"}
}
]
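        # Each sample probes a quoting/brace pattern that can trip n8n's
        # {{ ... }} expression templating when chunk text is interpolated raw.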
results = []
for sample in test_samples:
print(f"\n🔬 Testing: {sample['name']}")
print(f" Data: {sample['data']}")
try:
# Create a temporary test workflow for this node
test_workflow_id = self.create_test_workflow(workflow_id, 'Information Extractor', sample['data'])
try:
# Execute with the test data
result = self.inject_test_data_and_execute(test_workflow_id, sample['data'])
test_result = {
'sample': sample['name'],
'input_data': sample['data'],
'status': result.get('status'),
'success': result.get('status') == 'success',
'error_details': None
}
if result.get('status') == 'error':
test_result['error_details'] = self._extract_error_summary(result)
print(f" ❌ FAILED: {test_result['error_details']}")
else:
print(f" ✅ SUCCESS")
results.append(test_result)
finally:
# Clean up test workflow
try:
self._make_request('DELETE', f'/workflows/{test_workflow_id}')
                    except Exception:
pass
except Exception as e:
print(f" 💥 EXCEPTION: {e}")
results.append({
'sample': sample['name'],
'input_data': sample['data'],
'status': 'exception',
'success': False,
'error_details': str(e)
})
# Summary
print(f"\n📊 TEST RESULTS SUMMARY")
print("=" * 30)
success_count = len([r for r in results if r['success']])
total_count = len(results)
print(f"✅ Successful: {success_count}/{total_count}")
print(f"❌ Failed: {total_count - success_count}/{total_count}")
print(f"\n🚨 FAILED TESTS:")
for result in results:
if not result['success']:
print(f" - {result['sample']}: {result.get('error_details', 'Unknown error')}")
return results
def _extract_error_summary(self, execution_details: Dict) -> str:
"""Extract a concise error summary"""
if 'data' not in execution_details:
return "No execution data"
data = execution_details['data']
if 'resultData' in data:
result_data = data['resultData']
# Check for global error
if 'error' in result_data:
error = result_data['error']
if isinstance(error, dict):
return error.get('message', str(error))
return str(error)
# Check for node errors
if 'runData' in result_data:
for node_name, runs in result_data['runData'].items():
for run in runs:
if 'error' in run:
error = run['error']
if isinstance(error, dict):
return f"{node_name}: {error.get('message', str(error))}"
return f"{node_name}: {str(error)}"
return "Unknown error"
def monitor_workflow_realtime(self, workflow_id: str, duration_seconds: int = 60):
"""Monitor workflow executions in real-time and catch errors immediately"""
print(f"\n📡 REAL-TIME MONITORING ({duration_seconds}s)")
print("=" * 40)
start_time = time.time()
seen_executions = set()
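        # Track execution IDs we've already reported so only new runs are logged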
# Get initial executions
try:
initial_execs = self._make_request('GET', '/executions', {'workflowId': workflow_id, 'limit': 5})
for exec_data in initial_execs.get('data', []):
seen_executions.add(exec_data['id'])
        except Exception:
            pass  # seeding is best-effort; the polling loop below still works
while time.time() - start_time < duration_seconds:
try:
# Get recent executions
executions = self._make_request('GET', '/executions', {'workflowId': workflow_id, 'limit': 10})
for exec_data in executions.get('data', []):
exec_id = exec_data['id']
if exec_id not in seen_executions:
seen_executions.add(exec_id)
status = exec_data.get('status')
started_at = exec_data.get('startedAt', '')
print(f"\n🆕 New execution: {exec_id}")
print(f" Started: {started_at}")
print(f" Status: {status}")
if status == 'error':
print(f" 🚨 ERROR DETECTED - Getting details...")
# Get full error details
try:
details = self._make_request('GET', f'/executions/{exec_id}')
self._analyze_execution_error(details)
except Exception as e:
print(f" Failed to get error details: {e}")
elif status == 'success':
print(f" ✅ Success")
time.sleep(2)
except Exception as e:
print(f" Monitoring error: {e}")
time.sleep(2)
print(f"\n📊 Monitoring complete. Watched for {duration_seconds} seconds.")
if __name__ == "__main__":
# Test the debugger
debugger = N8NDebugger()
print("🔧 N8N Debugger initialized")
print("Testing Information Extractor with sample data...")
try:
# Test with various data samples
results = debugger.test_information_extractor_with_samples('w6Sz5trluur5qdMj')
print("\n🎯 FINAL RESULTS:")
failed_tests = [r for r in results if not r['success']]
if failed_tests:
print("❌ Template errors found with these data patterns:")
for test in failed_tests:
print(f" - {test['sample']}: {test['input_data']}")
print(f" Error: {test['error_details']}")
else:
print("✅ All tests passed - no template errors detected")
except Exception as e:
print(f"💥 Debugger failed: {e}")
# Fall back to real-time monitoring
print("\n📡 Falling back to real-time monitoring...")
debugger.monitor_workflow_realtime('w6Sz5trluur5qdMj', 30)