- Added comprehensive N8N development tools collection
- Added Docker-containerized mock API server for testing
- Added complete documentation and setup guides
- Added mock API server with health checks and data endpoints
- Tools include workflow analyzers, debuggers, and controllers

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
#!/usr/bin/env python3
"""
Real-time Error Catcher - Monitor Matrix workflow and catch template errors as they happen
"""

import sys
sys.path.append('/home/klas/claude_n8n/tools')
import json
import requests
import time
import threading
from datetime import datetime
from typing import Dict, Optional

class RealTimeErrorCatcher:
    """Catch template errors in real-time by monitoring workflow executions"""

    def __init__(self, config_path: str = "n8n_api_credentials.json"):
        self.config = self._load_config(config_path)
        self.session = requests.Session()
        self.session.headers.update(self.config['headers'])
        self.api_url = self.config['api_url']
        self.monitoring = False
        self.error_found = False
        self.detailed_errors = []

    def _load_config(self, config_path: str) -> Dict:
        with open(config_path, 'r') as f:
            return json.load(f)
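    # The contents of n8n_api_credentials.json are not shown here; a minimal sketch of
    # what __init__ expects (an 'api_url' string plus a 'headers' dict, typically
    # carrying n8n's X-N8N-API-KEY header) could look like this. The URL and key below
    # are placeholders, not values from this project:
    #
    #   {
    #       "api_url": "http://localhost:5678/api/v1",
    #       "headers": {
    #           "X-N8N-API-KEY": "<your-api-key>",
    #           "Content-Type": "application/json"
    #       }
    #   }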
    def _make_request(self, method: str, endpoint: str, params: Optional[Dict] = None) -> Dict:
        url = f"{self.api_url.rstrip('/')}/{endpoint.lstrip('/')}"

        try:
            if method.upper() == 'GET':
                response = self.session.get(url, params=params)
            else:
                raise ValueError("Only GET is supported in this tool")

            response.raise_for_status()
            return response.json() if response.content else {}

        except Exception as e:
            print(f"API Error: {e}")
            return {}
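    # For reference, the monitoring loop below only relies on a few fields of the
    # GET /executions response. An abridged payload (field names can vary between
    # n8n versions, so treat this as an assumption) looks roughly like:
    #
    #   {
    #       "data": [
    #           {"id": "1234", "status": "error",   "startedAt": "2024-01-01T12:00:01.000Z"},
    #           {"id": "1233", "status": "success", "startedAt": "2024-01-01T12:00:00.000Z"}
    #       ],
    #       "nextCursor": null
    #   }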
    def start_monitoring(self, workflow_id: str):
        """Start monitoring workflow executions for errors"""
        self.monitoring = True
        self.error_found = False
        self.detailed_errors = []

        print(f"🎯 Starting real-time error monitoring for Matrix workflow")
        print(f"🔍 Monitoring workflow: {workflow_id}")
        print(f"⏰ Started at: {datetime.now().strftime('%H:%M:%S')}")
        print("=" * 60)

        # Get baseline executions
        seen_executions = set()
        try:
            initial = self._make_request('GET', '/executions', {'workflowId': workflow_id, 'limit': 5})
            for exec_data in initial.get('data', []):
                seen_executions.add(exec_data['id'])
            print(f"📊 Baseline: {len(seen_executions)} existing executions")
        except Exception:
            print("⚠️ Could not get baseline executions")

        # Monitor loop
        consecutive_successes = 0
        while self.monitoring:
            try:
                # Get recent executions
                executions = self._make_request('GET', '/executions', {'workflowId': workflow_id, 'limit': 10})

                new_executions = []
                for exec_data in executions.get('data', []):
                    exec_id = exec_data['id']
                    if exec_id not in seen_executions:
                        seen_executions.add(exec_id)
                        new_executions.append(exec_data)

                # Process new executions
                for exec_data in new_executions:
                    exec_id = exec_data['id']
                    status = exec_data.get('status')
                    started_at = exec_data.get('startedAt', '')

                    timestamp = datetime.now().strftime('%H:%M:%S')
                    print(f"\n🆕 [{timestamp}] New execution: {exec_id}")
                    print(f"   Status: {status}")

                    if status == 'error':
                        print(f"   🚨 ERROR DETECTED! Analyzing...")

                        # Get detailed error information
                        try:
                            details = self._make_request('GET', f'/executions/{exec_id}')
                            error_info = self._deep_analyze_error(details)

                            if error_info:
                                self.detailed_errors.append({
                                    'execution_id': exec_id,
                                    'timestamp': timestamp,
                                    'error_info': error_info
                                })

                                if 'template' in error_info.lower() or 'single' in error_info.lower():
                                    print(f"   💥 TEMPLATE ERROR CONFIRMED!")
                                    print(f"   📝 Error: {error_info}")
                                    self.error_found = True

                                    # Try to get the input data that caused this
                                    input_data = self._extract_input_data(details)
                                    if input_data:
                                        print(f"   📥 Input data that triggered error:")
                                        print(f"      {input_data}")
                                else:
                                    print(f"   📝 Non-template error: {error_info}")
                            else:
                                print(f"   ❓ Could not extract error details")

                        except Exception as e:
                            print(f"   ❌ Failed to analyze error: {e}")

                    elif status == 'success':
                        consecutive_successes += 1
                        print(f"   ✅ Success (consecutive: {consecutive_successes})")

                    else:
                        print(f"   ⏳ Status: {status}")

                # If we found a template error, we can stop or continue monitoring
                if self.error_found:
                    print(f"\n🎯 TEMPLATE ERROR FOUND! Continuing to monitor for patterns...")

                time.sleep(1)  # Check every second

            except KeyboardInterrupt:
                print(f"\n⏹️ Monitoring stopped by user")
                break
            except Exception as e:
                print(f"   ⚠️ Monitoring error: {e}")
                time.sleep(2)

        self.monitoring = False
        print(f"\n📊 MONITORING SUMMARY")
        print("=" * 30)
        print(f"🔍 Total errors detected: {len(self.detailed_errors)}")
        print(f"💥 Template errors found: {len([e for e in self.detailed_errors if 'template' in e['error_info'].lower()])}")

        return self.detailed_errors
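    # _deep_analyze_error below walks the execution detail payload returned by
    # GET /executions/{id}. The shape it assumes (simplified, and subject to change
    # between n8n versions) is roughly:
    #
    #   {
    #       "data": {
    #           "resultData": {
    #               "error": {"type": "...", "message": "...", "stack": "..."},
    #               "runData": {
    #                   "<node name>": [
    #                       {"error": {"message": "..."}, "data": {"main": [...]}}
    #                   ]
    #               }
    #           }
    #       }
    #   }
    #
    # Depending on the n8n version, the detail endpoint may only include this "data"
    # block when an includeData=true query parameter is passed; if it is missing, the
    # method simply reports "No execution data".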
    def _deep_analyze_error(self, execution_details: Dict) -> str:
        """Extract detailed error information"""
        if 'data' not in execution_details:
            return "No execution data"

        data = execution_details['data']

        # Check for global errors first
        if 'resultData' in data:
            result_data = data['resultData']

            if 'error' in result_data:
                error = result_data['error']
                if isinstance(error, dict):
                    message = error.get('message', str(error))
                    error_type = error.get('type', '')
                    stack = error.get('stack', '')

                    full_error = f"Type: {error_type}, Message: {message}"
                    if 'template' in message.lower() or 'single' in message.lower():
                        if stack:
                            # Extract the first few stack trace lines (the stack string
                            # contains real newlines once the JSON has been parsed)
                            stack_lines = str(stack).split('\n')[:3]
                            full_error += f", Stack: {stack_lines}"

                    return full_error
                else:
                    return str(error)

            # Check node-specific errors
            if 'runData' in result_data:
                for node_name, runs in result_data['runData'].items():
                    for run in runs:
                        if 'error' in run:
                            error = run['error']
                            if isinstance(error, dict):
                                message = error.get('message', str(error))
                                return f"Node {node_name}: {message}"
                            else:
                                return f"Node {node_name}: {str(error)}"

        return "Unknown error structure"
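    # _extract_input_data below looks for a 'chunk' field directly on each item. Note
    # that n8n usually wraps node output as {"json": {...}}, so whether 'chunk' sits at
    # the top level or under 'json' depends on this particular workflow's data, e.g.:
    #
    #   {"json": {"chunk": "raw text passed to the Information Extractor ..."}}
    #
    # The node names checked ('Split Out', 'Loop Over Items', 'Code4', 'HTTP Request3')
    # are specific to the Matrix workflow being debugged.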
    def _extract_input_data(self, execution_details: Dict) -> Optional[str]:
        """Try to extract the input data that caused the error"""
        if 'data' not in execution_details:
            return None

        data = execution_details['data']

        if 'resultData' in data and 'runData' in data['resultData']:
            run_data = data['resultData']['runData']

            # Look for data that would go into Information Extractor
            for node_name in ['Split Out', 'Loop Over Items', 'Code4', 'HTTP Request3']:
                if node_name in run_data:
                    node_runs = run_data[node_name]
                    for run in node_runs:
                        if 'data' in run and 'main' in run['data']:
                            main_data = run['data']['main']
                            if main_data and len(main_data) > 0:
                                # Extract chunk data
                                for item in main_data[:2]:  # First 2 items
                                    if isinstance(item, dict) and 'chunk' in item:
                                        chunk = item['chunk']
                                        if isinstance(chunk, str) and len(chunk) > 0:
                                            return f"chunk: {repr(chunk[:200])}..."

        return None

    def stop_monitoring(self):
        """Stop the monitoring"""
        self.monitoring = False

def manual_trigger_test():
    """Manually trigger some test scenarios"""
    print("🧪 MANUAL TEST SCENARIOS")
    print("This would inject test data if we had manual trigger capability")
    print("For now, we rely on the scheduled executions to trigger errors")


if __name__ == "__main__":
    catcher = RealTimeErrorCatcher()

    print("🔧 Real-time Error Catcher initialized")
    print("🎯 This tool will monitor Matrix workflow executions and catch template errors")
    print("💡 The workflow runs every second, so errors should be detected quickly")

    try:
        # Start monitoring
        errors = catcher.start_monitoring('w6Sz5trluur5qdMj')

        print(f"\n🎯 FINAL RESULTS:")
        if errors:
            print(f"❌ Found {len(errors)} errors:")
            for error in errors:
                print(f"   - {error['timestamp']}: {error['error_info']}")
        else:
            print("✅ No errors detected during monitoring period")

    except KeyboardInterrupt:
        print("\n⏹️ Monitoring interrupted by user")
    except Exception as e:
        print(f"\n💥 Error catcher failed: {e}")