- Added comprehensive N8N development tools collection - Added Docker-containerized mock API server for testing - Added complete documentation and setup guides - Added mock API server with health checks and data endpoints - Tools include workflow analyzers, debuggers, and controllers 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com>
280 lines
12 KiB
Python
#!/usr/bin/env python3
|
|
"""
|
|
Improved N8N API Client - Enhanced error logging and execution analysis
|
|
"""
|
|
|
|
import json
|
|
import requests
|
|
import time
|
|
from typing import Dict, List, Optional, Any
|
|
from dataclasses import dataclass
|
|
from datetime import datetime
|
|
|
|
|
|
@dataclass
class ExecutionError:
    """Detailed execution error information"""

    # ID of the N8N execution the error was extracted from.
    execution_id: str
    # Name of the failing workflow node; "GLOBAL" for workflow-level errors.
    node_name: str
    # Human-readable error message reported by N8N.
    error_message: str
    # Error type/category as reported by N8N ('unknown' when absent).
    error_type: str
    # Stack trace split into lines, when the API payload supplied one.
    stack_trace: Optional[List[str]] = None
    # Timestamp of the failure (execution 'stoppedAt' or node 'startTime').
    timestamp: Optional[str] = None
|
|
|
|
|
|
class ImprovedN8NClient:
    """Enhanced N8N client with better error handling and logging.

    Wraps the N8N REST API with a persistent authenticated session and
    adds execution-error analysis helpers on top of the raw endpoints.
    """

    def __init__(self, config_path: str = "n8n_api_credentials.json"):
        """Initialize enhanced N8N client.

        Args:
            config_path: Path to a JSON file providing at least 'api_url'
                and 'headers' (including the N8N auth header).
        """
        self.config = self._load_config(config_path)
        self.session = requests.Session()
        # Every request reuses the configured auth headers.
        self.session.headers.update(self.config['headers'])

    def _load_config(self, config_path: str) -> Dict:
        """Load N8N configuration from JSON file"""
        with open(config_path, 'r') as f:
            return json.load(f)

    def _make_request(self, method: str, endpoint: str, data: Optional[Dict] = None) -> Dict:
        """Make authenticated request to N8N API with enhanced error handling.

        GET sends `data` as query parameters; POST/PUT send it as a JSON
        body. On HTTP failure the status and (truncated) response body are
        printed before the exception is re-raised.

        Raises:
            ValueError: For an unsupported HTTP method.
            requests.exceptions.RequestException: On transport/HTTP errors.
        """
        url = f"{self.config['api_url'].rstrip('/')}/{endpoint.lstrip('/')}"

        try:
            verb = method.upper()  # hoisted: was recomputed per branch
            if verb == 'GET':
                response = self.session.get(url, params=data)
            elif verb == 'POST':
                response = self.session.post(url, json=data)
            elif verb == 'PUT':
                response = self.session.put(url, json=data)
            elif verb == 'DELETE':
                response = self.session.delete(url)
            else:
                raise ValueError(f"Unsupported HTTP method: {method}")

            response.raise_for_status()
            # Some endpoints return an empty body (e.g. DELETE).
            return response.json() if response.content else {}

        except requests.exceptions.RequestException as e:
            print(f"API Request failed: {method} {url}")
            print(f"Error: {e}")
            if hasattr(e, 'response') and e.response is not None:
                print(f"Response status: {e.response.status_code}")
                print(f"Response text: {e.response.text[:500]}")
            raise

    def get_execution_with_logs(self, execution_id: str) -> Dict:
        """Get execution with detailed logging information"""
        try:
            # Get basic execution data
            execution = self._make_request('GET', f'/executions/{execution_id}')

            # Some N8N instances expose an extra logs endpoint; attach its
            # output when available, but continue without it otherwise.
            try:
                logs = self._make_request('GET', f'/executions/{execution_id}/logs')
                execution['detailed_logs'] = logs
            except Exception:
                # BUGFIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. The logs endpoint may not
                # exist on this N8N version; that is expected.
                pass

            return execution

        except Exception as e:
            print(f"Error getting execution logs: {e}")
            raise

    @staticmethod
    def _error_from_payload(execution_id: str, node_name: str, payload: Dict,
                            timestamp: Optional[str]) -> "ExecutionError":
        """Build an ExecutionError from a raw N8N error payload dict."""
        error = ExecutionError(
            execution_id=execution_id,
            node_name=node_name,
            error_message=str(payload.get('message', payload)),
            error_type=payload.get('type', 'unknown'),
            timestamp=timestamp,
        )
        # N8N payloads carry either 'stack' (newline-joined string) or
        # 'stackTrace' (already a list) depending on version.
        if 'stack' in payload:
            error.stack_trace = payload['stack'].split('\n')
        elif 'stackTrace' in payload:
            error.stack_trace = payload['stackTrace']
        return error

    def analyze_execution_errors(self, execution_id: str) -> List["ExecutionError"]:
        """Analyze execution and extract detailed error information.

        Returns an empty list when the execution did not fail. Best-effort:
        on analysis failure, returns whatever errors were collected so far.
        """
        errors: List["ExecutionError"] = []

        try:
            execution = self.get_execution_with_logs(execution_id)

            # Only failed executions carry error data.
            if execution.get('status') != 'error':
                return errors

            result_data = execution.get('data', {}).get('resultData')
            if not result_data:
                return errors

            # Workflow-level (global) execution error.
            if 'error' in result_data:
                errors.append(self._error_from_payload(
                    execution_id, "GLOBAL", result_data['error'],
                    execution.get('stoppedAt')))

            # Node-specific errors from each run of each node.
            for node_name, node_runs in result_data.get('runData', {}).items():
                for run in node_runs:
                    if 'error' in run:
                        errors.append(self._error_from_payload(
                            execution_id, node_name, run['error'],
                            run.get('startTime')))

            return errors

        except Exception as e:
            print(f"Error analyzing execution errors: {e}")
            return errors

    def get_recent_errors(self, workflow_id: str, limit: int = 10) -> List["ExecutionError"]:
        """Get recent errors for a workflow.

        Fetches up to `limit` recent executions and analyzes every failed
        one. Best-effort: returns what was collected if the listing fails.
        """
        all_errors: List["ExecutionError"] = []

        try:
            # BUGFIX: requests serializes Python True as the query string
            # 'True'; the N8N API expects lowercase 'true' for booleans.
            executions = self._make_request('GET', '/executions', {
                'workflowId': workflow_id,
                'limit': limit,
                'includeData': 'true'
            })

            # The API may wrap the list in {'data': [...]}.
            execution_list = executions.get('data', executions) if isinstance(executions, dict) else executions

            for execution in execution_list:
                if execution.get('status') == 'error':
                    exec_id = execution.get('id')
                    if exec_id:
                        all_errors.extend(self.analyze_execution_errors(exec_id))

            return all_errors

        except Exception as e:
            print(f"Error getting recent errors: {e}")
            return all_errors

    def find_template_errors(self, workflow_id: str) -> List["ExecutionError"]:
        """Find specific template-related errors"""
        all_errors = self.get_recent_errors(workflow_id, limit=20)

        # Case-insensitive keyword match against each error message.
        keywords = ('template', 'single', 'brace', 'f-string')
        return [
            error for error in all_errors
            if any(keyword in error.error_message.lower() for keyword in keywords)
        ]

    def execute_workflow_with_monitoring(self, workflow_id: str, test_data: Optional[Dict] = None, timeout: int = 300) -> Dict:
        """Execute workflow and monitor for detailed error information.

        Starts the workflow, then polls the execution every 2 seconds
        until it reaches a terminal state. Failed executions get their
        analyzed errors attached under 'analyzed_errors'.

        Raises:
            TimeoutError: If the execution does not finish within `timeout`
                seconds.
        """
        try:
            # Start execution
            result = self._make_request('POST', f'/workflows/{workflow_id}/execute', test_data)

            # The execution id may be nested under 'data' or top-level,
            # depending on the N8N version.
            exec_id = None
            if 'data' in result and 'id' in result['data']:
                exec_id = result['data']['id']
            elif 'id' in result:
                exec_id = result['id']

            if not exec_id:
                print("Warning: Could not get execution ID")
                return result

            print(f"Started execution {exec_id}, monitoring...")

            # Poll until a terminal status or timeout.
            start_time = time.time()
            while time.time() - start_time < timeout:
                execution = self.get_execution_with_logs(exec_id)
                status = execution.get('status')

                if status in ['success', 'error', 'cancelled']:
                    if status == 'error':
                        print(f"Execution {exec_id} failed, analyzing errors...")
                        errors = self.analyze_execution_errors(exec_id)
                        execution['analyzed_errors'] = errors

                        # Print detailed error information
                        for error in errors:
                            print(f"\nError in {error.node_name}:")
                            print(f" Message: {error.error_message}")
                            print(f" Type: {error.error_type}")
                            if error.stack_trace:
                                print(f" Stack trace: {error.stack_trace[:3]}")  # First 3 lines

                    return execution

                time.sleep(2)

            raise TimeoutError(f"Execution {exec_id} did not complete within {timeout} seconds")

        except Exception as e:
            print(f"Error in monitored execution: {e}")
            raise

    # Essential passthrough methods from the original client

    def list_workflows(self) -> List[Dict]:
        """Get list of all workflows"""
        response = self._make_request('GET', '/workflows')
        # The API may wrap the list in {'data': [...]}.
        return response.get('data', response) if isinstance(response, dict) else response

    def get_workflow(self, workflow_id: str) -> Dict:
        """Get specific workflow by ID"""
        return self._make_request('GET', f'/workflows/{workflow_id}')

    def update_workflow(self, workflow_id: str, workflow_data: Dict) -> Dict:
        """Update existing workflow"""
        return self._make_request('PUT', f'/workflows/{workflow_id}', workflow_data)

    def get_executions(self, workflow_id: Optional[str] = None, limit: int = 20) -> Dict:
        """Get workflow executions"""
        # Lowercase 'true' so the query string is valid for the N8N API
        # (consistent with get_recent_errors).
        params = {"limit": limit, "includeData": "true"}
        if workflow_id:
            params["workflowId"] = workflow_id
        return self._make_request('GET', '/executions', params)
|
|
|
|
|
|
if __name__ == "__main__":
    # Smoke-test the improved client against a live N8N instance.
    try:
        client = ImprovedN8NClient()
        print("Enhanced N8N client initialized successfully")

        # Probe the Matrix workflow for template-related failures.
        template_errors = client.find_template_errors('w6Sz5trluur5qdMj')
        if not template_errors:
            print("No template errors found")
        else:
            print(f"\nFound {len(template_errors)} template errors:")
            for err in template_errors:
                print(f"- {err.node_name}: {err.error_message}")

    except Exception as e:
        print(f"Failed to initialize enhanced client: {e}")