"""
|
|
Enhanced Workflow Controller for N8N
|
|
|
|
This module provides enhanced functionality to control N8N workflows with better
|
|
error handling and alternative methods for workflow management.
|
|
"""
|
|
|
|
import logging
|
|
import subprocess
|
|
import time
|
|
from typing import List, Dict, Any, Optional
|
|
from .n8n_client import N8NClient
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
class EnhancedWorkflowController:
    """Enhanced controller for managing N8N workflow states with multiple approaches."""

    def __init__(self, client: Optional[N8NClient] = None):
        """
        Initialize the enhanced workflow controller.

        Args:
            client: N8N client instance. If None, creates a new one.
        """
        self.client = client or N8NClient()
        self._original_states = {}

    def force_refresh_workflow(self, workflow_id: str) -> Dict[str, Any]:
        """
        Force refresh a workflow by downloading the latest version.

        Args:
            workflow_id: ID of the workflow to refresh

        Returns:
            Fresh workflow data
        """
        try:
            # Timestamp recording when this refresh happened
            refreshed_at = int(time.time())

            workflow = self.client.get_workflow(workflow_id)

            logger.info(f"Force refreshed workflow {workflow_id} at {refreshed_at}")

            return {
                'success': True,
                'workflow': workflow,
                'refreshed_at': refreshed_at,
                'last_updated': workflow.get('updatedAt')
            }

        except Exception as e:
            logger.error(f"Failed to force refresh workflow {workflow_id}: {e}")
            return {
                'success': False,
                'error': str(e),
                'workflow_id': workflow_id
            }

    def stop_workflows_via_docker(self, exclude_ids: Optional[List[str]] = None) -> Dict[str, Any]:
        """
        Alternative method to stop workflows by restarting the N8N container.
        This forces all workflows into the inactive state.

        Args:
            exclude_ids: Workflows to reactivate after the restart

        Returns:
            Result of the operation
        """
        exclude_ids = exclude_ids or []

        try:
            print("⚠️ WARNING: This will restart the N8N container")
            print("All workflows will be stopped, then excluded ones reactivated")

            # Store currently active workflows
            workflows = self.client.list_workflows()
            currently_active = []

            for wf in workflows:
                if wf.get('active', False):
                    currently_active.append({
                        'id': wf.get('id'),
                        'name': wf.get('name', 'Unknown')
                    })

            print(f"Currently active workflows: {len(currently_active)}")

            # Restart container (this stops all workflows)
            print("🔄 Restarting N8N container...")
            result = subprocess.run(
                ["docker", "restart", "n8n-n8n-1"],
                capture_output=True,
                text=True,
                timeout=60
            )

            if result.returncode == 0:
                print("✅ Container restarted successfully")

                # Wait for N8N to start
                print("⏱️ Waiting for N8N to start...")
                time.sleep(20)

                # Reactivate excluded workflows
                reactivated = []
                failed_reactivation = []

                for workflow_id in exclude_ids:
                    try:
                        workflow = self.client.get_workflow(workflow_id)
                        updated_workflow = {**workflow, 'active': True}
                        self.client.update_workflow(workflow_id, updated_workflow)

                        reactivated.append({
                            'id': workflow_id,
                            'name': workflow.get('name', 'Unknown')
                        })

                    except Exception as e:
                        failed_reactivation.append({
                            'id': workflow_id,
                            'error': str(e)
                        })

                return {
                    'success': True,
                    'method': 'docker_restart',
                    'previously_active': currently_active,
                    'reactivated': reactivated,
                    'failed_reactivation': failed_reactivation,
                    'message': 'Container restarted, workflows reset'
                }

            else:
                return {
                    'success': False,
                    'error': f"Docker restart failed: {result.stderr}",
                    'method': 'docker_restart'
                }

        except Exception as e:
            return {
                'success': False,
                'error': str(e),
                'method': 'docker_restart'
            }

    def isolate_workflow_for_debugging(self, debug_workflow_id: str) -> Dict[str, Any]:
        """
        Isolate a specific workflow for debugging by stopping all others.
        Uses multiple approaches for maximum effectiveness.

        Args:
            debug_workflow_id: ID of the workflow to keep active for debugging

        Returns:
            Result of the isolation process
        """
        print(f"🔧 ISOLATING WORKFLOW FOR DEBUGGING: {debug_workflow_id}")
        print("=" * 60)

        # Step 1: Get current state
        try:
            workflows = self.client.list_workflows()
            active_workflows = [w for w in workflows if w.get('active', False)]

            print(f"Currently active workflows: {len(active_workflows)}")
            for wf in active_workflows:
                name = wf.get('name', 'Unknown')
                wf_id = wf.get('id')
                marker = "🎯 DEBUG" if wf_id == debug_workflow_id else "🔴 TO STOP"
                print(f" {marker} {name} ({wf_id})")

        except Exception as e:
            print(f"❌ Failed to get workflow list: {e}")
            return {'success': False, 'error': str(e)}

        # Step 2: Try the API method first
        print("\n🔄 Attempting API-based workflow stopping...")
        api_result = self._stop_workflows_api(exclude_ids=[debug_workflow_id])

        if api_result['stopped_count'] > 0:
            print(f"✅ API method successful: {api_result['stopped_count']} workflows stopped")
            return {
                'success': True,
                'method': 'api',
                'result': api_result,
                'isolated_workflow': debug_workflow_id
            }

        # Step 3: If the API method failed, offer the Docker method
        print("⚠️ API method failed or incomplete")
        print(f"Stopped: {api_result['stopped_count']}, Failed: {api_result['failed_count']}")

        if api_result['failed_count'] > 0:
            print("\n💡 Alternative: Use Docker restart method?")
            print("This will restart the N8N container and stop ALL workflows,")
            print(f"then reactivate only the debug workflow: {debug_workflow_id}")

            return {
                'success': False,
                'method': 'api_failed',
                'api_result': api_result,
                'suggestion': 'use_docker_restart',
                'docker_command': f"controller.stop_workflows_via_docker(['{debug_workflow_id}'])"
            }

        return {
            'success': True,
            'method': 'api_partial',
            'result': api_result,
            'isolated_workflow': debug_workflow_id
        }

    def _stop_workflows_api(self, exclude_ids: Optional[List[str]] = None) -> Dict[str, Any]:
        """Internal API-based workflow stopping."""
        exclude_ids = exclude_ids or []
        workflows = self.client.list_workflows()

        stopped = []
        failed = []
        skipped = []

        for workflow in workflows:
            workflow_id = workflow.get('id')
            workflow_name = workflow.get('name', 'Unknown')
            is_active = workflow.get('active', False)

            if workflow_id in exclude_ids:
                skipped.append({
                    'id': workflow_id,
                    'name': workflow_name,
                    'reason': 'excluded'
                })
                continue

            if not is_active:
                skipped.append({
                    'id': workflow_id,
                    'name': workflow_name,
                    'reason': 'already_inactive'
                })
                continue

            # Store original state
            self._original_states[workflow_id] = {
                'active': is_active,
                'name': workflow_name
            }

            try:
                updated_workflow = {**workflow, 'active': False}
                self.client.update_workflow(workflow_id, updated_workflow)

                stopped.append({
                    'id': workflow_id,
                    'name': workflow_name,
                    'was_active': is_active
                })
                logger.info(f"Stopped workflow: {workflow_name} ({workflow_id})")

            except Exception as e:
                failed.append({
                    'id': workflow_id,
                    'name': workflow_name,
                    'error': str(e)
                })
                logger.error(f"Failed to stop workflow {workflow_name}: {e}")

        return {
            'stopped': stopped,
            'failed': failed,
            'skipped': skipped,
            'stopped_count': len(stopped),
            'failed_count': len(failed),
            'skipped_count': len(skipped)
        }

    def verify_workflow_isolation(self, debug_workflow_id: str) -> Dict[str, Any]:
        """
        Verify that only the debug workflow is active.

        Args:
            debug_workflow_id: ID of the workflow that should be active

        Returns:
            Verification result
        """
        try:
            workflows = self.client.list_workflows()
            active_workflows = [w for w in workflows if w.get('active', False)]

            debug_workflow_active = False
            other_active_workflows = []

            for wf in active_workflows:
                wf_id = wf.get('id')
                wf_name = wf.get('name', 'Unknown')

                if wf_id == debug_workflow_id:
                    debug_workflow_active = True
                else:
                    other_active_workflows.append({
                        'id': wf_id,
                        'name': wf_name
                    })

            is_isolated = debug_workflow_active and len(other_active_workflows) == 0

            return {
                'success': True,
                'is_isolated': is_isolated,
                'debug_workflow_active': debug_workflow_active,
                'other_active_count': len(other_active_workflows),
                'other_active_workflows': other_active_workflows,
                'total_active': len(active_workflows)
            }

        except Exception as e:
            return {
                'success': False,
                'error': str(e)
            }

def create_enhanced_workflow_controller() -> EnhancedWorkflowController:
    """Create an enhanced workflow controller instance."""
    return EnhancedWorkflowController()
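

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how this controller might be driven. It assumes a running
# N8N instance reachable with N8NClient's default configuration, and that this
# file is executed as a module (python -m <package>.enhanced_workflow_controller)
# so the relative import above resolves. The workflow ID below is a hypothetical
# placeholder, not a real ID.
if __name__ == "__main__":
    DEBUG_WORKFLOW_ID = "replace-with-a-real-workflow-id"  # hypothetical placeholder

    controller = create_enhanced_workflow_controller()

    # Try to isolate the debug workflow via the API-based method first.
    isolation = controller.isolate_workflow_for_debugging(DEBUG_WORKFLOW_ID)
    print(isolation)

    # Confirm that only the debug workflow remains active.
    verification = controller.verify_workflow_isolation(DEBUG_WORKFLOW_ID)
    print(verification)

    # If the API method could not stop everything, the Docker restart fallback
    # controller.stop_workflows_via_docker([DEBUG_WORKFLOW_ID]) is available,
    # but note that it restarts the whole N8N container.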