diff --git a/config.py b/config.py
index ac6faf4b..ce5a92da 100644
--- a/config.py
+++ b/config.py
@@ -54,7 +54,7 @@ def get_mem0_config(config: SystemConfig, provider: str = "openai") -> Dict[str,
base_config["vector_store"] = {
"provider": "supabase",
"config": {
- "connection_string": "postgresql://supabase_admin:CzkaYmRvc26Y@localhost:5435/postgres",
+ "connection_string": os.getenv("SUPABASE_CONNECTION_STRING", "postgresql://supabase_admin:CzkaYmRvc26Y@localhost:5435/postgres"),
"collection_name": "mem0_working_test",
"embedding_model_dims": 768 # nomic-embed-text dimension
}
diff --git a/docker-compose.api-localai.yml b/docker-compose.api-localai.yml
new file mode 100644
index 00000000..c4d20514
--- /dev/null
+++ b/docker-compose.api-localai.yml
@@ -0,0 +1,31 @@
+version: '3.8'
+
+services:
+ mem0-api:
+ build: .
+ container_name: mem0-api-localai
+ networks:
+ - localai
+ ports:
+ - "8080:8080"
+ environment:
+ - API_HOST=0.0.0.0
+ - API_PORT=8080
+ - API_KEYS=mem0_dev_key_123456789,mem0_docker_key_987654321
+ - ADMIN_API_KEYS=mem0_admin_key_111222333
+ - RATE_LIMIT_REQUESTS=100
+ - RATE_LIMIT_WINDOW_MINUTES=1
+ - OLLAMA_BASE_URL=http://172.21.0.1:11434
+ - SUPABASE_CONNECTION_STRING=postgresql://supabase_admin:CzkaYmRvc26Y@172.21.0.12:5432/postgres
+ restart: unless-stopped
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
+ interval: 30s
+ timeout: 10s
+ retries: 3
+ volumes:
+ - ./logs:/app/logs:rw
+
+networks:
+ localai:
+ external: true
\ No newline at end of file
diff --git a/docs/introduction.mdx b/docs/introduction.mdx
index b7d20e13..c96045c0 100644
--- a/docs/introduction.mdx
+++ b/docs/introduction.mdx
@@ -101,7 +101,7 @@ graph TB
| **Supabase** | ✅ Ready | Self-hosted database with pgvector on localhost:8000 |
| **Ollama** | ✅ Ready | 21+ local models available on localhost:11434 |
| **Mem0 Core** | ✅ Ready | Memory management system v0.1.115 |
-| **REST API** | ✅ Ready | FastAPI server with full CRUD, auth, and testing on localhost:8080 |
+| **REST API** | ✅ Ready | FastAPI server with full CRUD, auth, testing, and Docker networking support |
## Getting Started
diff --git a/docs/open-source/features/rest-api.mdx b/docs/open-source/features/rest-api.mdx
index d6419274..e00df6fa 100644
--- a/docs/open-source/features/rest-api.mdx
+++ b/docs/open-source/features/rest-api.mdx
@@ -97,6 +97,28 @@ Mem0 provides a comprehensive REST API server built with FastAPI. The implementa
The Docker deployment automatically configures external access on `0.0.0.0:8080`.
+
+
+ For integration with N8N workflows or other containerized services:
+
+ ```bash
+ # Deploy to existing Docker network (e.g., localai)
+ docker-compose -f docker-compose.api-localai.yml up -d
+
+ # Find the container IP address
+ docker inspect mem0-api-localai --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}'
+ ```
+
+ **Usage in N8N HTTP Request Node:**
+ - **URL**: `http://172.21.0.17:8080/v1/memories` (use actual container IP)
+ - **Method**: POST
+ - **Headers**: `Authorization: Bearer mem0_dev_key_123456789`
+ - **Body**: JSON object with `messages`, `user_id`, and `metadata`
+
+
+  **Docker-native deployment:** the compose file points the API at Ollama and Supabase on the same network via environment variables. Use container IP addresses for reliable service-to-service communication.
+
+
## API Endpoints
diff --git a/docs/quickstart.mdx b/docs/quickstart.mdx
index dbd4608d..58e860ef 100644
--- a/docs/quickstart.mdx
+++ b/docs/quickstart.mdx
@@ -75,9 +75,28 @@ Our Phase 2 implementation provides a production-ready REST API with two deploym
The Docker deployment automatically configures the API to accept external connections on `0.0.0.0:8080`.
+
+
+ For integration with N8N or other Docker containers on custom networks:
+
+ ```bash
+ # Deploy to localai network (or your custom network)
+ docker-compose -f docker-compose.api-localai.yml up -d
+
+ # Find container IP for connections
+ docker inspect mem0-api-localai --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}'
+ ```
+
+  - **Access:** `http://CONTAINER_IP:8080` (from within the Docker network)
+  - **Example:** `http://172.21.0.17:8080`
+
+
+  Suited to N8N workflows and Docker-to-Docker communication; connections to service dependencies such as Ollama and Supabase are configured via environment variables in the compose file.
+
+
-Both options provide:
+All deployment options provide:
- Interactive documentation at `/docs`
- Full authentication and rate limiting
- Comprehensive error handling
diff --git a/screenshots/Snímek obrazovky 2025-08-01 081218.png b/screenshots/Snímek obrazovky 2025-08-01 081218.png
new file mode 100644
index 00000000..4f124219
Binary files /dev/null and b/screenshots/Snímek obrazovky 2025-08-01 081218.png differ