Google AI ADK Integration Docs (#3086)
@@ -254,6 +254,7 @@
 "integrations/autogen",
 "integrations/crewai",
 "integrations/openai-agents-sdk",
+"integrations/google-ai-adk",
 "integrations/mastra",
 "integrations/vercel-ai-sdk",
 "integrations/livekit",
@@ -164,7 +164,7 @@ By combining CrewAI with Mem0, you can create sophisticated AI systems that main
 ## Help

-- For CrewAI documentation, visit [CrewAI Documentation](https://docs.crewai.com/)
-- For Mem0 documentation, refer to the [Mem0 Platform](https://app.mem0.ai/)
+- [CrewAI Documentation](https://docs.crewai.com/)
+- [Mem0 Platform](https://app.mem0.ai/)

 <Snippet file="get-help.mdx" />
docs/integrations/google-ai-adk.mdx (new file, 284 lines)
@@ -0,0 +1,284 @@
---
title: Google Agent Development Kit
---

<Snippet file="security-compliance.mdx" />

Integrate [**Mem0**](https://github.com/mem0ai/mem0) with [Google Agent Development Kit (ADK)](https://github.com/google/adk-python), an open-source framework for building multi-agent workflows. This integration enables agents to access persistent memory across conversations, enhancing context retention and personalization.

## Overview

1. Store and retrieve memories from Mem0 within Google ADK agents
2. Multi-agent workflows with shared memory across hierarchies
3. Retrieve relevant memories from past conversations
4. Personalized responses based on user history

## Prerequisites

Before setting up Mem0 with Google ADK, ensure you have:

1. Installed the required packages:

```bash
pip install google-adk mem0ai
```

2. Valid API keys:
   - [Mem0 API Key](https://app.mem0.ai/dashboard/api-keys)
   - Google AI Studio API Key
## Basic Integration Example

The following example demonstrates how to create a Google ADK agent with Mem0 memory integration:

```python
import os

from google.adk.agents import Agent
from google.adk.runners import Runner
from google.adk.sessions import InMemorySessionService
from google.genai import types
from mem0 import MemoryClient

# Set up environment variables
os.environ["GOOGLE_API_KEY"] = "your-google-api-key"
os.environ["MEM0_API_KEY"] = "your-mem0-api-key"

# Initialize Mem0 client
mem0 = MemoryClient()

# Define memory function tools
def search_memory(query: str, user_id: str) -> dict:
    """Search through past conversations and memories"""
    memories = mem0.search(query, user_id=user_id)
    if memories:
        memory_context = "\n".join([f"- {mem['memory']}" for mem in memories])
        return {"status": "success", "memories": memory_context}
    return {"status": "no_memories", "message": "No relevant memories found"}

def save_memory(content: str, user_id: str) -> dict:
    """Save important information to memory"""
    try:
        mem0.add([{"role": "user", "content": content}], user_id=user_id)
        return {"status": "success", "message": "Information saved to memory"}
    except Exception as e:
        return {"status": "error", "message": f"Failed to save memory: {str(e)}"}

# Create agent with memory capabilities
personal_assistant = Agent(
    name="personal_assistant",
    model="gemini-2.0-flash",
    instruction="""You are a helpful personal assistant with memory capabilities.
    Use the search_memory function to recall past conversations and user preferences.
    Use the save_memory function to store important information about the user.
    Always personalize your responses based on available memory.""",
    description="A personal assistant that remembers user preferences and past interactions",
    tools=[search_memory, save_memory]
)

def chat_with_agent(user_input: str, user_id: str) -> str:
    """
    Handle user input with automatic memory integration.

    Args:
        user_input: The user's message
        user_id: Unique identifier for the user

    Returns:
        The agent's response
    """
    # Set up session and runner
    session_service = InMemorySessionService()
    session = session_service.create_session(
        app_name="memory_assistant",
        user_id=user_id,
        session_id=f"session_{user_id}"
    )
    runner = Runner(agent=personal_assistant, app_name="memory_assistant", session_service=session_service)

    # Create content and run agent
    content = types.Content(role='user', parts=[types.Part(text=user_input)])
    events = runner.run(user_id=user_id, session_id=session.id, new_message=content)

    # Extract final response
    for event in events:
        if event.is_final_response():
            response = event.content.parts[0].text
            return response

    return "No response generated"

# Example usage
if __name__ == "__main__":
    response = chat_with_agent(
        "I love Italian food and I'm planning a trip to Rome next month",
        user_id="alice"
    )
    print(response)
```
## Multi-Agent Hierarchy with Shared Memory

Create specialized agents in a hierarchy that share memory:

```python
from google.adk.tools.agent_tool import AgentTool

# Travel specialist agent
travel_agent = Agent(
    name="travel_specialist",
    model="gemini-2.0-flash",
    instruction="""You are a travel planning specialist. Use search_memory to
    understand the user's travel preferences and history before making recommendations.
    After providing advice, use save_memory to save travel-related information.""",
    description="Specialist in travel planning and recommendations",
    tools=[search_memory, save_memory]
)

# Health advisor agent
health_agent = Agent(
    name="health_advisor",
    model="gemini-2.0-flash",
    instruction="""You are a health and wellness advisor. Use search_memory to
    understand the user's health goals and dietary preferences.
    After providing advice, use save_memory to save health-related information.""",
    description="Specialist in health and wellness advice",
    tools=[search_memory, save_memory]
)

# Coordinator agent that delegates to specialists
coordinator_agent = Agent(
    name="coordinator",
    model="gemini-2.0-flash",
    instruction="""You are a coordinator that delegates requests to specialist agents.
    For travel-related questions (trips, hotels, flights, destinations), delegate to the travel specialist.
    For health-related questions (fitness, diet, wellness, exercise), delegate to the health advisor.
    Use search_memory to understand the user before delegation.""",
    description="Coordinates requests between specialist agents",
    tools=[
        search_memory,
        AgentTool(agent=travel_agent, skip_summarization=False),
        AgentTool(agent=health_agent, skip_summarization=False)
    ]
)

def chat_with_specialists(user_input: str, user_id: str) -> str:
    """
    Handle user input with specialist agent delegation and memory.

    Args:
        user_input: The user's message
        user_id: Unique identifier for the user

    Returns:
        The specialist agent's response
    """
    session_service = InMemorySessionService()
    session = session_service.create_session(
        app_name="specialist_system",
        user_id=user_id,
        session_id=f"session_{user_id}"
    )
    runner = Runner(agent=coordinator_agent, app_name="specialist_system", session_service=session_service)

    content = types.Content(role='user', parts=[types.Part(text=user_input)])
    events = runner.run(user_id=user_id, session_id=session.id, new_message=content)

    for event in events:
        if event.is_final_response():
            response = event.content.parts[0].text

            # Store the conversation in shared memory
            conversation = [
                {"role": "user", "content": user_input},
                {"role": "assistant", "content": response}
            ]
            mem0.add(conversation, user_id=user_id)

            return response

    return "No response generated"

# Example usage
response = chat_with_specialists("Plan a healthy meal for my Italy trip", user_id="alice")
print(response)
```
## Quick Start Chat Interface

Simple interactive chat with memory and Google ADK:

```python
def interactive_chat():
    """Interactive chat interface with memory and ADK"""
    user_id = input("Enter your user ID: ") or "demo_user"
    print(f"Chat started for user: {user_id}")
    print("Type 'quit' to exit")
    print("=" * 50)

    while True:
        user_input = input("\nYou: ")

        if user_input.lower() == 'quit':
            print("Goodbye! Your conversation has been saved to memory.")
            break
        else:
            response = chat_with_specialists(user_input, user_id)
            print(f"Assistant: {response}")

if __name__ == "__main__":
    interactive_chat()
```
## Key Features

### 1. Memory-Enhanced Function Tools
- **Function Tools**: Standard Python functions that can search and save memories
- **Tool Context**: Access to session state and memory through function parameters (see the sketch below)
- **Structured Returns**: Dictionary-based returns with status indicators for better LLM understanding
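As a minimal sketch of the tool-context point above (assuming your ADK version injects a `ToolContext` parameter into function tools, and that a `user_id` value has already been written to session state; verify both against the ADK docs), a memory tool can resolve the user from state instead of asking the model to pass it:

```python
from google.adk.tools.tool_context import ToolContext

def search_memory_from_state(query: str, tool_context: ToolContext) -> dict:
    """Search memories for the user recorded in session state (hypothetical 'user_id' key)."""
    user_id = tool_context.state.get("user_id", "default")  # assumes the app stored this earlier
    memories = mem0.search(query, user_id=user_id)
    if memories:
        memory_context = "\n".join([f"- {mem['memory']}" for mem in memories])
        return {"status": "success", "memories": memory_context}
    return {"status": "no_memories", "message": "No relevant memories found"}
```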
### 2. Multi-Agent Memory Sharing
- **Agent-as-a-Tool**: Specialists can be called as tools while maintaining shared memory
- **Hierarchical Delegation**: Coordinator agents route to specialists based on context
- **Memory Categories**: Store interactions with metadata for better organization (see the sketch below)
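For the memory-categories point, Mem0's `add` call accepts a `metadata` dictionary, so each specialist can tag what it stores; the keys below are illustrative rather than required by the API:

```python
# Tag a travel-related exchange so it can later be filtered by category
mem0.add(
    [
        {"role": "user", "content": "I prefer boutique hotels near the city center"},
        {"role": "assistant", "content": "Noted - I'll prioritize central boutique hotels."}
    ],
    user_id="alice",
    metadata={"category": "travel", "agent": "travel_specialist"}  # illustrative keys
)
```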
### 3. Flexible Memory Operations
- **Search Capabilities**: Retrieve relevant memories through conversation history
- **User Segmentation**: Organize memories by user ID
- **Memory Management**: Built-in tools for saving and retrieving information (see the sketch below)
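A short sketch of these operations, reusing the `mem0` client from the earlier examples (as with `search`, each returned entry is assumed to expose a `memory` field):

```python
# Inspect everything stored for a user (handy when debugging what an agent remembers)
for mem in mem0.get_all(user_id="alice"):
    print(mem["memory"])

# Targeted retrieval: top matches for a specific question
top_matches = mem0.search("dietary restrictions", user_id="alice", limit=3)
```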
## Configuration Options

Customize memory behavior and agent setup:

```python
# Configure memory search with metadata
memories = mem0.search(
    query="travel preferences",
    user_id="alice",
    limit=5,
    filters={"category": "travel"}  # Filter by category if supported
)

# Configure agent with custom model settings
agent = Agent(
    name="custom_agent",
    model="gemini-2.0-flash",  # or use LiteLLM for other models (see sketch below)
    instruction="Custom agent behavior",
    tools=[search_memory, save_memory],
    # Additional ADK configurations
)

# Use Google Cloud Vertex AI instead of AI Studio
os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "True"
os.environ["GOOGLE_CLOUD_PROJECT"] = "your-project-id"
os.environ["GOOGLE_CLOUD_LOCATION"] = "us-central1"
```
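The comment above mentions LiteLLM as a way to run the same agent on non-Gemini models. A minimal sketch, assuming ADK's `LiteLlm` wrapper from `google.adk.models.lite_llm` plus `pip install litellm` and the relevant provider API key (adjust for your ADK version):

```python
from google.adk.models.lite_llm import LiteLlm

# Hypothetical: the same memory-enabled agent backed by an OpenAI model via LiteLLM
openai_agent = Agent(
    name="custom_agent_openai",
    model=LiteLlm(model="openai/gpt-4o"),  # provider/model string understood by LiteLLM
    instruction="Custom agent behavior",
    tools=[search_memory, save_memory]
)
```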
## Help

- [Google ADK Documentation](https://google.github.io/adk-docs/)
- [Mem0 Platform](https://app.mem0.ai/)
- If you need further assistance, please feel free to reach out to us through the following methods:

<Snippet file="get-help.mdx" />
@@ -152,7 +152,7 @@ By integrating LangChain with Mem0, you can build a personalized Travel Agent AI
 ## Help

 - For more details on LangChain, visit the [LangChain documentation](https://python.langchain.com/).
-- For Mem0 documentation, refer to the [Mem0 Platform](https://app.mem0.ai/).
+- [Mem0 Platform](https://app.mem0.ai/).
 - If you need further assistance, please feel free to reach out to us through the following methods:

 <Snippet file="get-help.mdx" />
@@ -10,7 +10,7 @@ Integrate [**Mem0**](https://github.com/mem0ai/mem0) with [OpenAI Agents SDK](ht

 1. Store and retrieve memories from Mem0 within OpenAI agents
 2. Multi-agent workflows with shared memory
-3. Semantic search for relevant past conversations
+3. Retrieve relevant memories from past conversations
 4. Personalized responses based on user history

 ## Prerequisites
@@ -44,7 +44,7 @@ mem0 = MemoryClient()

 # Define memory tools for the agent
 @function_tool
-def search_memory(query: str, user_id: str = "default") -> str:
+def search_memory(query: str, user_id: str) -> str:
     """Search through past conversations and memories"""
     memories = mem0.search(query, user_id=user_id, limit=3)
     if memories:
@@ -52,7 +52,7 @@ def search_memory(query: str, user_id: str = "default") -> str:
     return "No relevant memories found."

 @function_tool
-def save_memory(content: str, user_id: str = "default") -> str:
+def save_memory(content: str, user_id: str) -> str:
     """Save important information to memory"""
     mem0.add([{"role": "user", "content": content}], user_id=user_id)
     return "Information saved to memory."
@@ -68,7 +68,7 @@ agent = Agent(
     model="gpt-4o"
 )

-def chat_with_memory(user_input: str, user_id: str) -> str:
+def chat_with_agent(user_input: str, user_id: str) -> str:
     """
     Handle user input with automatic memory integration.

@@ -82,22 +82,24 @@ def chat_with_memory(user_input: str, user_id: str) -> str:
     # Run the agent (it will automatically use memory tools when needed)
     result = Runner.run_sync(agent, user_input)

-    # Store the conversation in memory
-    conversation = [
-        {"role": "user", "content": user_input},
-        {"role": "assistant", "content": result.final_output}
-    ]
-    mem0.add(conversation, user_id=user_id)

     return result.final_output

 # Example usage
 if __name__ == "__main__":
-    response = chat_with_memory(
+    # preferences will be saved in memory (using save_memory tool)
+    response_1 = chat_with_agent(
         "I love Italian food and I'm planning a trip to Rome next month",
         user_id="alice"
     )
-    print(response)
+    print(response_1)
+
+    # memory will be retrieved using search_memory tool to answer the user query
+    response_2 = chat_with_agent(
+        "Give me some recommendations for food",
+        user_id="alice"
+    )
+    print(response_2)
 ```

 ## Multi-Agent Workflow with Handoffs
@@ -133,12 +135,11 @@ triage_agent = Agent(
     For travel-related questions (trips, hotels, flights, destinations), hand off to Travel Planner.
     For health-related questions (fitness, diet, wellness, exercise), hand off to Health Advisor.
     For general questions, you can handle them directly using available tools.""",
-    tools=[get_user_context],
     handoffs=[travel_agent, health_agent],
     model="gpt-4o"
 )

-def chat_with_handoffs(user_input: str, user_id: str = "default") -> str:
+def chat_with_handoffs(user_input: str, user_id: str) -> str:
     """
     Handle user input with automatic agent handoffs and memory integration.

@@ -230,5 +231,6 @@ mem0.add(

 - [OpenAI Agents SDK Documentation](https://openai.github.io/openai-agents-python/)
 - [Mem0 Platform](https://app.mem0.ai/)
+- If you need further assistance, please feel free to reach out to us through the following methods:

 <Snippet file="get-help.mdx" />
examples/misc/test.py (new file, 88 lines)
@@ -0,0 +1,88 @@
from agents import Agent, Runner, function_tool, enable_verbose_stdout_logging
from dotenv import load_dotenv

from mem0 import MemoryClient

enable_verbose_stdout_logging()

load_dotenv()

# Initialize Mem0 client
mem0 = MemoryClient()


# Define memory tools for the agent
@function_tool
def search_memory(query: str, user_id: str) -> str:
    """Search through past conversations and memories"""
    memories = mem0.search(query, user_id=user_id, limit=3)
    if memories:
        return "\n".join([f"- {mem['memory']}" for mem in memories])
    return "No relevant memories found."


@function_tool
def save_memory(content: str, user_id: str) -> str:
    """Save important information to memory"""
    mem0.add([{"role": "user", "content": content}], user_id=user_id)
    return "Information saved to memory."


# Specialized agents
travel_agent = Agent(
    name="Travel Planner",
    instructions="""You are a travel planning specialist. Use search_memory to
    understand the user's travel preferences and history before making recommendations.
    After providing your response, use save_memory to save important details.""",
    tools=[search_memory, save_memory],
    model="gpt-4o"
)

health_agent = Agent(
    name="Health Advisor",
    instructions="""You are a health and wellness advisor. Use search_memory to
    understand the user's health goals and dietary preferences.
    After providing advice, use save_memory to save relevant information.""",
    tools=[search_memory, save_memory],
    model="gpt-4o"
)

# Triage agent with handoffs
triage_agent = Agent(
    name="Personal Assistant",
    instructions="""You are a helpful personal assistant that routes requests to specialists.
    For travel-related questions (trips, hotels, flights, destinations), hand off to Travel Planner.
    For health-related questions (fitness, diet, wellness, exercise), hand off to Health Advisor.
    For general questions, you can handle them directly using available tools.""",
    handoffs=[travel_agent, health_agent],
    model="gpt-4o"
)


def chat_with_handoffs(user_input: str, user_id: str) -> str:
    """
    Handle user input with automatic agent handoffs and memory integration.

    Args:
        user_input: The user's message
        user_id: Unique identifier for the user

    Returns:
        The agent's response
    """
    # Run the triage agent (it will automatically hand off when needed)
    result = Runner.run_sync(triage_agent, user_input)

    # Store the original conversation in memory
    conversation = [
        {"role": "user", "content": user_input},
        {"role": "assistant", "content": result.final_output}
    ]
    mem0.add(conversation, user_id=user_id)

    return result.final_output


# Example usage
# response = chat_with_handoffs("Which places should I visit?", user_id="alex")
# print(response)