Fix CI issues related to missing dependency (#3096)

Deshraj Yadav
2025-07-03 18:52:50 -07:00
committed by GitHub
parent 2c496e6376
commit 7484eed4b2
32 changed files with 6150 additions and 828 deletions

View File

@@ -45,11 +45,7 @@ def get_food_recommendation(user_query: str, user_id):
     """Get food recommendation with memory context"""
     # Search memory for relevant food preferences
-    memories_result = memory_client.search(
-        query=user_query,
-        user_id=user_id,
-        limit=5
-    )
+    memories_result = memory_client.search(query=user_query, user_id=user_id, limit=5)

     # Add memory context to the message
     memories = [f"- {result['memory']}" for result in memories_result]
@@ -71,6 +67,7 @@ def get_food_recommendation(user_query: str, user_id):
     # Save audio file
     if response.audio:
         import time
+
         timestamp = int(time.time())
         filename = f"food_recommendation_{timestamp}.mp3"
         write_audio_to_file(
@@ -118,7 +115,11 @@ def initialize_food_memory(user_id):
 # Initialize the memory for the user once in order for the agent to learn the user preference
 initialize_food_memory(user_id=USER_ID)
-print(get_food_recommendation("Which type of restaurants should I go tonight for dinner and cuisines preferred?", user_id=USER_ID))
+print(
+    get_food_recommendation(
+        "Which type of restaurants should I go tonight for dinner and cuisines preferred?", user_id=USER_ID
+    )
+)

 # OUTPUT: 🎵 Audio saved as food_recommendation_1750162610.mp3
 # For dinner tonight, considering your love for healthy spicy options, you could try a nice Thai, Indian, or Mexican restaurant.
 # You might find dishes with quinoa, chickpeas, tofu, and fresh herbs delightful. Enjoy your dinner!
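
The three hunks above are formatting-only, consistent with an autoformatter pass (black-style trailing commas and line wrapping). Read together, the reformatted section goes roughly as follows — a sketch reconstructed from the hunks, with code the diff does not show elided in comments:

# Inside get_food_recommendation: fetch relevant memories first
memories_result = memory_client.search(query=user_query, user_id=user_id, limit=5)
memories = [f"- {result['memory']}" for result in memories_result]

# ...the chat call that produces `response` is not shown in this diff...
if response.audio:
    import time

    timestamp = int(time.time())
    filename = f"food_recommendation_{timestamp}.mp3"
    # write_audio_to_file(...) then saves the mp3 under `filename`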

View File

@@ -1,4 +1,4 @@
-from agents import Agent, Runner, function_tool, handoffs, enable_verbose_stdout_logging
+from agents import Agent, Runner, function_tool, enable_verbose_stdout_logging
 from dotenv import load_dotenv
 from mem0 import MemoryClient
@@ -35,7 +35,7 @@ travel_agent = Agent(
     understand the user's travel preferences and history before making recommendations.
     After providing your response, use store_conversation to save important details.""",
     tools=[search_memory, save_memory],
-    model="gpt-4o"
+    model="gpt-4o",
 )

 health_agent = Agent(
@@ -44,7 +44,7 @@ health_agent = Agent(
     understand the user's health goals and dietary preferences.
     After providing advice, use store_conversation to save relevant information.""",
     tools=[search_memory, save_memory],
-    model="gpt-4o"
+    model="gpt-4o",
 )

 # Triage agent with handoffs
@@ -55,7 +55,7 @@ triage_agent = Agent(
     For health-related questions (fitness, diet, wellness, exercise), hand off to Health Advisor.
     For general questions, you can handle them directly using available tools.""",
     handoffs=[travel_agent, health_agent],
-    model="gpt-4o"
+    model="gpt-4o",
 )
@@ -74,10 +74,7 @@ def chat_with_handoffs(user_input: str, user_id: str) -> str:
     result = Runner.run_sync(triage_agent, user_input)

     # Store the original conversation in memory
-    conversation = [
-        {"role": "user", "content": user_input},
-        {"role": "assistant", "content": result.final_output}
-    ]
+    conversation = [{"role": "user", "content": user_input}, {"role": "assistant", "content": result.final_output}]
     mem0.add(conversation, user_id=user_id)

     return result.final_output
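
Apart from the trailing commas, the only substantive change in this file is dropping the unused `handoffs` import; handoff targets are passed to `Agent` through its `handoffs=` parameter, as the triage agent shows. A minimal driver sketch under that reading — the sample question and user id are hypothetical, and chat_with_handoffs is the function defined above:

# Hypothetical usage: one question routed by the triage agent, with the
# exchange persisted to mem0 by chat_with_handoffs itself.
answer = chat_with_handoffs("Which cuisines should I try in Lisbon?", user_id="demo_user")
print(answer)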

View File

@@ -34,96 +34,91 @@ config = {
             "api_key": "vllm-api-key",
             "temperature": 0.7,
             "max_tokens": 100,
-        }
+        },
     },
-    "embedder": {
-        "provider": "openai",
-        "config": {
-            "model": "text-embedding-3-small"
-        }
-    },
+    "embedder": {"provider": "openai", "config": {"model": "text-embedding-3-small"}},
     "vector_store": {
         "provider": "qdrant",
-        "config": {
-            "collection_name": "vllm_memories",
-            "host": "localhost",
-            "port": 6333
-        }
-    }
+        "config": {"collection_name": "vllm_memories", "host": "localhost", "port": 6333},
+    },
 }


 def main():
     """
     Demonstrate vLLM integration with mem0
     """
     print("--> Initializing mem0 with vLLM...")

     # Initialize memory with vLLM
     memory = Memory.from_config(config)
     print("--> Memory initialized successfully!")

     # Example conversations to store
     conversations = [
         {
             "messages": [
                 {"role": "user", "content": "I love playing chess on weekends"},
-                {"role": "assistant", "content": "That's great! Chess is an excellent strategic game that helps improve critical thinking."}
+                {
+                    "role": "assistant",
+                    "content": "That's great! Chess is an excellent strategic game that helps improve critical thinking.",
+                },
             ],
-            "user_id": "user_123"
+            "user_id": "user_123",
         },
         {
             "messages": [
                 {"role": "user", "content": "I'm learning Python programming"},
-                {"role": "assistant", "content": "Python is a fantastic language for beginners! What specific areas are you focusing on?"}
+                {
+                    "role": "assistant",
+                    "content": "Python is a fantastic language for beginners! What specific areas are you focusing on?",
+                },
             ],
-            "user_id": "user_123"
+            "user_id": "user_123",
         },
         {
             "messages": [
                 {"role": "user", "content": "I prefer working late at night, I'm more productive then"},
-                {"role": "assistant", "content": "Many people find they're more creative and focused during nighttime hours. It's important to maintain a consistent schedule that works for you."}
+                {
+                    "role": "assistant",
+                    "content": "Many people find they're more creative and focused during nighttime hours. It's important to maintain a consistent schedule that works for you.",
+                },
             ],
-            "user_id": "user_123"
-        }
+            "user_id": "user_123",
+        },
     ]

     print("\n--> Adding memories using vLLM...")

     # Add memories - now powered by vLLM's high-performance inference
     for i, conversation in enumerate(conversations, 1):
-        result = memory.add(
-            messages=conversation["messages"],
-            user_id=conversation["user_id"]
-        )
+        result = memory.add(messages=conversation["messages"], user_id=conversation["user_id"])
         print(f"Memory {i} added: {result}")

     print("\n🔍 Searching memories...")

     # Search memories - vLLM will process the search and memory operations
     search_queries = [
         "What does the user like to do on weekends?",
         "What is the user learning?",
-        "When is the user most productive?"
+        "When is the user most productive?",
     ]

     for query in search_queries:
         print(f"\nQuery: {query}")
-        memories = memory.search(
-            query=query,
-            user_id="user_123"
-        )
+        memories = memory.search(query=query, user_id="user_123")
         for memory_item in memories:
             print(f" - {memory_item['memory']}")

     print("\n--> Getting all memories for user...")
     all_memories = memory.get_all(user_id="user_123")
     print(f"Total memories stored: {len(all_memories)}")
     for memory_item in all_memories:
         print(f" - {memory_item['memory']}")

     print("\n--> vLLM integration demo completed successfully!")
     print("\nBenefits of using vLLM:")
     print(" -> 2.7x higher throughput compared to standard implementations")