Remove unnecessary tools (#1979)

This commit is contained in:
Dev Khant
2024-10-22 11:47:16 +05:30
committed by GitHub
parent 078aa66b90
commit c5d298eec8
2 changed files with 0 additions and 91 deletions

View File

@@ -1,57 +0,0 @@
# TODO: Remove these tools if no issues are found for new memory addition logic
# OpenAI function-calling schema: create a new memory from a plain-text payload.
ADD_MEMORY_TOOL = {
    "type": "function",
    "function": {
        "name": "add_memory",
        "description": "Add a memory",
        "parameters": {
            "type": "object",
            "properties": {
                "data": {
                    "type": "string",
                    "description": "Data to add to memory",
                },
            },
            "required": ["data"],
            "additionalProperties": False,
        },
    },
}
# OpenAI function-calling schema: overwrite an existing memory, addressed by its ID.
UPDATE_MEMORY_TOOL = {
    "type": "function",
    "function": {
        "name": "update_memory",
        "description": "Update memory provided ID and data",
        "parameters": {
            "type": "object",
            "properties": {
                # Which memory to overwrite.
                "memory_id": {
                    "type": "string",
                    "description": "memory_id of the memory to update",
                },
                # Replacement content.
                "data": {
                    "type": "string",
                    "description": "Updated data for the memory",
                },
            },
            "required": ["memory_id", "data"],
            "additionalProperties": False,
        },
    },
}
# OpenAI function-calling schema: remove a memory, addressed by its ID.
DELETE_MEMORY_TOOL = {
    "type": "function",
    "function": {
        "name": "delete_memory",
        "description": "Delete memory by memory_id",
        "parameters": {
            "type": "object",
            "properties": {
                "memory_id": {
                    "type": "string",
                    "description": "memory_id of the memory to delete",
                },
            },
            "required": ["memory_id"],
            "additionalProperties": False,
        },
    },
}

View File

@@ -4,7 +4,6 @@ import pytest
from mem0.configs.llms.base import BaseLlmConfig
from mem0.llms.ollama import OllamaLLM
from mem0.llms.utils.tools import ADD_MEMORY_TOOL
@pytest.fixture
@@ -33,36 +32,3 @@ def test_generate_response_without_tools(mock_ollama_client):
model="llama3.1:70b", messages=messages, options={"temperature": 0.7, "num_predict": 100, "top_p": 1.0}
)
assert response == "I'm doing well, thank you for asking!"
def test_generate_response_with_tools(mock_ollama_client):
    """OllamaLLM.generate_response forwards tools to the client and surfaces tool calls."""
    # Canned client reply: one content string plus a single add_memory tool call.
    fake_reply = {
        "message": {
            "content": "I've added the memory for you.",
            "tool_calls": [{"function": {"name": "add_memory", "arguments": {"data": "Today is a sunny day."}}}],
        }
    }
    mock_ollama_client.chat.return_value = fake_reply

    llm = OllamaLLM(BaseLlmConfig(model="llama3.1:70b", temperature=0.7, max_tokens=100, top_p=1.0))
    conversation = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Add a new memory: Today is a sunny day."},
    ]
    tool_list = [ADD_MEMORY_TOOL]

    response = llm.generate_response(conversation, tools=tool_list)

    # The client must receive the config-derived options and the tool schemas verbatim.
    mock_ollama_client.chat.assert_called_once_with(
        model="llama3.1:70b",
        messages=conversation,
        options={"temperature": 0.7, "num_predict": 100, "top_p": 1.0},
        tools=tool_list,
    )
    assert response["content"] == "I've added the memory for you."
    # Exactly one tool call, parsed into name/arguments form.
    assert len(response["tool_calls"]) == 1
    first_call = response["tool_calls"][0]
    assert first_call["name"] == "add_memory"
    assert first_call["arguments"] == {"data": "Today is a sunny day."}