Remove tools from LLMs (#2363)
This commit is contained in:
@@ -13,17 +13,22 @@ def mock_litellm():
|
||||
|
||||
|
||||
def test_generate_response_with_unsupported_model(mock_litellm):
    """A model without function-calling support must raise ValueError.

    Args:
        mock_litellm: pytest fixture mocking the underlying ``litellm`` module
            (assumed to patch ``supports_function_calling`` — TODO confirm
            against the fixture definition above this view).
    """
    config = BaseLlmConfig(
        model="unsupported-model", temperature=0.7, max_tokens=100, top_p=1
    )
    llm = litellm.LiteLLM(config)
    messages = [{"role": "user", "content": "Hello"}]

    # Simulate a model that cannot do function calling.
    mock_litellm.supports_function_calling.return_value = False

    with pytest.raises(
        ValueError,
        match="Model 'unsupported-model' in LiteLLM does not support function calling.",
    ):
        llm.generate_response(messages)
|
||||
|
||||
|
||||
def test_generate_response_without_tools(mock_litellm):
|
||||
def test_generate_response(mock_litellm):
|
||||
config = BaseLlmConfig(model="gpt-4o", temperature=0.7, max_tokens=100, top_p=1)
|
||||
llm = litellm.LiteLLM(config)
|
||||
messages = [
|
||||
@@ -32,7 +37,9 @@ def test_generate_response_without_tools(mock_litellm):
|
||||
]
|
||||
|
||||
mock_response = Mock()
|
||||
mock_response.choices = [Mock(message=Mock(content="I'm doing well, thank you for asking!"))]
|
||||
mock_response.choices = [
|
||||
Mock(message=Mock(content="I'm doing well, thank you for asking!"))
|
||||
]
|
||||
mock_litellm.completion.return_value = mock_response
|
||||
mock_litellm.supports_function_calling.return_value = True
|
||||
|
||||
@@ -42,50 +49,3 @@ def test_generate_response_without_tools(mock_litellm):
|
||||
model="gpt-4o", messages=messages, temperature=0.7, max_tokens=100, top_p=1.0
|
||||
)
|
||||
assert response == "I'm doing well, thank you for asking!"
|
||||
|
||||
|
||||
def test_generate_response_with_tools(mock_litellm):
    """A completion containing tool calls is parsed into content + tool_calls.

    Asserts that ``generate_response`` forwards ``tools``/``tool_choice`` to
    ``litellm.completion`` and unpacks the mocked tool call into the returned
    dict (``name`` as-is, ``arguments`` JSON-decoded into a dict).

    Args:
        mock_litellm: pytest fixture mocking the underlying ``litellm`` module.
    """
    config = BaseLlmConfig(model="gpt-4o", temperature=0.7, max_tokens=100, top_p=1)
    llm = litellm.LiteLLM(config)
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Add a new memory: Today is a sunny day."},
    ]
    tools = [
        {
            "type": "function",
            "function": {
                "name": "add_memory",
                "description": "Add a memory",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "data": {"type": "string", "description": "Data to add to memory"}
                    },
                    "required": ["data"],
                },
            },
        }
    ]

    # Build a fake completion whose single choice carries one tool call.
    mock_response = Mock()
    mock_message = Mock()
    mock_message.content = "I've added the memory for you."

    mock_tool_call = Mock()
    # Mock auto-creates ``function``; set the attributes the parser reads.
    mock_tool_call.function.name = "add_memory"
    mock_tool_call.function.arguments = '{"data": "Today is a sunny day."}'

    mock_message.tool_calls = [mock_tool_call]
    mock_response.choices = [Mock(message=mock_message)]
    mock_litellm.completion.return_value = mock_response
    mock_litellm.supports_function_calling.return_value = True

    response = llm.generate_response(messages, tools=tools)

    # Tools must be passed through with tool_choice="auto".
    mock_litellm.completion.assert_called_once_with(
        model="gpt-4o", messages=messages, temperature=0.7, max_tokens=100, top_p=1, tools=tools, tool_choice="auto"
    )

    assert response["content"] == "I've added the memory for you."
    assert len(response["tool_calls"]) == 1
    assert response["tool_calls"][0]["name"] == "add_memory"
    # Arguments are decoded from the JSON string into a dict.
    assert response["tool_calls"][0]["arguments"] == {"data": "Today is a sunny day."}
||||
|
||||
Reference in New Issue
Block a user