Support model config in LLMs (#1495)

Author: Dev Khant
Date: 2024-07-18 21:51:40 +05:30
Committed by: GitHub
Parent: c411dc294e
Commit: 40c9abe484
15 changed files with 172 additions and 41 deletions
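
Every hunk below makes the same move: each provider test now builds a BaseLlmConfig and hands it to the LLM constructor instead of relying on parameters baked into the class. For orientation, here is a minimal sketch of the config object's assumed shape, inferred from the constructor calls in these tests (the real class lives in mem0/configs/llms/base.py; the defaults shown are illustrative, not confirmed by this diff):

    from typing import Optional

    class BaseLlmConfig:
        # Assumed shape of mem0's LLM config, inferred from the tests below.
        def __init__(
            self,
            model: Optional[str] = None,  # e.g. "gpt-4o" or "llama3-70b-8192"
            temperature: float = 0.0,     # illustrative default
            max_tokens: int = 1000,       # illustrative default
            top_p: float = 1.0,           # illustrative default
        ):
            self.model = model
            self.temperature = temperature
            self.max_tokens = max_tokens
            self.top_p = top_p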

View File

@@ -1,6 +1,7 @@
 import pytest
 from unittest.mock import Mock, patch
 from mem0.llms.groq import GroqLLM
+from mem0.configs.llms.base import BaseLlmConfig

 @pytest.fixture
 def mock_groq_client():
@@ -11,7 +12,8 @@ def mock_groq_client():

 def test_generate_response_without_tools(mock_groq_client):
-    llm = GroqLLM()
+    config = BaseLlmConfig(model="llama3-70b-8192", temperature=0.7, max_tokens=100, top_p=1.0)
+    llm = GroqLLM(config)
     messages = [
         {"role": "system", "content": "You are a helpful assistant."},
         {"role": "user", "content": "Hello, how are you?"}
@@ -25,13 +27,17 @@ def test_generate_response_without_tools(mock_groq_client):
     mock_groq_client.chat.completions.create.assert_called_once_with(
         model="llama3-70b-8192",
-        messages=messages
+        messages=messages,
+        temperature=0.7,
+        max_tokens=100,
+        top_p=1.0
     )
     assert response == "I'm doing well, thank you for asking!"

 def test_generate_response_with_tools(mock_groq_client):
-    llm = GroqLLM()
+    config = BaseLlmConfig(model="llama3-70b-8192", temperature=0.7, max_tokens=100, top_p=1.0)
+    llm = GroqLLM(config)
     messages = [
         {"role": "system", "content": "You are a helpful assistant."},
         {"role": "user", "content": "Add a new memory: Today is a sunny day."}
@@ -70,6 +76,9 @@ def test_generate_response_with_tools(mock_groq_client):
     mock_groq_client.chat.completions.create.assert_called_once_with(
         model="llama3-70b-8192",
         messages=messages,
+        temperature=0.7,
+        max_tokens=100,
+        top_p=1.0,
         tools=tools,
         tool_choice="auto"
     )
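
The Groq file shows the pattern all four files follow: zero-argument construction is replaced by config-driven construction, and the mock assertions verify that the config's sampling parameters are forwarded to the provider client. A sketch of the resulting call-site usage, reusing the values from the test above (generate_response is the method these tests exercise; running this for real would need a Groq API key):

    from mem0.configs.llms.base import BaseLlmConfig
    from mem0.llms.groq import GroqLLM

    # Parameters now travel with the config object instead of being
    # hard-coded inside the LLM class.
    config = BaseLlmConfig(model="llama3-70b-8192", temperature=0.7, max_tokens=100, top_p=1.0)
    llm = GroqLLM(config)
    reply = llm.generate_response([{"role": "user", "content": "Hello, how are you?"}])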

View File

@@ -2,6 +2,7 @@ import pytest
 from unittest.mock import Mock, patch
 from mem0.llms import litellm
+from mem0.configs.llms.base import BaseLlmConfig

 @pytest.fixture
 def mock_litellm():
@@ -9,7 +10,8 @@ def mock_litellm():
     yield mock_litellm

 def test_generate_response_with_unsupported_model(mock_litellm):
-    llm = litellm.LiteLLM(model="unsupported-model")
+    config = BaseLlmConfig(model="unsupported-model", temperature=0.7, max_tokens=100, top_p=1)
+    llm = litellm.LiteLLM(config)
     messages = [{"role": "user", "content": "Hello"}]
     mock_litellm.supports_function_calling.return_value = False
@@ -19,7 +21,8 @@ def test_generate_response_with_unsupported_model(mock_litellm):

 def test_generate_response_without_tools(mock_litellm):
-    llm = litellm.LiteLLM()
+    config = BaseLlmConfig(model="gpt-4o", temperature=0.7, max_tokens=100, top_p=1)
+    llm = litellm.LiteLLM(config)
     messages = [
         {"role": "system", "content": "You are a helpful assistant."},
         {"role": "user", "content": "Hello, how are you?"}
@@ -34,13 +37,17 @@ def test_generate_response_without_tools(mock_litellm):
     mock_litellm.completion.assert_called_once_with(
         model="gpt-4o",
-        messages=messages
+        messages=messages,
+        temperature=0.7,
+        max_tokens=100,
+        top_p=1.0
     )
     assert response == "I'm doing well, thank you for asking!"

 def test_generate_response_with_tools(mock_litellm):
-    llm = litellm.LiteLLM()
+    config = BaseLlmConfig(model="gpt-4o", temperature=0.7, max_tokens=100, top_p=1)
+    llm = litellm.LiteLLM(config)
     messages = [
         {"role": "system", "content": "You are a helpful assistant."},
         {"role": "user", "content": "Add a new memory: Today is a sunny day."}
@@ -80,6 +87,9 @@ def test_generate_response_with_tools(mock_litellm):
     mock_litellm.completion.assert_called_once_with(
         model="gpt-4o",
         messages=messages,
+        temperature=0.7,
+        max_tokens=100,
+        top_p=1,
         tools=tools,
         tool_choice="auto"
    )
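
Note the extra case in this file: test_generate_response_with_unsupported_model stubs litellm.supports_function_calling to return False, so the wrapper presumably rejects such models before ever calling litellm.completion. A sketch of that guard under those assumptions (litellm.supports_function_calling is a real litellm helper; the error type and message here are guesses, since the hunk cuts off before the assertion):

    import litellm

    def check_function_calling_support(model: str) -> None:
        # litellm.supports_function_calling returns True when the named
        # model can handle tool/function calls.
        if not litellm.supports_function_calling(model):
            # Error type and wording are assumptions, not taken from the diff.
            raise ValueError(f"Model '{model}' does not support function calling.")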

View File

@@ -1,6 +1,7 @@
 import pytest
 from unittest.mock import Mock, patch
 from mem0.llms.openai import OpenAILLM
+from mem0.configs.llms.base import BaseLlmConfig

 @pytest.fixture
 def mock_openai_client():
@@ -11,7 +12,8 @@ def mock_openai_client():

 def test_generate_response_without_tools(mock_openai_client):
-    llm = OpenAILLM()
+    config = BaseLlmConfig(model="gpt-4o", temperature=0.7, max_tokens=100, top_p=1.0)
+    llm = OpenAILLM(config)
     messages = [
         {"role": "system", "content": "You are a helpful assistant."},
         {"role": "user", "content": "Hello, how are you?"}
@@ -25,13 +27,17 @@ def test_generate_response_without_tools(mock_openai_client):
     mock_openai_client.chat.completions.create.assert_called_once_with(
         model="gpt-4o",
-        messages=messages
+        messages=messages,
+        temperature=0.7,
+        max_tokens=100,
+        top_p=1.0
     )
     assert response == "I'm doing well, thank you for asking!"

 def test_generate_response_with_tools(mock_openai_client):
-    llm = OpenAILLM()
+    config = BaseLlmConfig(model="gpt-4o", temperature=0.7, max_tokens=100, top_p=1.0)
+    llm = OpenAILLM(config)
     messages = [
         {"role": "system", "content": "You are a helpful assistant."},
         {"role": "user", "content": "Add a new memory: Today is a sunny day."}
@@ -70,6 +76,9 @@ def test_generate_response_with_tools(mock_openai_client):
     mock_openai_client.chat.completions.create.assert_called_once_with(
         model="gpt-4o",
         messages=messages,
+        temperature=0.7,
+        max_tokens=100,
+        top_p=1.0,
         tools=tools,
         tool_choice="auto"
     )

View File

@@ -1,6 +1,7 @@
 import pytest
 from unittest.mock import Mock, patch
 from mem0.llms.together import TogetherLLM
+from mem0.configs.llms.base import BaseLlmConfig

 @pytest.fixture
 def mock_together_client():
@@ -11,7 +12,8 @@ def mock_together_client():

 def test_generate_response_without_tools(mock_together_client):
-    llm = TogetherLLM()
+    config = BaseLlmConfig(model="mistralai/Mixtral-8x7B-Instruct-v0.1", temperature=0.7, max_tokens=100, top_p=1.0)
+    llm = TogetherLLM(config)
     messages = [
         {"role": "system", "content": "You are a helpful assistant."},
         {"role": "user", "content": "Hello, how are you?"}
@@ -25,13 +27,17 @@ def test_generate_response_without_tools(mock_together_client):
     mock_together_client.chat.completions.create.assert_called_once_with(
         model="mistralai/Mixtral-8x7B-Instruct-v0.1",
-        messages=messages
+        messages=messages,
+        temperature=0.7,
+        max_tokens=100,
+        top_p=1.0
     )
     assert response == "I'm doing well, thank you for asking!"

 def test_generate_response_with_tools(mock_together_client):
-    llm = TogetherLLM()
+    config = BaseLlmConfig(model="mistralai/Mixtral-8x7B-Instruct-v0.1", temperature=0.7, max_tokens=100, top_p=1.0)
+    llm = TogetherLLM(config)
     messages = [
         {"role": "system", "content": "You are a helpful assistant."},
         {"role": "user", "content": "Add a new memory: Today is a sunny day."}
@@ -70,6 +76,9 @@ def test_generate_response_with_tools(mock_together_client):
     mock_together_client.chat.completions.create.assert_called_once_with(
         model="mistralai/Mixtral-8x7B-Instruct-v0.1",
         messages=messages,
+        temperature=0.7,
+        max_tokens=100,
+        top_p=1.0,
         tools=tools,
         tool_choice="auto"
     )
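
For context on where these per-provider configs come from in user code: mem0's public entry point is a nested dict passed to Memory.from_config, whose inner "config" keys map onto BaseLlmConfig fields. A sketch mirroring the Together test above (the exact schema at this commit is not shown in the diff, so treat the keys as assumptions):

    from mem0 import Memory

    # Provider name and config keys are assumed; the inner "config" dict
    # presumably feeds the BaseLlmConfig constructor introduced here.
    m = Memory.from_config({
        "llm": {
            "provider": "together",
            "config": {
                "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
                "temperature": 0.7,
                "max_tokens": 100,
            },
        }
    })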