Improve tests (#795)
tests/llm/test_antrophic.py (new file, 64 lines)
@@ -0,0 +1,64 @@
import pytest
from unittest.mock import MagicMock, patch

from embedchain.llm.antrophic import AntrophicLlm
from embedchain.config import BaseLlmConfig
from langchain.schema import HumanMessage, SystemMessage


@pytest.fixture
def antrophic_llm():
    config = BaseLlmConfig(temperature=0.5, model="gpt2")
    return AntrophicLlm(config)


def test_get_llm_model_answer(antrophic_llm):
    with patch.object(AntrophicLlm, "_get_answer", return_value="Test Response") as mock_method:
        prompt = "Test Prompt"
        response = antrophic_llm.get_llm_model_answer(prompt)
        assert response == "Test Response"
        mock_method.assert_called_once_with(prompt=prompt, config=antrophic_llm.config)


def test_get_answer(antrophic_llm):
    with patch("langchain.chat_models.ChatAnthropic") as mock_chat:
        mock_chat_instance = mock_chat.return_value
        mock_chat_instance.return_value = MagicMock(content="Test Response")

        prompt = "Test Prompt"
        response = antrophic_llm._get_answer(prompt, antrophic_llm.config)

        assert response == "Test Response"
        mock_chat.assert_called_once_with(
            temperature=antrophic_llm.config.temperature, model=antrophic_llm.config.model
        )
        mock_chat_instance.assert_called_once_with(
            antrophic_llm._get_messages(prompt, system_prompt=antrophic_llm.config.system_prompt)
        )


def test_get_messages(antrophic_llm):
    prompt = "Test Prompt"
    system_prompt = "Test System Prompt"
    messages = antrophic_llm._get_messages(prompt, system_prompt)
    assert messages == [
        SystemMessage(content="Test System Prompt", additional_kwargs={}),
        HumanMessage(content="Test Prompt", additional_kwargs={}, example=False),
    ]


def test_get_answer_max_tokens_is_provided(antrophic_llm, caplog):
    with patch("langchain.chat_models.ChatAnthropic") as mock_chat:
        mock_chat_instance = mock_chat.return_value
        mock_chat_instance.return_value = MagicMock(content="Test Response")

        prompt = "Test Prompt"
        config = antrophic_llm.config
        config.max_tokens = 500

        response = antrophic_llm._get_answer(prompt, config)

        assert response == "Test Response"
        mock_chat.assert_called_once_with(temperature=config.temperature, model=config.model)

        assert "Config option `max_tokens` is not supported by this model." in caplog.text
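
The assertions in test_get_answer and test_get_answer_max_tokens_is_provided pin down how AntrophicLlm._get_answer is expected to behave. A minimal sketch of that flow, inferred only from the mocked calls above (the warning guard and the sketch function name are assumptions, not embedchain's actual source):

import logging

from langchain.chat_models import ChatAnthropic


def _get_answer_sketch(llm, prompt, config):
    # Assumed guard: emit the warning the test checks for whenever max_tokens is set.
    if config.max_tokens:
        logging.warning("Config option `max_tokens` is not supported by this model.")
    # The test asserts ChatAnthropic is constructed with exactly these kwargs...
    chat = ChatAnthropic(temperature=config.temperature, model=config.model)
    # ...and called once with the messages built by _get_messages; the value returned
    # to the caller is the .content of the resulting message.
    messages = llm._get_messages(prompt, system_prompt=config.system_prompt)
    return chat(messages).content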
tests/llm/test_azure_openai.py (new file, 91 lines)
@@ -0,0 +1,91 @@
import pytest
from unittest.mock import MagicMock, patch
from embedchain.llm.azure_openai import AzureOpenAILlm
from embedchain.config import BaseLlmConfig
from langchain.schema import HumanMessage, SystemMessage


@pytest.fixture
def azure_openai_llm():
    config = BaseLlmConfig(
        deployment_name="azure_deployment",
        temperature=0.7,
        model="gpt-3.5-turbo",
        max_tokens=50,
        system_prompt="System Prompt",
    )
    return AzureOpenAILlm(config)


def test_get_llm_model_answer(azure_openai_llm):
    with patch.object(AzureOpenAILlm, "_get_answer", return_value="Test Response") as mock_method:
        prompt = "Test Prompt"
        response = azure_openai_llm.get_llm_model_answer(prompt)
        assert response == "Test Response"
        mock_method.assert_called_once_with(prompt=prompt, config=azure_openai_llm.config)


def test_get_answer(azure_openai_llm):
    with patch("langchain.chat_models.AzureChatOpenAI") as mock_chat:
        mock_chat_instance = mock_chat.return_value
        mock_chat_instance.return_value = MagicMock(content="Test Response")

        prompt = "Test Prompt"
        response = azure_openai_llm._get_answer(prompt, azure_openai_llm.config)

        assert response == "Test Response"
        mock_chat.assert_called_once_with(
            deployment_name=azure_openai_llm.config.deployment_name,
            openai_api_version="2023-05-15",
            model_name=azure_openai_llm.config.model or "gpt-3.5-turbo",
            temperature=azure_openai_llm.config.temperature,
            max_tokens=azure_openai_llm.config.max_tokens,
            streaming=azure_openai_llm.config.stream,
        )
        mock_chat_instance.assert_called_once_with(
            azure_openai_llm._get_messages(prompt, system_prompt=azure_openai_llm.config.system_prompt)
        )


def test_get_messages(azure_openai_llm):
    prompt = "Test Prompt"
    system_prompt = "Test System Prompt"
    messages = azure_openai_llm._get_messages(prompt, system_prompt)
    assert messages == [
        SystemMessage(content="Test System Prompt", additional_kwargs={}),
        HumanMessage(content="Test Prompt", additional_kwargs={}, example=False),
    ]


def test_get_answer_top_p_is_provided(azure_openai_llm, caplog):
    with patch("langchain.chat_models.AzureChatOpenAI") as mock_chat:
        mock_chat_instance = mock_chat.return_value
        mock_chat_instance.return_value = MagicMock(content="Test Response")

        prompt = "Test Prompt"
        config = azure_openai_llm.config
        config.top_p = 0.5

        response = azure_openai_llm._get_answer(prompt, config)

        assert response == "Test Response"
        mock_chat.assert_called_once_with(
            deployment_name=config.deployment_name,
            openai_api_version="2023-05-15",
            model_name=config.model or "gpt-3.5-turbo",
            temperature=config.temperature,
            max_tokens=config.max_tokens,
            streaming=config.stream,
        )
        mock_chat_instance.assert_called_once_with(
            azure_openai_llm._get_messages(prompt, system_prompt=config.system_prompt)
        )

        assert "Config option `top_p` is not supported by this model." in caplog.text


def test_when_no_deployment_name_provided():
    config = BaseLlmConfig(temperature=0.7, model="gpt-3.5-turbo", max_tokens=50, system_prompt="System Prompt")
    with pytest.raises(ValueError):
        llm = AzureOpenAILlm(config)
        llm.get_llm_model_answer("Test Prompt")
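
Several tests in this commit rely on pytest's built-in caplog fixture to assert on warning text emitted through the logging module. A small standalone illustration of that pattern (the helper below is hypothetical, not part of embedchain):

import logging


def emit_unsupported_option_warning():
    # Hypothetical stand-in for the LLM wrappers tested above.
    logging.warning("Config option `top_p` is not supported by this model.")


def test_warning_is_captured(caplog):
    # caplog records log records propagated during the test; caplog.text joins them.
    with caplog.at_level(logging.WARNING):
        emit_unsupported_option_warning()
    assert "Config option `top_p` is not supported by this model." in caplog.text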
tests/llm/test_vertex_ai.py (new file, 63 lines)
@@ -0,0 +1,63 @@
import pytest
from unittest.mock import MagicMock, patch
from embedchain.llm.vertex_ai import VertexAiLlm
from embedchain.config import BaseLlmConfig
from langchain.schema import HumanMessage, SystemMessage


@pytest.fixture
def vertexai_llm():
    config = BaseLlmConfig(temperature=0.6, model="vertexai_model", system_prompt="System Prompt")
    return VertexAiLlm(config)


def test_get_llm_model_answer(vertexai_llm):
    with patch.object(VertexAiLlm, "_get_answer", return_value="Test Response") as mock_method:
        prompt = "Test Prompt"
        response = vertexai_llm.get_llm_model_answer(prompt)
        assert response == "Test Response"
        mock_method.assert_called_once_with(prompt=prompt, config=vertexai_llm.config)


def test_get_answer_with_warning(vertexai_llm, caplog):
    with patch("langchain.chat_models.ChatVertexAI") as mock_chat:
        mock_chat_instance = mock_chat.return_value
        mock_chat_instance.return_value = MagicMock(content="Test Response")

        prompt = "Test Prompt"
        config = vertexai_llm.config
        config.top_p = 0.5

        response = vertexai_llm._get_answer(prompt, config)

        assert response == "Test Response"
        mock_chat.assert_called_once_with(temperature=config.temperature, model=config.model)

        assert "Config option `top_p` is not supported by this model." in caplog.text


def test_get_answer_no_warning(vertexai_llm, caplog):
    with patch("langchain.chat_models.ChatVertexAI") as mock_chat:
        mock_chat_instance = mock_chat.return_value
        mock_chat_instance.return_value = MagicMock(content="Test Response")

        prompt = "Test Prompt"
        config = vertexai_llm.config
        config.top_p = 1.0

        response = vertexai_llm._get_answer(prompt, config)

        assert response == "Test Response"
        mock_chat.assert_called_once_with(temperature=config.temperature, model=config.model)

        assert "Config option `top_p` is not supported by this model." not in caplog.text


def test_get_messages(vertexai_llm):
    prompt = "Test Prompt"
    system_prompt = "Test System Prompt"
    messages = vertexai_llm._get_messages(prompt, system_prompt)
    assert messages == [
        SystemMessage(content="Test System Prompt", additional_kwargs={}),
        HumanMessage(content="Test Prompt", additional_kwargs={}, example=False),
    ]
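
test_get_messages is identical across all three suites, so the shared _get_messages helper presumably builds the message list roughly as sketched below (inferred from the expected output, not copied from embedchain):

from langchain.schema import HumanMessage, SystemMessage


def _get_messages_sketch(prompt, system_prompt=None):
    messages = []
    if system_prompt:
        # Only prepend a SystemMessage when a system prompt is configured.
        messages.append(SystemMessage(content=system_prompt))
    messages.append(HumanMessage(content=prompt))
    return messages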