[Bug fix] Anthropic, Llama2 and VertexAI LLMs dependencies (#820)

Sidharth Mohanty
2023-10-18 13:40:46 +05:30
committed by GitHub
parent d8a7d71344
commit 65a20aa457
6 changed files with 40 additions and 3 deletions

View File

@@ -1,3 +1,4 @@
+import os
 from unittest.mock import MagicMock, patch
 
 import pytest
@@ -9,6 +10,7 @@ from embedchain.llm.anthropic import AnthropicLlm
 
 @pytest.fixture
 def anthropic_llm():
+    os.environ["ANTHROPIC_API_KEY"] = "test_api_key"
     config = BaseLlmConfig(temperature=0.5, model="gpt2")
     return AnthropicLlm(config)
 
@@ -31,7 +33,9 @@ def test_get_answer(anthropic_llm):
         assert response == "Test Response"
         mock_chat.assert_called_once_with(
-            temperature=anthropic_llm.config.temperature, model=anthropic_llm.config.model
+            anthropic_api_key="test_api_key",
+            temperature=anthropic_llm.config.temperature,
+            model=anthropic_llm.config.model,
         )
         mock_chat_instance.assert_called_once_with(
             anthropic_llm._get_messages(prompt, system_prompt=anthropic_llm.config.system_prompt)
         )
@@ -60,6 +64,8 @@ def test_get_answer_max_tokens_is_provided(anthropic_llm, caplog):
         response = anthropic_llm._get_answer(prompt, config)
         assert response == "Test Response"
-        mock_chat.assert_called_once_with(temperature=config.temperature, model=config.model)
+        mock_chat.assert_called_once_with(
+            anthropic_api_key="test_api_key", temperature=config.temperature, model=config.model
+        )
 
         assert "Config option `max_tokens` is not supported by this model." in caplog.text

View File

@@ -1,3 +1,5 @@
+import os
+
 import pytest
 
 import embedchain
@@ -22,6 +24,8 @@ class TestFactories:
         ],
     )
     def test_llm_factory_create(self, provider_name, config_data, expected_class):
+        os.environ["ANTHROPIC_API_KEY"] = "test_api_key"
+        os.environ["OPENAI_API_KEY"] = "test_api_key"
        llm_instance = LlmFactory.create(provider_name, config_data)
        assert isinstance(llm_instance, expected_class)
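
For context, a minimal usage sketch of the factory exercised here; the import path, provider key, and config values below are illustrative assumptions rather than values taken from this commit.

# Hypothetical usage; embedchain.factory, the "anthropic" provider key,
# and the config dict are assumptions for illustration only.
import os

from embedchain.factory import LlmFactory  # assumed import path

os.environ["ANTHROPIC_API_KEY"] = "test_api_key"
llm = LlmFactory.create("anthropic", {"temperature": 0.5, "model": "claude-instant-1"})
assert type(llm).__name__ == "AnthropicLlm"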