[Refactor] Improve logging package-wide (#1315)
@@ -50,22 +50,3 @@ def test_get_messages(anthropic_llm):
        SystemMessage(content="Test System Prompt", additional_kwargs={}),
        HumanMessage(content="Test Prompt", additional_kwargs={}, example=False),
    ]


def test_get_answer_max_tokens_is_provided(anthropic_llm, caplog):
    with patch("langchain_community.chat_models.ChatAnthropic") as mock_chat:
        mock_chat_instance = mock_chat.return_value
        mock_chat_instance.return_value = MagicMock(content="Test Response")

        prompt = "Test Prompt"
        config = anthropic_llm.config
        config.max_tokens = 500

        response = anthropic_llm._get_answer(prompt, config)

        assert response == "Test Response"
        mock_chat.assert_called_once_with(
            anthropic_api_key="test_api_key", temperature=config.temperature, model=config.model
        )

        assert "Config option `max_tokens` is not supported by this model." in caplog.text
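The caplog assertion above is the crux of this hunk: the Anthropic wrapper is expected to warn, through the standard logging module, when a config option the model cannot honor is set. A minimal, self-contained sketch of that pattern, assuming a per-module logger and a hypothetical _get_answer (names are illustrative, not the embedchain implementation):

import logging
from types import SimpleNamespace

logger = logging.getLogger(__name__)

def _get_answer(prompt: str, config) -> str:
    # Hypothetical: warn rather than raise when an unsupported option is set,
    # then answer as if the option had not been given.
    if getattr(config, "max_tokens", None) is not None:
        logger.warning("Config option `max_tokens` is not supported by this model.")
    return "Test Response"

def test_warns_on_unsupported_option(caplog):
    config = SimpleNamespace(max_tokens=500)
    assert _get_answer("Test Prompt", config) == "Test Response"
    # pytest's caplog fixture collects records propagated to the root logger.
    assert "Config option `max_tokens` is not supported by this model." in caplog.text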
@@ -59,33 +59,6 @@ def test_get_messages(azure_openai_llm):
    ]


def test_get_answer_top_p_is_provided(azure_openai_llm, caplog):
    with patch("langchain_community.chat_models.AzureChatOpenAI") as mock_chat:
        mock_chat_instance = mock_chat.return_value
        mock_chat_instance.return_value = MagicMock(content="Test Response")

        prompt = "Test Prompt"
        config = azure_openai_llm.config
        config.top_p = 0.5

        response = azure_openai_llm._get_answer(prompt, config)

        assert response == "Test Response"
        mock_chat.assert_called_once_with(
            deployment_name=config.deployment_name,
            openai_api_version="2023-05-15",
            model_name=config.model or "gpt-3.5-turbo",
            temperature=config.temperature,
            max_tokens=config.max_tokens,
            streaming=config.stream,
        )
        mock_chat_instance.assert_called_once_with(
            azure_openai_llm._get_messages(prompt, system_prompt=config.system_prompt)
        )

        assert "Config option `top_p` is not supported by this model." in caplog.text


def test_when_no_deployment_name_provided():
    config = BaseLlmConfig(temperature=0.7, model="gpt-3.5-turbo", max_tokens=50, system_prompt="System Prompt")
    with pytest.raises(ValueError):
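Worth noting in the hunk above: patching the class gives the test two mock layers to assert against. mock_chat itself records the constructor kwargs, while mock_chat.return_value stands in for the created instance and records the chat invocation. A self-contained sketch of that layering, with ChatClient as a hedged placeholder for a class like AzureChatOpenAI:

from unittest.mock import MagicMock, patch

class ChatClient:
    # Placeholder for a chat-model class; only the call shape matters here.
    def __init__(self, **kwargs):
        pass

    def __call__(self, messages):
        raise NotImplementedError

def ask(prompt: str) -> str:
    # Code under test: build the client, then call it with the messages.
    chat = ChatClient(deployment_name="demo", temperature=0.0)
    return chat([prompt]).content

def test_ask_constructs_and_invokes_client():
    with patch(f"{__name__}.ChatClient") as mock_chat:
        mock_chat.return_value.return_value = MagicMock(content="Test Response")
        assert ask("Test Prompt") == "Test Response"
        # One assertion per layer: constructor kwargs, then the instance call.
        mock_chat.assert_called_once_with(deployment_name="demo", temperature=0.0)
        mock_chat.return_value.assert_called_once_with(["Test Prompt"])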
@@ -66,21 +66,6 @@ def test_discourse_loader_load_post_with_valid_post_id(discourse_loader, monkeyp
    assert "meta_data" in post_data


def test_discourse_loader_load_post_with_invalid_post_id(discourse_loader, monkeypatch, caplog):
    def mock_get(*args, **kwargs):
        class MockResponse:
            def raise_for_status(self):
                raise requests.exceptions.RequestException("Test error")

        return MockResponse()

    monkeypatch.setattr(requests, "get", mock_get)

    discourse_loader._load_post(123)

    assert "Failed to load post" in caplog.text


def test_discourse_loader_load_data_with_valid_query(discourse_loader, monkeypatch):
    def mock_get(*args, **kwargs):
        class MockResponse:
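The invalid-post-id test above stubs requests.get with an object whose raise_for_status raises, then asserts that the loader logs the failure instead of letting the exception escape. A minimal sketch of that swallow-and-log behavior, with load_post as a stand-in for the loader method rather than the real embedchain code:

import logging
import requests

logger = logging.getLogger(__name__)

def load_post(post_id: int):
    # Stand-in: log HTTP failures at ERROR and return None instead of raising.
    try:
        response = requests.get(f"https://example.org/posts/{post_id}.json")
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        logger.error("Failed to load post %s: %s", post_id, e)
        return None
    return response.json()

def test_load_post_logs_on_http_error(monkeypatch, caplog):
    class MockResponse:
        def raise_for_status(self):
            raise requests.exceptions.RequestException("Test error")

    monkeypatch.setattr(requests, "get", lambda *args, **kwargs: MockResponse())
    assert load_post(123) is None
    assert "Failed to load post" in caplog.text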