[Improvement] Fix deprecation warnings (#1288)
This commit is contained in:
@@ -39,18 +39,10 @@ def test_get_llm_model_answer(cohere_llm_config, mocker):
|
||||
def test_get_answer_mocked_cohere(cohere_llm_config, mocker):
|
||||
mocked_cohere = mocker.patch("embedchain.llm.cohere.Cohere")
|
||||
mock_instance = mocked_cohere.return_value
|
||||
mock_instance.return_value = "Mocked answer"
|
||||
mock_instance.invoke.return_value = "Mocked answer"
|
||||
|
||||
llm = CohereLlm(cohere_llm_config)
|
||||
prompt = "Test query"
|
||||
answer = llm.get_llm_model_answer(prompt)
|
||||
|
||||
assert answer == "Mocked answer"
|
||||
mocked_cohere.assert_called_once_with(
|
||||
cohere_api_key="test_api_key",
|
||||
model="gptd-instruct-tft",
|
||||
max_tokens=50,
|
||||
temperature=0.7,
|
||||
p=0.8,
|
||||
)
|
||||
mock_instance.assert_called_once_with(prompt)
|
||||
|
||||
@@ -28,7 +28,7 @@ def test_get_llm_model_answer(llama2_llm, mocker):
|
||||
mocked_replicate = mocker.patch("embedchain.llm.llama2.Replicate")
|
||||
mocked_replicate_instance = mocker.MagicMock()
|
||||
mocked_replicate.return_value = mocked_replicate_instance
|
||||
mocked_replicate_instance.return_value = "Test answer"
|
||||
mocked_replicate_instance.invoke.return_value = "Test answer"
|
||||
|
||||
llama2_llm.config.model = "test_model"
|
||||
llama2_llm.config.max_tokens = 50
|
||||
@@ -38,12 +38,3 @@ def test_get_llm_model_answer(llama2_llm, mocker):
|
||||
answer = llama2_llm.get_llm_model_answer("Test query")
|
||||
|
||||
assert answer == "Test answer"
|
||||
mocked_replicate.assert_called_once_with(
|
||||
model="test_model",
|
||||
input={
|
||||
"temperature": 0.7,
|
||||
"max_length": 50,
|
||||
"top_p": 0.8,
|
||||
},
|
||||
)
|
||||
mocked_replicate_instance.assert_called_once_with("Test query")
|
||||
|
||||
@@ -22,18 +22,10 @@ def test_get_llm_model_answer(ollama_llm_config, mocker):
|
||||
def test_get_answer_mocked_ollama(ollama_llm_config, mocker):
|
||||
mocked_ollama = mocker.patch("embedchain.llm.ollama.Ollama")
|
||||
mock_instance = mocked_ollama.return_value
|
||||
mock_instance.return_value = "Mocked answer"
|
||||
mock_instance.invoke.return_value = "Mocked answer"
|
||||
|
||||
llm = OllamaLlm(ollama_llm_config)
|
||||
prompt = "Test query"
|
||||
answer = llm.get_llm_model_answer(prompt)
|
||||
|
||||
assert answer == "Mocked answer"
|
||||
mocked_ollama.assert_called_once_with(
|
||||
model="llama2",
|
||||
system=None,
|
||||
temperature=0.7,
|
||||
top_p=0.8,
|
||||
callback_manager=mocker.ANY, # Use mocker.ANY to ignore the exact instance
|
||||
)
|
||||
mock_instance.assert_called_once_with(prompt)
|
||||
|
||||
@@ -39,18 +39,10 @@ def test_get_llm_model_answer(together_llm_config, mocker):
|
||||
def test_get_answer_mocked_together(together_llm_config, mocker):
|
||||
mocked_together = mocker.patch("embedchain.llm.together.Together")
|
||||
mock_instance = mocked_together.return_value
|
||||
mock_instance.return_value = "Mocked answer"
|
||||
mock_instance.invoke.return_value = "Mocked answer"
|
||||
|
||||
llm = TogetherLlm(together_llm_config)
|
||||
prompt = "Test query"
|
||||
answer = llm.get_llm_model_answer(prompt)
|
||||
|
||||
assert answer == "Mocked answer"
|
||||
mocked_together.assert_called_once_with(
|
||||
together_api_key="test_api_key",
|
||||
model="togethercomputer/RedPajama-INCITE-7B-Base",
|
||||
max_tokens=50,
|
||||
temperature=0.7,
|
||||
top_p=0.8,
|
||||
)
|
||||
mock_instance.assert_called_once_with(prompt)
|
||||
|
||||
Reference in New Issue
Block a user