[Improvement] Fix deprecation warnings (#1288)

This commit is contained in:
Deshraj Yadav
2024-02-27 15:10:41 -08:00
committed by GitHub
parent 56bf33ab7f
commit 09cdaff9a2
12 changed files with 13 additions and 47 deletions

View File

@@ -56,7 +56,7 @@ def generate(rq: queue.Queue):
```
def askQuestion(callback_fn: StreamingStdOutCallbackHandlerYield):
llm = OpenAI(streaming=True, callbacks=[callback_fn])
return llm(prompt="Write a poem about a tree.")
return llm.invoke(prompt="Write a poem about a tree.")
@app.route("/", methods=["GET"])
def generate_output():

View File

@@ -38,12 +38,11 @@ class AWSBedrockLlm(BaseLlm):
}
if config.stream:
from langchain.callbacks.streaming_stdout import \
StreamingStdOutCallbackHandler
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
callbacks = [StreamingStdOutCallbackHandler()]
llm = Bedrock(**kwargs, streaming=config.stream, callbacks=callbacks)
else:
llm = Bedrock(**kwargs)
return llm(prompt)
return llm.invoke(prompt)

View File

@@ -40,4 +40,4 @@ class CohereLlm(BaseLlm):
p=config.top_p,
)
return llm(prompt)
return llm.invoke(prompt)

View File

@@ -48,4 +48,4 @@ class Llama2Llm(BaseLlm):
"top_p": self.config.top_p,
},
)
return llm(prompt)
return llm.invoke(prompt)

View File

@@ -33,4 +33,4 @@ class OllamaLlm(BaseLlm):
callback_manager=CallbackManager(callback_manager),
)
return llm(prompt)
return llm.invoke(prompt)

View File

@@ -40,4 +40,4 @@ class TogetherLlm(BaseLlm):
top_p=config.top_p,
)
return llm(prompt)
return llm.invoke(prompt)

View File

@@ -37,4 +37,4 @@ class VLLM(BaseLlm):
llm_args.update(config.model_kwargs)
llm = BaseVLLM(**llm_args)
return llm(prompt)
return llm.invoke(prompt)

View File

@@ -1,6 +1,6 @@
[tool.poetry]
name = "embedchain"
version = "0.1.87"
version = "0.1.88"
description = "Simplest open source retrieval(RAG) framework"
authors = [
"Taranjeet Singh <taranjeet@embedchain.ai>",

View File

@@ -39,18 +39,10 @@ def test_get_llm_model_answer(cohere_llm_config, mocker):
def test_get_answer_mocked_cohere(cohere_llm_config, mocker):
mocked_cohere = mocker.patch("embedchain.llm.cohere.Cohere")
mock_instance = mocked_cohere.return_value
mock_instance.return_value = "Mocked answer"
mock_instance.invoke.return_value = "Mocked answer"
llm = CohereLlm(cohere_llm_config)
prompt = "Test query"
answer = llm.get_llm_model_answer(prompt)
assert answer == "Mocked answer"
mocked_cohere.assert_called_once_with(
cohere_api_key="test_api_key",
model="gptd-instruct-tft",
max_tokens=50,
temperature=0.7,
p=0.8,
)
mock_instance.assert_called_once_with(prompt)

View File

@@ -28,7 +28,7 @@ def test_get_llm_model_answer(llama2_llm, mocker):
mocked_replicate = mocker.patch("embedchain.llm.llama2.Replicate")
mocked_replicate_instance = mocker.MagicMock()
mocked_replicate.return_value = mocked_replicate_instance
mocked_replicate_instance.return_value = "Test answer"
mocked_replicate_instance.invoke.return_value = "Test answer"
llama2_llm.config.model = "test_model"
llama2_llm.config.max_tokens = 50
@@ -38,12 +38,3 @@ def test_get_llm_model_answer(llama2_llm, mocker):
answer = llama2_llm.get_llm_model_answer("Test query")
assert answer == "Test answer"
mocked_replicate.assert_called_once_with(
model="test_model",
input={
"temperature": 0.7,
"max_length": 50,
"top_p": 0.8,
},
)
mocked_replicate_instance.assert_called_once_with("Test query")

View File

@@ -22,18 +22,10 @@ def test_get_llm_model_answer(ollama_llm_config, mocker):
def test_get_answer_mocked_ollama(ollama_llm_config, mocker):
mocked_ollama = mocker.patch("embedchain.llm.ollama.Ollama")
mock_instance = mocked_ollama.return_value
mock_instance.return_value = "Mocked answer"
mock_instance.invoke.return_value = "Mocked answer"
llm = OllamaLlm(ollama_llm_config)
prompt = "Test query"
answer = llm.get_llm_model_answer(prompt)
assert answer == "Mocked answer"
mocked_ollama.assert_called_once_with(
model="llama2",
system=None,
temperature=0.7,
top_p=0.8,
callback_manager=mocker.ANY, # Use mocker.ANY to ignore the exact instance
)
mock_instance.assert_called_once_with(prompt)

View File

@@ -39,18 +39,10 @@ def test_get_llm_model_answer(together_llm_config, mocker):
def test_get_answer_mocked_together(together_llm_config, mocker):
mocked_together = mocker.patch("embedchain.llm.together.Together")
mock_instance = mocked_together.return_value
mock_instance.return_value = "Mocked answer"
mock_instance.invoke.return_value = "Mocked answer"
llm = TogetherLlm(together_llm_config)
prompt = "Test query"
answer = llm.get_llm_model_answer(prompt)
assert answer == "Mocked answer"
mocked_together.assert_called_once_with(
together_api_key="test_api_key",
model="togethercomputer/RedPajama-INCITE-7B-Base",
max_tokens=50,
temperature=0.7,
top_p=0.8,
)
mock_instance.assert_called_once_with(prompt)