[Improvement] Fix deprecation warnings (#1288)

Author: Deshraj Yadav (committed via GitHub)
Date:   2024-02-27 15:10:41 -08:00
Parent: 56bf33ab7f
Commit: 09cdaff9a2
12 changed files with 13 additions and 47 deletions
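
Every hunk below applies the same migration: newer LangChain releases deprecate calling an LLM object directly (`llm(prompt)`, i.e. `BaseLLM.__call__`) and emit a deprecation warning, with the Runnable-interface `llm.invoke(...)` as the replacement. A minimal before/after sketch of the pattern, using LangChain's FakeListLLM so it runs without credentials (the prompt is illustrative):

```python
from langchain_community.llms.fake import FakeListLLM

llm = FakeListLLM(responses=["Roses are red, the bark is brown."])

# Before: direct call routes through the deprecated BaseLLM.__call__
# answer = llm("Write a poem about a tree.")  # warns on recent LangChain

# After: invoke() is the supported Runnable entry point
answer = llm.invoke("Write a poem about a tree.")
print(answer)
```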


@@ -56,7 +56,7 @@ def generate(rq: queue.Queue):
 ```
 def askQuestion(callback_fn: StreamingStdOutCallbackHandlerYield):
     llm = OpenAI(streaming=True, callbacks=[callback_fn])
-    return llm(prompt="Write a poem about a tree.")
+    return llm.invoke(prompt="Write a poem about a tree.")
 @app.route("/", methods=["GET"])
 def generate_output():
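
The docs snippet above streams tokens to a Flask response by pushing them through a queue that generate() drains. A rough, self-contained sketch of that queue-and-generator pattern, using LangChain's FakeStreamingListLLM so it runs without an API key (the produce() helper is illustrative, not part of the docs):

```python
import queue
import threading

from langchain_community.llms.fake import FakeStreamingListLLM

def produce(llm, prompt: str, q: queue.Queue) -> None:
    # Push each streamed chunk onto the queue, then a sentinel to stop the reader.
    for chunk in llm.stream(prompt):
        q.put(chunk)
    q.put(None)

def generate(q: queue.Queue):
    # Yield tokens as they arrive; a Flask route can return this generator
    # to stream the response to the client.
    while (token := q.get()) is not None:
        yield token

q = queue.Queue()
llm = FakeStreamingListLLM(responses=["A tree stands tall and green."])
threading.Thread(target=produce, args=(llm, "Write a poem about a tree.", q)).start()
print("".join(generate(q)))
```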


@@ -38,12 +38,11 @@ class AWSBedrockLlm(BaseLlm):
         }
         if config.stream:
-            from langchain.callbacks.streaming_stdout import \
-                StreamingStdOutCallbackHandler
+            from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
             callbacks = [StreamingStdOutCallbackHandler()]
             llm = Bedrock(**kwargs, streaming=config.stream, callbacks=callbacks)
         else:
             llm = Bedrock(**kwargs)
-        return llm(prompt)
+        return llm.invoke(prompt)


@@ -40,4 +40,4 @@ class CohereLlm(BaseLlm):
             p=config.top_p,
         )
-        return llm(prompt)
+        return llm.invoke(prompt)


@@ -48,4 +48,4 @@ class Llama2Llm(BaseLlm):
                 "top_p": self.config.top_p,
             },
         )
-        return llm(prompt)
+        return llm.invoke(prompt)


@@ -33,4 +33,4 @@ class OllamaLlm(BaseLlm):
             callback_manager=CallbackManager(callback_manager),
         )
-        return llm(prompt)
+        return llm.invoke(prompt)


@@ -40,4 +40,4 @@ class TogetherLlm(BaseLlm):
             top_p=config.top_p,
         )
-        return llm(prompt)
+        return llm.invoke(prompt)


@@ -37,4 +37,4 @@ class VLLM(BaseLlm):
             llm_args.update(config.model_kwargs)
         llm = BaseVLLM(**llm_args)
-        return llm(prompt)
+        return llm.invoke(prompt)
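
One way to keep these call sites from regressing is to escalate LangChain's deprecation warnings to errors in tests. A sketch, assuming LangChainDeprecationWarning is importable from langchain_core._api (true for LangChain 0.1.x):

```python
import warnings

from langchain_community.llms.fake import FakeListLLM
from langchain_core._api import LangChainDeprecationWarning

llm = FakeListLLM(responses=["ok"])

with warnings.catch_warnings():
    # Treat the deprecation as a hard failure so CI flags any leftover llm(prompt) calls.
    warnings.simplefilter("error", LangChainDeprecationWarning)
    llm.invoke("ping")  # supported path: no warning, no error
    # llm("ping")       # deprecated __call__ path: would raise here
```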