[Improvement] Fix deprecation warnings (#1288)
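The pattern across every hunk below is the same: LangChain deprecated invoking an LLM object directly via `__call__`, so `llm(prompt)` becomes `llm.invoke(prompt)`. A minimal sketch of the migration, assuming the `langchain` OpenAI wrapper is installed and `OPENAI_API_KEY` is set in the environment (the prompt string is borrowed from the first hunk):

```python
from langchain.llms import OpenAI

llm = OpenAI()
prompt = "Write a poem about a tree."

# Before: direct __call__, which now emits a deprecation warning.
# answer = llm(prompt)

# After: the Runnable-style invoke() used throughout this commit.
answer = llm.invoke(prompt)
print(answer)
```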
@@ -56,7 +56,7 @@ def generate(rq: queue.Queue):
 
 def askQuestion(callback_fn: StreamingStdOutCallbackHandlerYield):
     llm = OpenAI(streaming=True, callbacks=[callback_fn])
-    return llm(prompt="Write a poem about a tree.")
+    return llm.invoke(prompt="Write a poem about a tree.")
 
 @app.route("/", methods=["GET"])
 def generate_output():
@@ -38,12 +38,11 @@ class AWSBedrockLlm(BaseLlm):
         }
 
         if config.stream:
-            from langchain.callbacks.streaming_stdout import \
-                StreamingStdOutCallbackHandler
+            from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 
             callbacks = [StreamingStdOutCallbackHandler()]
             llm = Bedrock(**kwargs, streaming=config.stream, callbacks=callbacks)
         else:
             llm = Bedrock(**kwargs)
 
-        return llm(prompt)
+        return llm.invoke(prompt)
@@ -40,4 +40,4 @@ class CohereLlm(BaseLlm):
             p=config.top_p,
         )
 
-        return llm(prompt)
+        return llm.invoke(prompt)
@@ -48,4 +48,4 @@ class Llama2Llm(BaseLlm):
                 "top_p": self.config.top_p,
             },
         )
-        return llm(prompt)
+        return llm.invoke(prompt)
@@ -33,4 +33,4 @@ class OllamaLlm(BaseLlm):
             callback_manager=CallbackManager(callback_manager),
         )
 
-        return llm(prompt)
+        return llm.invoke(prompt)
@@ -40,4 +40,4 @@ class TogetherLlm(BaseLlm):
             top_p=config.top_p,
         )
 
-        return llm(prompt)
+        return llm.invoke(prompt)
@@ -37,4 +37,4 @@ class VLLM(BaseLlm):
         llm_args.update(config.model_kwargs)
 
         llm = BaseVLLM(**llm_args)
-        return llm(prompt)
+        return llm.invoke(prompt)
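For context on the streaming branch touched in the `AWSBedrockLlm` hunk, a minimal standalone sketch, assuming langchain's `Bedrock` wrapper and valid AWS credentials; the `model_id` here is a hypothetical stand-in for the kwargs the real class assembles from its config object:

```python
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import Bedrock

# Hypothetical stand-in for the kwargs built from the class's config.
kwargs = {"model_id": "anthropic.claude-v2"}

# Streaming path: tokens are echoed to stdout as they arrive.
callbacks = [StreamingStdOutCallbackHandler()]
llm = Bedrock(**kwargs, streaming=True, callbacks=callbacks)

# Same invoke() replacement as the rest of the commit.
print(llm.invoke("Write a poem about a tree."))
```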