fix: use template from temporary LlmConfig (#590)
@@ -174,27 +174,37 @@ class BaseLlm(JSONSerializable):
         :return: The answer to the query or the dry run result
         :rtype: str
         """
-        query_config = config or self.config
+        try:
+            if config:
+                # A config instance passed to this method will only be applied temporarily, for one call.
+                # So we will save the previous config and restore it at the end of the execution.
+                # For this we use the serializer.
+                prev_config = self.config.serialize()
+                self.config = config
 
-        if self.is_docs_site_instance:
-            query_config.template = DOCS_SITE_PROMPT_TEMPLATE
-            query_config.number_documents = 5
-        k = {}
-        if self.online:
-            k["web_search_result"] = self.access_search_and_get_results(input_query)
-        prompt = self.generate_prompt(input_query, contexts, **k)
-        logging.info(f"Prompt: {prompt}")
+            if self.is_docs_site_instance:
+                self.config.template = DOCS_SITE_PROMPT_TEMPLATE
+                self.config.number_documents = 5
+            k = {}
+            if self.online:
+                k["web_search_result"] = self.access_search_and_get_results(input_query)
+            prompt = self.generate_prompt(input_query, contexts, **k)
+            logging.info(f"Prompt: {prompt}")
 
-        if dry_run:
-            return prompt
+            if dry_run:
+                return prompt
 
-        answer = self.get_answer_from_llm(prompt)
+            answer = self.get_answer_from_llm(prompt)
 
-        if isinstance(answer, str):
-            logging.info(f"Answer: {answer}")
-            return answer
-        else:
-            return self._stream_query_response(answer)
+            if isinstance(answer, str):
+                logging.info(f"Answer: {answer}")
+                return answer
+            else:
+                return self._stream_query_response(answer)
+        finally:
+            if config:
+                # Restore previous config
+                self.config: BaseLlmConfig = BaseLlmConfig.deserialize(prev_config)
 
     def chat(self, input_query: str, contexts: List[str], config: BaseLlmConfig = None, dry_run=False):
         """
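For reference, here is a minimal standalone sketch of the save/swap/restore pattern introduced above; `TempConfig` and `Runner` are illustrative stand-ins, not embedchain classes. The current config is snapshotted with the serializer, the per-call config is swapped in, and the snapshot is restored in a `finally` block so the override never outlives the call. The same pattern is applied to `chat()` in the hunk below.

```python
# Illustrative stand-ins only; not part of this commit or of embedchain.
import json
from dataclasses import asdict, dataclass


@dataclass
class TempConfig:
    template: str = "default template"
    number_documents: int = 1

    def serialize(self) -> str:
        return json.dumps(asdict(self))

    @classmethod
    def deserialize(cls, data: str) -> "TempConfig":
        return cls(**json.loads(data))


class Runner:
    def __init__(self) -> None:
        self.config = TempConfig()

    def run(self, config: TempConfig = None) -> str:
        try:
            if config:
                # Snapshot the previous config, then apply the temporary one.
                prev_config = self.config.serialize()
                self.config = config
            return self.config.template
        finally:
            if config:
                # Restore the snapshot so the override only lasts for this call.
                self.config = TempConfig.deserialize(prev_config)


runner = Runner()
print(runner.run(TempConfig(template="one-off template")))  # -> one-off template
print(runner.config.template)                               # -> default template
```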
@@ -217,39 +227,49 @@ class BaseLlm(JSONSerializable):
         :return: The answer to the query or the dry run result
         :rtype: str
         """
-        query_config = config or self.config
+        try:
+            if config:
+                # A config instance passed to this method will only be applied temporarily, for one call.
+                # So we will save the previous config and restore it at the end of the execution.
+                # For this we use the serializer.
+                prev_config = self.config.serialize()
+                self.config = config
 
-        if self.is_docs_site_instance:
-            query_config.template = DOCS_SITE_PROMPT_TEMPLATE
-            query_config.number_documents = 5
-        k = {}
-        if self.online:
-            k["web_search_result"] = self.access_search_and_get_results(input_query)
+            if self.is_docs_site_instance:
+                self.config.template = DOCS_SITE_PROMPT_TEMPLATE
+                self.config.number_documents = 5
+            k = {}
+            if self.online:
+                k["web_search_result"] = self.access_search_and_get_results(input_query)
 
-        self.update_history()
-
-        prompt = self.generate_prompt(input_query, contexts, **k)
-        logging.info(f"Prompt: {prompt}")
-
-        if dry_run:
-            return prompt
-
-        answer = self.get_answer_from_llm(prompt)
-
-        self.memory.chat_memory.add_user_message(input_query)
-
-        if isinstance(answer, str):
-            self.memory.chat_memory.add_ai_message(answer)
-            logging.info(f"Answer: {answer}")
-
-            # NOTE: Adding to history before and after. This could be seen as redundant.
-            # If we change it, we have to change the tests (no big deal).
-            self.update_history()
-
-            return answer
-        else:
-            # this is a streamed response and needs to be handled differently.
-            return self._stream_chat_response(answer)
+            self.update_history()
+
+            prompt = self.generate_prompt(input_query, contexts, **k)
+            logging.info(f"Prompt: {prompt}")
+
+            if dry_run:
+                return prompt
+
+            answer = self.get_answer_from_llm(prompt)
+
+            self.memory.chat_memory.add_user_message(input_query)
+
+            if isinstance(answer, str):
+                self.memory.chat_memory.add_ai_message(answer)
+                logging.info(f"Answer: {answer}")
+
+                # NOTE: Adding to history before and after. This could be seen as redundant.
+                # If we change it, we have to change the tests (no big deal).
+                self.update_history()
+
+                return answer
+            else:
+                # this is a streamed response and needs to be handled differently.
+                return self._stream_chat_response(answer)
+        finally:
+            if config:
+                # Restore previous config
+                self.config: BaseLlmConfig = BaseLlmConfig.deserialize(prev_config)
 
     @staticmethod
     def _get_messages(prompt: str, system_prompt: Optional[str] = None) -> List[BaseMessage]:
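With this change, a `BaseLlmConfig` passed to `query()` or `chat()` only affects that single call. A hedged usage sketch follows; the `llm` instance, the surrounding imports, and the `BaseLlmConfig` constructor keyword are assumptions not shown in this diff.

```python
# Hypothetical usage: `llm` is assumed to be an instance of a BaseLlm subclass,
# and passing the template via the BaseLlmConfig constructor is an assumption.
one_off = BaseLlmConfig(template=DOCS_SITE_PROMPT_TEMPLATE)

# The temporary config drives this call only; dry_run=True returns the rendered prompt.
prompt = llm.query("What does this prompt look like?", contexts=[], config=one_off, dry_run=True)

# Afterwards self.config has been restored from the serialized snapshot,
# so the one-off instance is no longer attached to the llm.
assert llm.config is not one_off
```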