feat: pass QueryConfig to dry_run (#173)

commit 5e6aef1e37 (parent 0c2653ccf5)
Author: gasolin
Date: 2023-07-07 18:38:14 +08:00
Committed by: GitHub


@@ -279,7 +279,7 @@ class EmbedChain:
         memory.chat_memory.add_ai_message(answer)
         return answer

-    def dry_run(self, input_query):
+    def dry_run(self, input_query, config: QueryConfig = None):
         """
         A dry run does everything except send the resulting prompt to
         the LLM. The purpose is to test the prompt, not the response.
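
The new config argument defaults to None, so existing calls keep working unchanged. A minimal usage sketch of both call styles (the import paths, and an App with data already added and an embedding API key configured, are assumptions based on the rest of the repo rather than this diff):

from embedchain import App
from embedchain.config import QueryConfig

app = App()  # assumes OPENAI_API_KEY is set and app.add(...) has already been called
# Old call style still works, since config defaults to None
prompt = app.dry_run("What is Embedchain?")
# New call style: pass a QueryConfig, mirroring query()
prompt_with_config = app.dry_run("What is Embedchain?", QueryConfig())
print(prompt_with_config)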
@@ -387,4 +387,4 @@ class OpenSourceApp(EmbedChain):
         response = gpt4all_model.generate(
             prompt=prompt,
         )
-        return response
+        return response
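
Since OpenSourceApp subclasses EmbedChain, it inherits the updated dry_run signature as well. A hedged sketch of the motivating use case, inspecting a custom prompt without invoking the local gpt4all model (the template keyword and the $context/$query placeholders are assumptions about QueryConfig, not shown in this diff):

from string import Template
from embedchain import OpenSourceApp
from embedchain.config import QueryConfig

app = OpenSourceApp()  # assumes app.add(...) has already been called with some source data
config = QueryConfig(
    template=Template("Answer from this context only:\n$context\nQuestion: $query"),  # assumed field
)
# Renders the final prompt without sending it to the gpt4all model
print(app.dry_run("How does dry_run work?", config))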