diff --git a/embedchain/embedchain.py b/embedchain/embedchain.py
index cc5e62de..15f0c5b6 100644
--- a/embedchain/embedchain.py
+++ b/embedchain/embedchain.py
@@ -279,7 +279,7 @@ class EmbedChain:
         memory.chat_memory.add_ai_message(answer)
         return answer
 
-    def dry_run(self, input_query):
+    def dry_run(self, input_query, config: QueryConfig = None):
         """
         A dry run does everything except send the resulting prompt to the LLM.
         The purpose is to test the prompt, not the response.
@@ -387,4 +387,4 @@ class OpenSourceApp(EmbedChain):
         response = gpt4all_model.generate(
             prompt=prompt,
         )
-        return response
\ No newline at end of file
+        return response