diff --git a/embedchain/llm/gpt4all.py b/embedchain/llm/gpt4all.py
index 48fd844b..586ac5be 100644
--- a/embedchain/llm/gpt4all.py
+++ b/embedchain/llm/gpt4all.py
@@ -14,7 +14,7 @@ class GPT4ALLLlm(BaseLlm):
         self.instance = GPT4ALLLlm._get_instance(self.config.model)
 
     def get_llm_model_answer(self, prompt):
-        return self._get_gpt4all_answer(prompt=prompt, config=self.config)
+        return self._get_answer(prompt=prompt, config=self.config)
 
     @staticmethod
     def _get_instance(model):
@@ -22,12 +22,12 @@ class GPT4ALLLlm(BaseLlm):
             from gpt4all import GPT4All
         except ModuleNotFoundError:
             raise ModuleNotFoundError(
-                "The GPT4All python package is not installed. Please install it with `pip install --upgrade embedchain[opensource]`"  # noqa E501
+                "The GPT4All python package is not installed. Please install it with `pip install --upgrade embedchain[opensource]`"  # noqa E501
             ) from None
         return GPT4All(model_name=model)
 
-    def _get_gpt4all_answer(self, prompt: str, config: BaseLlmConfig) -> Union[str, Iterable]:
+    def _get_answer(self, prompt: str, config: BaseLlmConfig) -> Union[str, Iterable]:
         if config.model and config.model != self.config.model:
             raise RuntimeError(
                 "OpenSourceApp does not support switching models at runtime. Please create a new app instance."