Add tests for gpt4all llm (#852)

This commit is contained in:
Deven Patel
2023-10-26 20:41:28 -07:00
committed by GitHub
parent 191ae3ec1e
commit c95d458e52
3 changed files with 62 additions and 1 deletion

View File

@@ -50,6 +50,7 @@ class EmbedderFactory:
provider_to_config_class = {
"azure_openai": "embedchain.config.embedder.base.BaseEmbedderConfig",
"openai": "embedchain.config.embedder.base.BaseEmbedderConfig",
"gpt4all": "embedchain.config.embedder.base.BaseEmbedderConfig",
}
@classmethod

View File

@@ -15,7 +15,7 @@ class GPT4ALLLlm(BaseLlm):
if self.config.model is None:
self.config.model = "orca-mini-3b.ggmlv3.q4_0.bin"
self.instance = GPT4ALLLlm._get_instance(self.config.model)
self.instance.streaming = config.stream
self.instance.streaming = self.config.stream
def get_llm_model_answer(self, prompt):
    """Answer *prompt* with the model configured on this instance."""
    return self._get_answer(config=self.config, prompt=prompt)

60
tests/llm/test_gpt4all.py Normal file
View File

@@ -0,0 +1,60 @@
import pytest
from langchain.llms.gpt4all import GPT4All as LangchainGPT4All
from embedchain.config import BaseLlmConfig
from embedchain.llm.gpt4all import GPT4ALLLlm
@pytest.fixture
def config():
    """Yield a fully-populated BaseLlmConfig for the GPT4All tests."""
    yield BaseLlmConfig(
        model="orca-mini-3b.ggmlv3.q4_0.bin",
        system_prompt="System prompt",
        temperature=0.7,
        max_tokens=50,
        top_p=0.8,
        stream=False,
    )
@pytest.fixture
def gpt4all_with_config(config):
    """GPT4ALLLlm instance built from the shared ``config`` fixture."""
    llm = GPT4ALLLlm(config=config)
    return llm
@pytest.fixture
def gpt4all_without_config():
    """GPT4ALLLlm instance constructed with all defaults (no config)."""
    llm = GPT4ALLLlm()
    return llm
def test_gpt4all_init_with_config(config, gpt4all_with_config):
    """Every field of the supplied config is carried onto the LLM, and a
    Langchain GPT4All instance is created."""
    for field in ("temperature", "max_tokens", "top_p", "stream", "system_prompt", "model"):
        assert getattr(gpt4all_with_config.config, field) == getattr(config, field)
    assert isinstance(gpt4all_with_config.instance, LangchainGPT4All)
def test_gpt4all_init_without_config(gpt4all_without_config):
    """Without a config, the default orca-mini model is selected."""
    llm = gpt4all_without_config
    assert llm.config.model == "orca-mini-3b.ggmlv3.q4_0.bin"
    assert isinstance(llm.instance, LangchainGPT4All)
def test_get_llm_model_answer(mocker, gpt4all_with_config):
    """get_llm_model_answer delegates to _get_answer with the prompt and
    the instance's own config, returning its result unchanged."""
    prompt = "Test query"
    expected = "Test answer"
    patched_get_answer = mocker.patch(
        "embedchain.llm.gpt4all.GPT4ALLLlm._get_answer", return_value=expected
    )
    assert gpt4all_with_config.get_llm_model_answer(prompt) == expected
    patched_get_answer.assert_called_once_with(prompt=prompt, config=gpt4all_with_config.config)
def test_gpt4all_model_switching(gpt4all_with_config):
    """Passing a config with a different model to _get_answer must raise."""
    other_config = BaseLlmConfig(model="new_model")
    with pytest.raises(RuntimeError, match="GPT4ALLLlm does not support switching models at runtime."):
        gpt4all_with_config._get_answer("Test prompt", other_config)