From 6c71a1020d8960d45c9f4f919f53700cb4f0fd38 Mon Sep 17 00:00:00 2001 From: cachho Date: Sun, 24 Sep 2023 20:48:03 +0200 Subject: [PATCH] Docs: use LlmConfig instead of QueryConfig (#626) --- docs/advanced/interface_types.mdx | 4 ++-- tests/llm/test_chat.py | 24 +++++------------------- tests/llm/test_generate_prompt.py | 4 ++-- tests/llm/test_query.py | 10 +++++----- 4 files changed, 14 insertions(+), 28 deletions(-) diff --git a/docs/advanced/interface_types.mdx b/docs/advanced/interface_types.mdx index 8fbc2ae9..329d8f6d 100644 --- a/docs/advanced/interface_types.mdx +++ b/docs/advanced/interface_types.mdx @@ -43,11 +43,11 @@ Dry Run is an option in the `add`, `query` and `chat` methods that allows the us - You can add config to your query method to stream responses like ChatGPT does. You would require a downstream handler to render the chunk in your desirable format. Supports both OpenAI model and OpenSourceApp. 📊 -- To use this, instantiate a `QueryConfig` or `ChatConfig` object with `stream=True`. Then pass it to the `.chat()` or `.query()` method. The following example iterates through the chunks and prints them as they appear. +- To use this, instantiate a `LlmConfig` or `ChatConfig` object with `stream=True`. Then pass it to the `.chat()` or `.query()` method. The following example iterates through the chunks and prints them as they appear. 
```python app = App() -query_config = QueryConfig(stream = True) +query_config = LlmConfig(stream = True) resp = app.query("What unique capacity does Naval argue humans possess when it comes to understanding explanations or concepts?", query_config) for chunk in resp: diff --git a/tests/llm/test_chat.py b/tests/llm/test_chat.py index 6bffd23f..83d0959d 100644 --- a/tests/llm/test_chat.py +++ b/tests/llm/test_chat.py @@ -63,21 +63,7 @@ class TestApp(unittest.TestCase): @patch("chromadb.api.models.Collection.Collection.add", MagicMock) def test_chat_with_where_in_params(self): """ - This test checks the functionality of the 'chat' method in the App class. - It simulates a scenario where the 'retrieve_from_database' method returns a context list based on - a where filter and 'get_llm_model_answer' returns an expected answer string. - - The 'chat' method is expected to call 'retrieve_from_database' with the where filter and - 'get_llm_model_answer' methods appropriately and return the right answer. - - Key assumptions tested: - - 'retrieve_from_database' method is called exactly once with arguments: "Test query" and an instance of - QueryConfig. - - 'get_llm_model_answer' is called exactly once. The specific arguments are not checked in this test. - - 'chat' method returns the value it received from 'get_llm_model_answer'. - - The test isolates the 'chat' method behavior by mocking out 'retrieve_from_database' and - 'get_llm_model_answer' methods. + Test where filter """ with patch.object(self.app, "retrieve_from_database") as mock_retrieve: mock_retrieve.return_value = ["Test context"] @@ -99,11 +85,11 @@ class TestApp(unittest.TestCase): a where filter and 'get_llm_model_answer' returns an expected answer string. The 'chat' method is expected to call 'retrieve_from_database' with the where filter specified - in the QueryConfig and 'get_llm_model_answer' methods appropriately and return the right answer. 
+ in the LlmConfig and 'get_llm_model_answer' methods appropriately and return the right answer. Key assumptions tested: - 'retrieve_from_database' method is called exactly once with arguments: "Test query" and an instance of - QueryConfig. + LlmConfig. - 'get_llm_model_answer' is called exactly once. The specific arguments are not checked in this test. - 'chat' method returns the value it received from 'get_llm_model_answer'. @@ -114,8 +100,8 @@ class TestApp(unittest.TestCase): mock_answer.return_value = "Test answer" with patch.object(self.app.db, "query") as mock_database_query: mock_database_query.return_value = ["Test context"] - queryConfig = BaseLlmConfig(where={"attribute": "value"}) - answer = self.app.chat("Test query", queryConfig) + llm_config = BaseLlmConfig(where={"attribute": "value"}) + answer = self.app.chat("Test query", llm_config) self.assertEqual(answer, "Test answer") _args, kwargs = mock_database_query.call_args diff --git a/tests/llm/test_generate_prompt.py b/tests/llm/test_generate_prompt.py index 13b664ec..c51c074d 100644 --- a/tests/llm/test_generate_prompt.py +++ b/tests/llm/test_generate_prompt.py @@ -12,7 +12,7 @@ class TestGeneratePrompt(unittest.TestCase): def test_generate_prompt_with_template(self): """ Tests that the generate_prompt method correctly formats the prompt using - a custom template provided in the QueryConfig instance. + a custom template provided in the LlmConfig instance. This test sets up a scenario with an input query and a list of contexts, and a custom template, and then calls generate_prompt. It checks that the @@ -58,7 +58,7 @@ class TestGeneratePrompt(unittest.TestCase): def test_generate_prompt_with_history(self): """ - Test the 'generate_prompt' method with QueryConfig containing a history attribute. + Test the 'generate_prompt' method with LlmConfig containing a history attribute. 
""" config = BaseLlmConfig() config.template = Template("Context: $context | Query: $query | History: $history") diff --git a/tests/llm/test_query.py b/tests/llm/test_query.py index e16ebc90..f916167f 100644 --- a/tests/llm/test_query.py +++ b/tests/llm/test_query.py @@ -24,7 +24,7 @@ class TestApp(unittest.TestCase): Key assumptions tested: - 'retrieve_from_database' method is called exactly once with arguments: "Test query" and an instance of - QueryConfig. + LlmConfig. - 'get_llm_model_answer' is called exactly once. The specific arguments are not checked in this test. - 'query' method returns the value it received from 'get_llm_model_answer'. @@ -94,7 +94,7 @@ class TestApp(unittest.TestCase): Key assumptions tested: - 'retrieve_from_database' method is called exactly once with arguments: "Test query" and an instance of - QueryConfig. + LlmConfig. - 'get_llm_model_answer' is called exactly once. The specific arguments are not checked in this test. - 'query' method returns the value it received from 'get_llm_model_answer'. @@ -125,7 +125,7 @@ class TestApp(unittest.TestCase): Key assumptions tested: - 'retrieve_from_database' method is called exactly once with arguments: "Test query" and an instance of - QueryConfig. + LlmConfig. - 'get_llm_model_answer' is called exactly once. The specific arguments are not checked in this test. - 'query' method returns the value it received from 'get_llm_model_answer'. @@ -137,8 +137,8 @@ class TestApp(unittest.TestCase): mock_answer.return_value = "Test answer" with patch.object(self.app.db, "query") as mock_database_query: mock_database_query.return_value = ["Test context"] - queryConfig = BaseLlmConfig(where={"attribute": "value"}) - answer = self.app.query("Test query", queryConfig) + llm_config = BaseLlmConfig(where={"attribute": "value"}) + answer = self.app.query("Test query", llm_config) self.assertEqual(answer, "Test answer") _args, kwargs = mock_database_query.call_args