From 2d5dc84f1a32a2a571ac3a9689d1d8dec5804138 Mon Sep 17 00:00:00 2001
From: Sidharth Mohanty
Date: Thu, 19 Oct 2023 10:07:45 +0530
Subject: [PATCH] [chore] Update LLM YAML config docs (#826)

---
 docs/advanced/configuration.mdx      |  2 +-
 docs/components/embedding-models.mdx |  8 ++++----
 docs/components/llms.mdx             | 16 ++++++++--------
 docs/get-started/faq.mdx             |  4 ++--
 4 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/docs/advanced/configuration.mdx b/docs/advanced/configuration.mdx
index 440384d1..4ab7f38a 100644
--- a/docs/advanced/configuration.mdx
+++ b/docs/advanced/configuration.mdx
@@ -13,8 +13,8 @@ app:
 
 llm:
   provider: openai
-  model: 'gpt-3.5-turbo'
   config:
+    model: 'gpt-3.5-turbo'
     temperature: 0.5
     max_tokens: 1000
     top_p: 1
diff --git a/docs/components/embedding-models.mdx b/docs/components/embedding-models.mdx
index 9a7d7c1b..9f5ab5aa 100644
--- a/docs/components/embedding-models.mdx
+++ b/docs/components/embedding-models.mdx
@@ -65,8 +65,8 @@ app = App.from_config(yaml_path="config.yaml")
 ```yaml config.yaml
 llm:
   provider: azure_openai
-  model: gpt-35-turbo
   config:
+    model: gpt-35-turbo
     deployment_name: your_llm_deployment_name
     temperature: 0.5
     max_tokens: 1000
@@ -99,8 +99,8 @@ app = App.from_config(yaml_path="config.yaml")
 ```yaml config.yaml
 llm:
   provider: gpt4all
-  model: 'orca-mini-3b.ggmlv3.q4_0.bin'
   config:
+    model: 'orca-mini-3b.ggmlv3.q4_0.bin'
     temperature: 0.5
     max_tokens: 1000
     top_p: 1
@@ -130,8 +130,8 @@ app = App.from_config(yaml_path="config.yaml")
 ```yaml config.yaml
 llm:
   provider: huggingface
-  model: 'google/flan-t5-xxl'
   config:
+    model: 'google/flan-t5-xxl'
     temperature: 0.5
     max_tokens: 1000
     top_p: 0.5
@@ -161,8 +161,8 @@ app = App.from_config(yaml_path="config.yaml")
 ```yaml config.yaml
 llm:
   provider: vertexai
-  model: 'chat-bison'
   config:
+    model: 'chat-bison'
     temperature: 0.5
     top_p: 0.5
 ```
diff --git a/docs/components/llms.mdx b/docs/components/llms.mdx
index f42b11c9..328e05cd 100644
--- a/docs/components/llms.mdx
+++ b/docs/components/llms.mdx
@@ -52,8 +52,8 @@ app = App.from_config(yaml_path="config.yaml")
 ```yaml config.yaml
 llm:
   provider: openai
-  model: 'gpt-3.5-turbo'
   config:
+    model: 'gpt-3.5-turbo'
     temperature: 0.5
     max_tokens: 1000
     top_p: 1
@@ -84,8 +84,8 @@ app = App.from_config(yaml_path="config.yaml")
 ```yaml config.yaml
 llm:
   provider: azure_openai
-  model: gpt-35-turbo
   config:
+    model: gpt-35-turbo
     deployment_name: your_llm_deployment_name
     temperature: 0.5
     max_tokens: 1000
@@ -121,8 +121,8 @@ app = App.from_config(yaml_path="config.yaml")
 ```yaml config.yaml
 llm:
   provider: anthropic
-  model: 'claude-instant-1'
   config:
+    model: 'claude-instant-1'
     temperature: 0.5
     max_tokens: 1000
     top_p: 1
@@ -158,8 +158,8 @@ app = App.from_config(yaml_path="config.yaml")
 ```yaml config.yaml
 llm:
   provider: cohere
-  model: large
   config:
+    model: large
     temperature: 0.5
     max_tokens: 1000
     top_p: 1
@@ -189,8 +189,8 @@ app = App.from_config(yaml_path="config.yaml")
 ```yaml config.yaml
 llm:
   provider: gpt4all
-  model: 'orca-mini-3b.ggmlv3.q4_0.bin'
   config:
+    model: 'orca-mini-3b.ggmlv3.q4_0.bin'
     temperature: 0.5
     max_tokens: 1000
     top_p: 1
@@ -261,8 +261,8 @@ app = App.from_config(yaml_path="config.yaml")
 ```yaml config.yaml
 llm:
   provider: huggingface
-  model: 'google/flan-t5-xxl'
   config:
+    model: 'google/flan-t5-xxl'
     temperature: 0.5
     max_tokens: 1000
     top_p: 0.5
@@ -291,8 +291,8 @@ app = App.from_config(yaml_path="config.yaml")
 ```yaml config.yaml
 llm:
   provider: llama2
-  model: 'a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5'
   config:
+    model: 'a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5'
     temperature: 0.5
     max_tokens: 1000
     top_p: 0.5
@@ -316,8 +316,8 @@ app = App.from_config(yaml_path="config.yaml")
 ```yaml config.yaml
 llm:
   provider: vertexai
-  model: 'chat-bison'
   config:
+    model: 'chat-bison'
     temperature: 0.5
     top_p: 0.5
 ```
diff --git a/docs/get-started/faq.mdx b/docs/get-started/faq.mdx
index cfe84990..7547670b 100644
--- a/docs/get-started/faq.mdx
+++ b/docs/get-started/faq.mdx
@@ -20,8 +20,8 @@ app = App.from_config(yaml_path="gpt4.yaml")
 ```yaml gpt4.yaml
 llm:
   provider: openai
-  model: 'gpt-4'
   config:
+    model: 'gpt-4'
     temperature: 0.5
     max_tokens: 1000
     top_p: 1
@@ -47,8 +47,8 @@ app = App.from_config(yaml_path="opensource.yaml")
 ```yaml opensource.yaml
 llm:
   provider: gpt4all
-  model: 'orca-mini-3b.ggmlv3.q4_0.bin'
   config:
+    model: 'orca-mini-3b.ggmlv3.q4_0.bin'
     temperature: 0.5
     max_tokens: 1000
     top_p: 1
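
Taken together, these doc changes move the `model` key out of the top level of the `llm:` block and into its nested `config:` block for every provider. A minimal sketch of loading the updated config shape, shown for the `openai` provider: the `App.from_config(yaml_path=...)` call mirrors the usage in the patched docs, while the `from embedchain import App` import is an assumption not shown in the patch itself.

```python
# Sketch only: assumes embedchain's App class and its from_config() helper,
# which the patched docs invoke as App.from_config(yaml_path=...).
from embedchain import App

# Post-patch shape: `model` sits inside `config:` next to the other
# generation parameters, rather than directly under `llm:`.
yaml_config = """\
llm:
  provider: openai
  config:
    model: 'gpt-3.5-turbo'
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
"""

# Write the YAML to disk and build the app from it.
with open("config.yaml", "w") as f:
    f.write(yaml_config)

app = App.from_config(yaml_path="config.yaml")
```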