From eb7a7e09eb459165d8a3f00f3921f32acb4227ad Mon Sep 17 00:00:00 2001
From: Dev Khant
Date: Thu, 15 Aug 2024 21:13:00 +0530
Subject: [PATCH] Add configs to llm docs (#1707)

---
 docs/components/llms.mdx                     | 302 -------------------
 docs/components/llms/config.mdx              |  66 ++++
 docs/components/llms/models/anthropic.mdx    |  29 ++
 docs/components/llms/models/aws_bedrock.mdx  |  34 +++
 docs/components/llms/models/azure_openai.mdx |  30 ++
 docs/components/llms/models/google_AI.mdx    |  29 ++
 docs/components/llms/models/groq.mdx         |  31 ++
 docs/components/llms/models/litellm.mdx      |  28 ++
 docs/components/llms/models/mistral_AI.mdx   |  29 ++
 docs/components/llms/models/ollama.mdx       |  28 ++
 docs/components/llms/models/openai.mdx       |  39 +++
 docs/components/llms/models/together.mdx     |  29 ++
 docs/components/llms/overview.mdx            |  13 +
 docs/mint.json                               |  20 +-
 14 files changed, 404 insertions(+), 303 deletions(-)
 delete mode 100644 docs/components/llms.mdx
 create mode 100644 docs/components/llms/config.mdx
 create mode 100644 docs/components/llms/models/anthropic.mdx
 create mode 100644 docs/components/llms/models/aws_bedrock.mdx
 create mode 100644 docs/components/llms/models/azure_openai.mdx
 create mode 100644 docs/components/llms/models/google_AI.mdx
 create mode 100644 docs/components/llms/models/groq.mdx
 create mode 100644 docs/components/llms/models/litellm.mdx
 create mode 100644 docs/components/llms/models/mistral_AI.mdx
 create mode 100644 docs/components/llms/models/ollama.mdx
 create mode 100644 docs/components/llms/models/openai.mdx
 create mode 100644 docs/components/llms/models/together.mdx
 create mode 100644 docs/components/llms/overview.mdx

diff --git a/docs/components/llms.mdx b/docs/components/llms.mdx
deleted file mode 100644
index fbb9b5a6..00000000
--- a/docs/components/llms.mdx
+++ /dev/null
@@ -1,302 +0,0 @@
---
title: Supported LLMs
---

## Overview

Mem0 includes built-in support for various popular large language models. Memory can utilize the LLM provided by the user, letting you pick the model that best fits your needs.

## OpenAI

To use OpenAI LLM models, you have to set the `OPENAI_API_KEY` environment variable. You can obtain the OpenAI API key from the [OpenAI Platform](https://platform.openai.com/account/api-keys).

Once you have obtained the key, you can use it like this:

```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key"

config = {
    "llm": {
        "provider": "openai",
        "config": {
            "model": "gpt-4o",
            "temperature": 0.2,
            "max_tokens": 1500,
        }
    }
}

# Use OpenRouter by passing its API key
# os.environ["OPENROUTER_API_KEY"] = "your-api-key"
# config = {
#     "llm": {
#         "provider": "openai",
#         "config": {
#             "model": "meta-llama/llama-3.1-70b-instruct",
#         }
#     }
# }

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

## Ollama

You can use LLMs from Ollama to run Mem0 locally. These [models](https://ollama.com/search?c=tools) support tool calling.
```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key" # for embedder

config = {
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "mixtral:8x7b",
            "temperature": 0.1,
            "max_tokens": 2000,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

## Groq

[Groq](https://groq.com/) is the creator of the world's first Language Processing Unit (LPU), providing exceptional speed for AI workloads running on its LPU Inference Engine.

In order to use LLMs from Groq, go to their [platform](https://console.groq.com/keys) and get the API key. Set the API key as the `GROQ_API_KEY` environment variable to use the model as shown in the example below.

```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key" # used for embedding model
os.environ["GROQ_API_KEY"] = "your-api-key"

config = {
    "llm": {
        "provider": "groq",
        "config": {
            "model": "mixtral-8x7b-32768",
            "temperature": 0.1,
            "max_tokens": 1000,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

## Together

To use TogetherAI LLM models, you have to set the `TOGETHER_API_KEY` environment variable. You can obtain the TogetherAI API key from their [Account settings page](https://api.together.xyz/settings/api-keys).

Once you have obtained the key, you can use it like this:

```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key" # used for embedding model
os.environ["TOGETHER_API_KEY"] = "your-api-key"

config = {
    "llm": {
        "provider": "togetherai",
        "config": {
            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
            "temperature": 0.2,
            "max_tokens": 1500,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

## AWS Bedrock

### Setup
- Before using the AWS Bedrock LLM, make sure you have the appropriate model access from the [Bedrock Console](https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/modelaccess).
- You will also need to authenticate the `boto3` client using one of the methods in the [AWS documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html#configuring-credentials).
- Export the `AWS_REGION`, `AWS_ACCESS_KEY`, and `AWS_SECRET_ACCESS_KEY` environment variables.

```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key" # used for embedding model
os.environ['AWS_REGION'] = 'us-east-1'
os.environ["AWS_ACCESS_KEY"] = "xx"
os.environ["AWS_SECRET_ACCESS_KEY"] = "xx"

config = {
    "llm": {
        "provider": "aws_bedrock",
        "config": {
            "model": "arn:aws:bedrock:us-east-1:123456789012:model/your-model-name",
            "temperature": 0.2,
            "max_tokens": 1500,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

## LiteLLM

[LiteLLM](https://litellm.vercel.app/docs/) is compatible with over 100 large language models (LLMs), all using a standardized input/output format. You can explore the [available models](https://litellm.vercel.app/docs/providers) to use with LiteLLM. Ensure you set the appropriate API key for the model you choose to use.
```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key"

config = {
    "llm": {
        "provider": "litellm",
        "config": {
            "model": "gpt-3.5-turbo",
            "temperature": 0.2,
            "max_tokens": 1500,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

## Google AI

To use Google AI models, you have to set the `GEMINI_API_KEY` environment variable, as shown in the code below. You can obtain the API key from [Google MakerSuite](https://makersuite.google.com/app/apikey).

Once you have obtained the key, you can use it like this:

```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key" # used for embedding model
os.environ["GEMINI_API_KEY"] = "your-api-key"

config = {
    "llm": {
        "provider": "litellm",
        "config": {
            "model": "gemini/gemini-pro",
            "temperature": 0.2,
            "max_tokens": 1500,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

## Anthropic

To use Anthropic's models, please set the `ANTHROPIC_API_KEY`, which you can find on their [Account Settings Page](https://console.anthropic.com/account/keys).

```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key" # used for embedding model
os.environ["ANTHROPIC_API_KEY"] = "your-api-key"

config = {
    "llm": {
        "provider": "litellm",
        "config": {
            "model": "claude-3-opus-20240229",
            "temperature": 0.1,
            "max_tokens": 2000,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

## Mistral AI

To use Mistral AI's models, please obtain the Mistral AI API key from their [console](https://console.mistral.ai/). Set the `MISTRAL_API_KEY` environment variable to use the model as shown in the example below.

```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key" # used for embedding model
os.environ["MISTRAL_API_KEY"] = "your-api-key"

config = {
    "llm": {
        "provider": "litellm",
        "config": {
            "model": "open-mixtral-8x7b",
            "temperature": 0.1,
            "max_tokens": 2000,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

## Azure OpenAI

To use Azure OpenAI models, you have to set the `AZURE_OPENAI_API_KEY`, `AZURE_OPENAI_ENDPOINT`, and `OPENAI_API_VERSION` environment variables. You can obtain the Azure OpenAI API key from the [Azure portal](https://azure.microsoft.com/).

```python
import os
from mem0 import Memory

os.environ["AZURE_OPENAI_API_KEY"] = "your-api-key"
os.environ["AZURE_OPENAI_ENDPOINT"] = "your-api-base-url"
os.environ["OPENAI_API_VERSION"] = "version-to-use"

config = {
    "llm": {
        "provider": "azure_openai",
        "config": {
            "model": "your-deployment-name",
            "temperature": 0.1,
            "max_tokens": 2000,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

diff --git a/docs/components/llms/config.mdx b/docs/components/llms/config.mdx
new file mode 100644
index 00000000..2cf296ba
--- /dev/null
+++ b/docs/components/llms/config.mdx
@@ -0,0 +1,66 @@
## What is Config?

Config in mem0 is a dictionary that specifies the settings for your LLM.
It allows you to customize the behavior and connection details of your chosen LLM.

## How to Define Config

The config is defined as a Python dictionary with two main keys:
- `llm`: Specifies the LLM provider and its configuration
  - `provider`: The name of the LLM provider (e.g., "openai", "groq")
  - `config`: A nested dictionary containing provider-specific settings

## How to Use Config

Here's a general example of how to use the config with mem0:

```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "sk-xx" # for embedder

config = {
    "llm": {
        "provider": "your_chosen_provider",
        "config": {
            # Provider-specific settings go here
        }
    }
}

m = Memory.from_config(config)
m.add("Your text here", user_id="user", metadata={"category": "example"})
```

## Why is Config Needed?

Config is essential for:
1. Specifying which LLM to use.
2. Providing necessary connection details (e.g., model, api_key, temperature).
3. Ensuring proper initialization and connection to your chosen LLM.

## Master List of All Params in Config

Here's a comprehensive list of all parameters that can be used across different LLMs:

| Parameter             | Description                                   | Provider   |
|-----------------------|-----------------------------------------------|------------|
| `model`               | LLM model to use                              | All        |
| `temperature`         | Temperature of the model                      | All        |
| `api_key`             | API key to use                                | All        |
| `max_tokens`          | Maximum number of tokens to generate          | All        |
| `top_p`               | Probability threshold for nucleus sampling    | All        |
| `top_k`               | Number of highest-probability tokens to keep  | All        |
| `models`              | List of models                                | OpenRouter |
| `route`               | Routing strategy                              | OpenRouter |
| `openrouter_base_url` | Base URL for the OpenRouter API               | OpenRouter |
| `site_url`            | Site URL                                      | OpenRouter |
| `app_name`            | Application name                              | OpenRouter |
| `ollama_base_url`     | Base URL for the Ollama API                   | Ollama     |

## Supported LLMs

For detailed information on configuring specific LLMs, please visit the [LLMs](./models) section. There you'll find information for each supported LLM, with provider-specific usage examples and configuration details.

diff --git a/docs/components/llms/models/anthropic.mdx b/docs/components/llms/models/anthropic.mdx
new file mode 100644
index 00000000..916c88d2
--- /dev/null
+++ b/docs/components/llms/models/anthropic.mdx
@@ -0,0 +1,29 @@
To use Anthropic's models, please set the `ANTHROPIC_API_KEY`, which you can find on their [Account Settings Page](https://console.anthropic.com/account/keys).

## Usage

```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key" # used for embedding model
os.environ["ANTHROPIC_API_KEY"] = "your-api-key"

config = {
    "llm": {
        "provider": "litellm",
        "config": {
            "model": "claude-3-opus-20240229",
            "temperature": 0.1,
            "max_tokens": 2000,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

## Config

All available parameters for the `anthropic` config are present in [Master List of All Params in Config](../config).
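The optional parameters from the master list can be layered onto the same config. Below is a minimal sketch, assuming the optional `top_p` sampling parameter is honored as the master list indicates; it is otherwise identical to the usage example above:

```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key" # used for embedding model
os.environ["ANTHROPIC_API_KEY"] = "your-api-key"

# Sketch: the usage config above, extended with the optional top_p
# parameter (nucleus-sampling threshold) from the master param list.
config = {
    "llm": {
        "provider": "litellm",
        "config": {
            "model": "claude-3-opus-20240229",
            "temperature": 0.1,
            "max_tokens": 2000,
            "top_p": 0.9,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```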
diff --git a/docs/components/llms/models/aws_bedrock.mdx b/docs/components/llms/models/aws_bedrock.mdx
new file mode 100644
index 00000000..31171bbd
--- /dev/null
+++ b/docs/components/llms/models/aws_bedrock.mdx
@@ -0,0 +1,34 @@
### Setup
- Before using the AWS Bedrock LLM, make sure you have the appropriate model access from the [Bedrock Console](https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/modelaccess).
- You will also need to authenticate the `boto3` client using one of the methods in the [AWS documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html#configuring-credentials).
- Export the `AWS_REGION`, `AWS_ACCESS_KEY`, and `AWS_SECRET_ACCESS_KEY` environment variables.

### Usage

```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key" # used for embedding model
os.environ['AWS_REGION'] = 'us-east-1'
os.environ["AWS_ACCESS_KEY"] = "xx"
os.environ["AWS_SECRET_ACCESS_KEY"] = "xx"

config = {
    "llm": {
        "provider": "aws_bedrock",
        "config": {
            "model": "arn:aws:bedrock:us-east-1:123456789012:model/your-model-name",
            "temperature": 0.2,
            "max_tokens": 1500,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

### Config

All available parameters for the `aws_bedrock` config are present in [Master List of All Params in Config](../config).

diff --git a/docs/components/llms/models/azure_openai.mdx b/docs/components/llms/models/azure_openai.mdx
new file mode 100644
index 00000000..231c0500
--- /dev/null
+++ b/docs/components/llms/models/azure_openai.mdx
@@ -0,0 +1,30 @@
To use Azure OpenAI models, you have to set the `AZURE_OPENAI_API_KEY`, `AZURE_OPENAI_ENDPOINT`, and `OPENAI_API_VERSION` environment variables. You can obtain the Azure OpenAI API key from the [Azure portal](https://azure.microsoft.com/).

## Usage

```python
import os
from mem0 import Memory

os.environ["AZURE_OPENAI_API_KEY"] = "your-api-key"
os.environ["AZURE_OPENAI_ENDPOINT"] = "your-api-base-url"
os.environ["OPENAI_API_VERSION"] = "version-to-use"

config = {
    "llm": {
        "provider": "azure_openai",
        "config": {
            "model": "your-deployment-name",
            "temperature": 0.1,
            "max_tokens": 2000,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

## Config

All available parameters for the `azure_openai` config are present in [Master List of All Params in Config](../config).

diff --git a/docs/components/llms/models/google_AI.mdx b/docs/components/llms/models/google_AI.mdx
new file mode 100644
index 00000000..b5f37503
--- /dev/null
+++ b/docs/components/llms/models/google_AI.mdx
@@ -0,0 +1,29 @@
To use Google AI models, you have to set the `GEMINI_API_KEY` environment variable, as shown in the code below.
You can obtain the API key from [Google MakerSuite](https://makersuite.google.com/app/apikey).

## Usage

```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key" # used for embedding model
os.environ["GEMINI_API_KEY"] = "your-api-key"

config = {
    "llm": {
        "provider": "litellm",
        "config": {
            "model": "gemini/gemini-pro",
            "temperature": 0.2,
            "max_tokens": 1500,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

## Config

All available parameters for the `litellm` config are present in [Master List of All Params in Config](../config).

diff --git a/docs/components/llms/models/groq.mdx b/docs/components/llms/models/groq.mdx
new file mode 100644
index 00000000..d27aa17b
--- /dev/null
+++ b/docs/components/llms/models/groq.mdx
@@ -0,0 +1,31 @@
[Groq](https://groq.com/) is the creator of the world's first Language Processing Unit (LPU), providing exceptional speed for AI workloads running on its LPU Inference Engine.

In order to use LLMs from Groq, go to their [platform](https://console.groq.com/keys) and get the API key. Set the API key as the `GROQ_API_KEY` environment variable to use the model as shown in the example below.

## Usage

```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key" # used for embedding model
os.environ["GROQ_API_KEY"] = "your-api-key"

config = {
    "llm": {
        "provider": "groq",
        "config": {
            "model": "mixtral-8x7b-32768",
            "temperature": 0.1,
            "max_tokens": 1000,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

## Config

All available parameters for the `groq` config are present in [Master List of All Params in Config](../config).

diff --git a/docs/components/llms/models/litellm.mdx b/docs/components/llms/models/litellm.mdx
new file mode 100644
index 00000000..9eb0855e
--- /dev/null
+++ b/docs/components/llms/models/litellm.mdx
@@ -0,0 +1,28 @@
[LiteLLM](https://litellm.vercel.app/docs/) is compatible with over 100 large language models (LLMs), all using a standardized input/output format. You can explore the [available models](https://litellm.vercel.app/docs/providers) to use with LiteLLM. Ensure you set the appropriate API key for the model you choose to use.

## Usage

```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key"

config = {
    "llm": {
        "provider": "litellm",
        "config": {
            "model": "gpt-3.5-turbo",
            "temperature": 0.2,
            "max_tokens": 1500,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

## Config

All available parameters for the `litellm` config are present in [Master List of All Params in Config](../config).

diff --git a/docs/components/llms/models/mistral_AI.mdx b/docs/components/llms/models/mistral_AI.mdx
new file mode 100644
index 00000000..83b0426b
--- /dev/null
+++ b/docs/components/llms/models/mistral_AI.mdx
@@ -0,0 +1,29 @@
To use Mistral AI's models, please obtain the Mistral AI API key from their [console](https://console.mistral.ai/). Set the `MISTRAL_API_KEY` environment variable to use the model as shown in the example below.
## Usage

```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key" # used for embedding model
os.environ["MISTRAL_API_KEY"] = "your-api-key"

config = {
    "llm": {
        "provider": "litellm",
        "config": {
            "model": "open-mixtral-8x7b",
            "temperature": 0.1,
            "max_tokens": 2000,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

## Config

All available parameters for the `litellm` config are present in [Master List of All Params in Config](../config).

diff --git a/docs/components/llms/models/ollama.mdx b/docs/components/llms/models/ollama.mdx
new file mode 100644
index 00000000..eac73685
--- /dev/null
+++ b/docs/components/llms/models/ollama.mdx
@@ -0,0 +1,28 @@
You can use LLMs from Ollama to run Mem0 locally. These [models](https://ollama.com/search?c=tools) support tool calling.

## Usage

```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key" # for embedder

config = {
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "mixtral:8x7b",
            "temperature": 0.1,
            "max_tokens": 2000,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

## Config

All available parameters for the `ollama` config are present in [Master List of All Params in Config](../config).

diff --git a/docs/components/llms/models/openai.mdx b/docs/components/llms/models/openai.mdx
new file mode 100644
index 00000000..448dcfb4
--- /dev/null
+++ b/docs/components/llms/models/openai.mdx
@@ -0,0 +1,39 @@
To use OpenAI LLM models, you have to set the `OPENAI_API_KEY` environment variable. You can obtain the OpenAI API key from the [OpenAI Platform](https://platform.openai.com/account/api-keys).

## Usage

```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key"

config = {
    "llm": {
        "provider": "openai",
        "config": {
            "model": "gpt-4o",
            "temperature": 0.2,
            "max_tokens": 1500,
        }
    }
}

# Use OpenRouter by passing its API key
# os.environ["OPENROUTER_API_KEY"] = "your-api-key"
# config = {
#     "llm": {
#         "provider": "openai",
#         "config": {
#             "model": "meta-llama/llama-3.1-70b-instruct",
#         }
#     }
# }

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

## Config

All available parameters for the `openai` config are present in [Master List of All Params in Config](../config).

diff --git a/docs/components/llms/models/together.mdx b/docs/components/llms/models/together.mdx
new file mode 100644
index 00000000..98707cee
--- /dev/null
+++ b/docs/components/llms/models/together.mdx
@@ -0,0 +1,29 @@
To use TogetherAI LLM models, you have to set the `TOGETHER_API_KEY` environment variable. You can obtain the TogetherAI API key from their [Account settings page](https://api.together.xyz/settings/api-keys).
## Usage

```python
import os
from mem0 import Memory

os.environ["OPENAI_API_KEY"] = "your-api-key" # used for embedding model
os.environ["TOGETHER_API_KEY"] = "your-api-key"

config = {
    "llm": {
        "provider": "togetherai",
        "config": {
            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
            "temperature": 0.2,
            "max_tokens": 1500,
        }
    }
}

m = Memory.from_config(config)
m.add("Likes to play cricket on weekends", user_id="alice", metadata={"category": "hobbies"})
```

## Config

All available parameters for the `togetherai` config are present in [Master List of All Params in Config](../config).

diff --git a/docs/components/llms/overview.mdx b/docs/components/llms/overview.mdx
new file mode 100644
index 00000000..f615504d
--- /dev/null
+++ b/docs/components/llms/overview.mdx
@@ -0,0 +1,13 @@
---
title: Overview
---

Mem0 includes built-in support for various popular large language models. Memory can utilize the LLM provided by the user, letting you pick the model that best fits your needs.

## Usage

To use an LLM, you must provide a configuration to customize its usage. If no configuration is supplied, a default configuration will be applied, and `OpenAI` will be used as the LLM.

For a comprehensive list of available parameters for LLM configuration, please refer to [Config](./config).

To view all supported LLMs, visit the [Supported LLMs](./models) section.

diff --git a/docs/mint.json b/docs/mint.json
index 1bf5776d..d21c04aa 100644
--- a/docs/mint.json
+++ b/docs/mint.json
@@ -63,7 +63,25 @@
         "open-source/quickstart",
         {
           "group": "LLMs",
-          "pages": ["components/llms"]
+          "pages": [
+            "components/llms/overview",
+            "components/llms/config",
+            {
+              "group": "Supported LLMs",
+              "pages": [
+                "components/llms/models/openai",
+                "components/llms/models/anthropic",
+                "components/llms/models/azure_openai",
+                "components/llms/models/ollama",
+                "components/llms/models/together",
+                "components/llms/models/groq",
+                "components/llms/models/litellm",
+                "components/llms/models/mistral_AI",
+                "components/llms/models/google_AI",
+                "components/llms/models/aws_bedrock"
+              ]
+            }
+          ]
         },
         {
           "group": "Vector Databases",