[Improvements] Add support for creating app from YAML string config (#980)

This commit is contained in:
Deven Patel
2023-11-29 12:25:30 -08:00
committed by GitHub
parent e35eaf1bfc
commit 406c46e7f4
34 changed files with 351 additions and 179 deletions

View File

@@ -29,7 +29,7 @@ from embedchain import Pipeline as App
os.environ['OPENAI_API_KEY'] = 'xxx'
# load embedding model configuration from config.yaml file
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
app.add("https://en.wikipedia.org/wiki/OpenAI")
app.query("What is OpenAI?")
@@ -59,7 +59,7 @@ os.environ["AZURE_OPENAI_ENDPOINT"] = "https://xxx.openai.azure.com/"
os.environ["AZURE_OPENAI_API_KEY"] = "xxx"
os.environ["OPENAI_API_VERSION"] = "xxx"
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml
@@ -93,7 +93,7 @@ GPT4All supports generating high quality embeddings of arbitrary length document
from embedchain import Pipeline as App
# load embedding model configuration from config.yaml file
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml
@@ -122,7 +122,7 @@ Hugging Face supports generating embeddings of arbitrary length documents of tex
from embedchain import Pipeline as App
# load embedding model configuration from config.yaml file
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml
@@ -153,7 +153,7 @@ Embedchain supports Google's VertexAI embeddings model through a simple interfac
from embedchain import Pipeline as App
# load embedding model configuration from config.yaml file
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml

View File

@@ -46,7 +46,7 @@ from embedchain import Pipeline as App
os.environ['OPENAI_API_KEY'] = 'xxx'
# load llm configuration from config.yaml file
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml
@@ -78,7 +78,7 @@ os.environ["OPENAI_API_BASE"] = "https://xxx.openai.azure.com/"
os.environ["OPENAI_API_KEY"] = "xxx"
os.environ["OPENAI_API_VERSION"] = "xxx"
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml
@@ -115,7 +115,7 @@ from embedchain import Pipeline as App
os.environ["ANTHROPIC_API_KEY"] = "xxx"
# load llm configuration from config.yaml file
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml
@@ -152,7 +152,7 @@ from embedchain import Pipeline as App
os.environ["COHERE_API_KEY"] = "xxx"
# load llm configuration from config.yaml file
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml
@@ -183,7 +183,7 @@ GPT4all is a free-to-use, locally running, privacy-aware chatbot. No GPU or inte
from embedchain import Pipeline as App
# load llm configuration from config.yaml file
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml
@@ -216,7 +216,7 @@ from embedchain import Pipeline as App
os.environ["JINACHAT_API_KEY"] = "xxx"
# load llm configuration from config.yaml file
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml
@@ -253,7 +253,7 @@ from embedchain import Pipeline as App
os.environ["HUGGINGFACE_ACCESS_TOKEN"] = "xxx"
# load llm configuration from config.yaml file
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml
@@ -283,7 +283,7 @@ from embedchain import Pipeline as App
os.environ["REPLICATE_API_TOKEN"] = "xxx"
# load llm configuration from config.yaml file
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml
@@ -308,7 +308,7 @@ Setup Google Cloud Platform application credentials by following the instruction
from embedchain import Pipeline as App
# load llm configuration from config.yaml file
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml

View File

@@ -25,7 +25,7 @@ Utilizing a vector database alongside Embedchain is a seamless process. All you
from embedchain import Pipeline as App
# load chroma configuration from yaml file
-app = App.from_config(yaml_path="config1.yaml")
+app = App.from_config(config_path="config1.yaml")
```
```yaml config1.yaml
@@ -64,7 +64,7 @@ pip install --upgrade 'embedchain[elasticsearch]'
from embedchain import Pipeline as App
# load elasticsearch configuration from yaml file
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml
@@ -73,8 +73,11 @@ vectordb:
config:
collection_name: 'es-index'
es_url: http://localhost:9200
allow_reset: true
http_auth:
- admin
- admin
api_key: xxx
verify_certs: false
```
</CodeGroup>
@@ -92,19 +95,19 @@ pip install --upgrade 'embedchain[opensearch]'
from embedchain import Pipeline as App
# load opensearch configuration from yaml file
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml
vectordb:
provider: opensearch
config:
collection_name: 'my-app'
opensearch_url: 'https://localhost:9200'
http_auth:
- admin
- admin
vector_dimension: 1536
collection_name: 'my-app'
use_ssl: false
verify_certs: false
```
@@ -131,7 +134,7 @@ os.environ['ZILLIZ_CLOUD_URI'] = 'https://xxx.zillizcloud.com'
os.environ['ZILLIZ_CLOUD_TOKEN'] = 'xxx'
# load zilliz configuration from yaml file
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml
@@ -167,7 +170,7 @@ In order to use Pinecone as vector database, set the environment variables `PINE
from embedchain import Pipeline as App
# load pinecone configuration from yaml file
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml
@@ -190,7 +193,7 @@ In order to use Qdrant as a vector database, set the environment variables `QDRA
from embedchain import Pipeline as App
# load qdrant configuration from yaml file
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml
@@ -210,7 +213,7 @@ In order to use Weaviate as a vector database, set the environment variables `WE
from embedchain import Pipeline as App
# load weaviate configuration from yaml file
-app = App.from_config(yaml_path="config.yaml")
+app = App.from_config(config_path="config.yaml")
```
```yaml config.yaml