Use either embedder or embedding_model as YAML key (#905)
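With this change, the embedder settings in a YAML config are accepted under either the new embedding_model key or the legacy embedder key. A minimal sketch of the lookup the patch introduces, applied to a config dict as it would be parsed from YAML (the field values shown are illustrative, not a full schema):

import yaml

# Illustrative config still using the legacy "embedder:" key.
legacy_yaml = """
app:
  config: {}
embedder:
  provider: openai
  config: {}
"""

config_data = yaml.safe_load(legacy_yaml)

# Same fallback lookup as the patched from_config(): prefer "embedding_model",
# fall back to the legacy "embedder" key.
embedding_model_config_data = config_data.get("embedding_model", config_data.get("embedder", {}))
print(embedding_model_config_data.get("provider", "openai"))  # -> "openai"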
@@ -130,7 +130,7 @@ class App(EmbedChain):
         app_config_data = config_data.get("app", {})
         llm_config_data = config_data.get("llm", {})
         db_config_data = config_data.get("vectordb", {})
-        embedder_config_data = config_data.get("embedder", {})
+        embedding_model_config_data = config_data.get("embedding_model", config_data.get("embedder", {}))
 
         app_config = AppConfig(**app_config_data.get("config", {}))
 
@@ -140,6 +140,6 @@ class App(EmbedChain):
         db_provider = db_config_data.get("provider", "chroma")
         db = VectorDBFactory.create(db_provider, db_config_data.get("config", {}))
 
-        embedder_provider = embedder_config_data.get("provider", "openai")
-        embedder = EmbedderFactory.create(embedder_provider, embedder_config_data.get("config", {}))
+        embedder_provider = embedding_model_config_data.get("provider", "openai")
+        embedder = EmbedderFactory.create(embedder_provider, embedding_model_config_data.get("config", {}))
         return cls(config=app_config, llm=llm, db=db, embedder=embedder)
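For context, a usage sketch of loading an App from a YAML file that still uses the legacy key. The yaml_path keyword is an assumption about the from_config signature in this version and is not confirmed by the diff:

from embedchain import App

# A config.yaml that still uses the legacy "embedder:" key keeps loading after
# this patch, because the lookup falls back from "embedding_model" to "embedder".
app = App.from_config(yaml_path="config.yaml")  # parameter name assumed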
@@ -359,7 +359,7 @@ class Pipeline(EmbedChain):
 
         pipeline_config_data = config_data.get("app", {}).get("config", {})
         db_config_data = config_data.get("vectordb", {})
-        embedding_model_config_data = config_data.get("embedding_model", {})
+        embedding_model_config_data = config_data.get("embedding_model", config_data.get("embedder", {}))
         llm_config_data = config_data.get("llm", {})
 
         pipeline_config = PipelineConfig(**pipeline_config_data)
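A small sketch of what the Pipeline-side change fixes: before the patch the lookup only consulted "embedding_model", so a legacy config using "embedder" silently resolved to an empty dict (and thus the default provider); with the fallback it picks up the configured settings. The provider name below is illustrative:

legacy_config = {"embedder": {"provider": "huggingface", "config": {}}}

# Old Pipeline lookup: the legacy key is ignored and the result is an empty dict.
old_lookup = legacy_config.get("embedding_model", {})
assert old_lookup == {}

# Patched lookup: falls back to the legacy "embedder" key.
new_lookup = legacy_config.get("embedding_model", legacy_config.get("embedder", {}))
assert new_lookup["provider"] == "huggingface"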
@@ -1,6 +1,6 @@
 fastapi==0.104.0
 uvicorn==0.23.2
-embedchain==0.0.90
-embedchain[streamlit, community, opensource, elasticsearch, opensearch, poe, discord, slack, whatsapp, weaviate, pinecone, qdrant, images, huggingface_hub, cohere, milvus, dataloaders, vertexai, llama2, gmail, json]==0.0.90
+embedchain==0.0.91
+embedchain[streamlit, community, opensource, elasticsearch, opensearch, poe, discord, slack, whatsapp, weaviate, pinecone, qdrant, images, huggingface_hub, cohere, milvus, dataloaders, vertexai, llama2, gmail, json]==0.0.91
 sqlalchemy==2.0.22
 python-multipart==0.0.6
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "embedchain"
-version = "0.0.90"
+version = "0.0.91"
 description = "Data platform for LLMs - Load, index, retrieve and sync any unstructured data"
 authors = [
     "Taranjeet Singh <taranjeet@embedchain.ai>",