Update version to v0.0.77 (#851)

embedchain/llm/gpt4all.py
@@ -1,7 +1,7 @@
 from typing import Iterable, Optional, Union
 
-from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 from langchain.callbacks.stdout import StdOutCallbackHandler
+from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 
 from embedchain.config import BaseLlmConfig
 from embedchain.helper.json_serializable import register_deserializable
@@ -48,11 +48,7 @@ class GPT4ALLLlm(BaseLlm):
         if config.top_p:
             kwargs["top_p"] = config.top_p
 
-        callbacks = None
-        if config.stream:
-            callbacks = [StreamingStdOutCallbackHandler()]
-        else:
-            callbacks =[StdOutCallbackHandler()]
+        callbacks = [StreamingStdOutCallbackHandler()] if config.stream else [StdOutCallbackHandler()]
 
         response = self.instance.generate(prompts=messages, callbacks=callbacks, **kwargs)
         answer = ""
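The second hunk above collapses the five-line stream/no-stream branch into a single conditional expression. A minimal, self-contained sketch of that selection logic, using hypothetical stand-in classes so it runs without langchain installed:

# Stand-ins for langchain's StdOutCallbackHandler / StreamingStdOutCallbackHandler,
# used here only so the selection logic can be exercised in isolation.
class StdOutCallbackHandler:
    pass

class StreamingStdOutCallbackHandler:
    pass

def select_callbacks(stream: bool) -> list:
    # Same shape as the replacement line: one streaming handler when streaming is
    # enabled, otherwise a plain stdout handler.
    return [StreamingStdOutCallbackHandler()] if stream else [StdOutCallbackHandler()]

assert isinstance(select_callbacks(True)[0], StreamingStdOutCallbackHandler)
assert isinstance(select_callbacks(False)[0], StdOutCallbackHandler)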

embedchain/pipeline.py
@@ -17,6 +17,7 @@ from embedchain.embedder.openai import OpenAIEmbedder
 from embedchain.factory import EmbedderFactory, LlmFactory, VectorDBFactory
 from embedchain.helper.json_serializable import register_deserializable
 from embedchain.llm.base import BaseLlm
+from embedchain.llm.openai import OpenAILlm
 from embedchain.vectordb.base import BaseVectorDB
 from embedchain.vectordb.chroma import ChromaDB
 
@@ -77,11 +78,12 @@ class Pipeline(EmbedChain):
 
         self.config = config or PipelineConfig()
         self.name = self.config.name
-        self.local_id = self.config.id or str(uuid.uuid4())
+        self.config.id = self.local_id = str(uuid.uuid4()) if self.config.id is None else self.config.id
 
         self.embedding_model = embedding_model or OpenAIEmbedder()
         self.db = db or ChromaDB()
-        self.llm = llm or None
+        self.llm = llm or OpenAILlm()
         self._init_db()
 
         # setup user id and directory
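Side note on the hunk above (illustrative, not part of the commit): the new chained assignment writes the resolved id back onto the config, so config.id and the pipeline's local id always hold the same value, and a UUID is generated only when no id was supplied. A small sketch, with SimpleConfig as a hypothetical stand-in for PipelineConfig:

import uuid
from dataclasses import dataclass
from typing import Optional

@dataclass
class SimpleConfig:
    # Hypothetical stand-in for PipelineConfig; only the id field matters here.
    id: Optional[str] = None

def resolve_id(config: SimpleConfig) -> str:
    # Mirrors the added line: generate a fresh UUID only when the config has no id,
    # and write the chosen value back so config.id and the local id always match.
    config.id = local_id = str(uuid.uuid4()) if config.id is None else config.id
    return local_id

fresh = SimpleConfig()
assert resolve_id(fresh) == fresh.id           # a new id is generated and written back
existing = SimpleConfig(id="pipeline-123")
assert resolve_id(existing) == "pipeline-123"  # a supplied id is preserved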

@@ -128,7 +130,7 @@ class Pipeline(EmbedChain):
             self.client = Client()
         else:
             api_key = input(
-                "Enter Embedchain API key. You can find the API key at https://app.embedchain.ai/settings/keys/ \n"
+                "🔑 Enter your Embedchain API key. You can find the API key at https://app.embedchain.ai/settings/keys/ \n"  # noqa: E501
             )
             self.client = Client(api_key=api_key)
 
@@ -150,7 +152,7 @@ class Pipeline(EmbedChain):
             headers={"Authorization": f"Token {self.client.api_key}"},
         )
         if r.status_code not in [200, 201]:
-            raise Exception(f"Error occurred while creating pipeline. Response from API: {r.text}")
+            raise Exception(f"❌ Error occurred while creating pipeline. API response: {r.text}")
 
         print(
             f"🎉🎉🎉 Pipeline created successfully! View your pipeline: https://app.embedchain.ai/pipelines/{r.json()['id']}\n"  # noqa: E501
@@ -207,8 +209,7 @@ class Pipeline(EmbedChain):
             printed_value = metadata.get("file_path") if metadata.get("file_path") else data_value
             print(f"✅ Data of type: {data_type}, value: {printed_value} added successfully.")
         except Exception as e:
-            self.logger.error(f"Error occurred during data upload: {str(e)}")
-            print(f"❌ Error occurred during data upload for type {data_type}!")
+            print(f"❌ Error occurred during data upload for type {data_type}!. Error: {str(e)}")
 
     def _send_api_request(self, endpoint, payload):
         url = f"{self.client.host}{endpoint}"
@@ -237,39 +238,33 @@ class Pipeline(EmbedChain):
             self._upload_data_to_pipeline(data_type, data_value, metadata)
             self._mark_data_as_uploaded(data_hash)
             return True
-        except Exception as e:
-            self.logger.error(f"Error occurred during data upload: {str(e)}")
+        except Exception:
             print(f"❌ Error occurred during data upload for hash {data_hash}!")
             return False
 
     def _mark_data_as_uploaded(self, data_hash):
         self.cursor.execute(
-            "UPDATE data_sources SET is_uploaded = 1 WHERE hash = ? AND pipeline_id = ? AND is_uploaded = 0",
+            "UPDATE data_sources SET is_uploaded = 1 WHERE hash = ? AND pipeline_id = ?",
             (data_hash, self.local_id),
         )
         self.connection.commit()
 
     def deploy(self):
-        try:
-            if self.client is None:
-                self._init_client()
+        if self.client is None:
+            self._init_client()
 
-            pipeline_data = self._create_pipeline()
-            self.id = pipeline_data["id"]
+        pipeline_data = self._create_pipeline()
+        self.id = pipeline_data["id"]
 
-            results = self.cursor.execute(
-                "SELECT * FROM data_sources WHERE pipeline_id = ? AND is_uploaded = 0", (self.local_id,)
-            ).fetchall()
+        results = self.cursor.execute(
+            "SELECT * FROM data_sources WHERE pipeline_id = ? AND is_uploaded = 0", (self.local_id,)
+        ).fetchall()
 
-            if len(results) > 0:
-                print("🛠️ Adding data to your pipeline...")
-            for result in results:
-                data_hash, data_type, data_value = result[0], result[2], result[3]
-                self._process_and_upload_data(data_hash, data_type, data_value)
-
-        except Exception as e:
-            self.logger.exception(f"Error occurred during deployment: {str(e)}")
-            raise HTTPException(status_code=500, detail="Error occurred during deployment.")
+        if len(results) > 0:
+            print("🛠️ Adding data to your pipeline...")
+        for result in results:
+            data_hash, data_type, data_value = result[1], result[2], result[3]
+            self._process_and_upload_data(data_hash, data_type, data_value)
 
     @classmethod
     def from_config(cls, yaml_path: str, auto_deploy: bool = False):
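For context on the last hunk (illustrative, not part of the commit): deploy() reads only the rows still marked is_uploaded = 0, and _mark_data_as_uploaded() flips the flag after a successful upload, so re-running deploy pushes only new data. A self-contained sqlite3 sketch of that bookkeeping; the table layout is an assumption, chosen so the hash sits at index 1 to match the updated result[1] unpacking:

import sqlite3

# Throwaway in-memory database standing in for the local data_sources bookkeeping.
conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute(
    "CREATE TABLE data_sources "
    "(pipeline_id TEXT, hash TEXT, type TEXT, value TEXT, is_uploaded INTEGER DEFAULT 0)"
)
cur.execute(
    "INSERT INTO data_sources (pipeline_id, hash, type, value) VALUES (?, ?, ?, ?)",
    ("local-1", "abc123", "web_page", "https://example.com"),
)
conn.commit()

# Same shape as the deploy() query: pick up only rows that were never uploaded.
pending = cur.execute(
    "SELECT * FROM data_sources WHERE pipeline_id = ? AND is_uploaded = 0", ("local-1",)
).fetchall()

for row in pending:
    data_hash, data_type, data_value = row[1], row[2], row[3]
    # ... the actual upload would happen here ...
    # Same shape as _mark_data_as_uploaded() after this change: the hash + pipeline_id
    # pair identifies the row, so no is_uploaded filter is needed.
    cur.execute(
        "UPDATE data_sources SET is_uploaded = 1 WHERE hash = ? AND pipeline_id = ?",
        (data_hash, "local-1"),
    )
conn.commit()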

pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "embedchain"
-version = "0.0.76"
+version = "0.0.77"
 description = "Data platform for LLMs - Load, index, retrieve and sync any unstructured data"
 authors = ["Taranjeet Singh, Deshraj Yadav"]
 license = "Apache License"