# ---------------------------------------------------------------------------
# embedchain/chunkers/pdf_file.py and embedchain/chunkers/website.py
# (initial versions from patch 1/3, "Add simple app functionality").
# Reconstructed from a whitespace-mangled patch. The two chunker modules
# were byte-for-byte duplicates except for chunk_size, so the shared loop
# is factored into _create_chunks here (patch 3/3 performs the same dedup
# by introducing a BaseChunker class).
# ---------------------------------------------------------------------------
import hashlib

from langchain.text_splitter import RecursiveCharacterTextSplitter

# chunk_size was the only knob that differed between the two modules:
# 1000 chars for PDF text, 500 for scraped web pages.
_PDF_TEXT_SPLITTER = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=0,
    length_function=len,
)
_WEBSITE_TEXT_SPLITTER = RecursiveCharacterTextSplitter(
    chunk_size=500,
    chunk_overlap=0,
    length_function=len,
)


def _create_chunks(text_splitter, loader, url):
    """Shared chunking loop (was copy-pasted verbatim in both modules).

    Loads records via ``loader``, splits each record's content with
    ``text_splitter`` and returns parallel ``documents`` / ``ids`` /
    ``metadatas`` lists in the shape expected by ``chromadb.Collection.add``.
    """
    documents = []
    ids = []
    metadatas = []
    for data in loader.load_data(url):
        content = data["content"]
        meta_data = data["meta_data"]
        chunks = text_splitter.split_text(content)
        url = meta_data["url"]  # the id hash uses the record's own url
        for chunk in chunks:
            # sha256(chunk + url) -> stable, content-addressed chunk id.
            ids.append(hashlib.sha256((chunk + url).encode()).hexdigest())
            documents.append(chunk)
            metadatas.append(meta_data)
    return {
        "documents": documents,
        "ids": ids,
        "metadatas": metadatas,
    }


class PdfFileChunker:
    """Chunker for PDF-file records (1000-char chunks)."""

    def create_chunks(self, loader, url):
        return _create_chunks(_PDF_TEXT_SPLITTER, loader, url)


class WebsiteChunker:
    """Chunker for website records (500-char chunks)."""

    def create_chunks(self, loader, url):
        return _create_chunks(_WEBSITE_TEXT_SPLITTER, loader, url)
# ---------------------------------------------------------------------------
# embedchain/chunkers/youtube_video.py (initial version from patch 1/3;
# superseded by the BaseChunker refactor in patch 3/3)
# ---------------------------------------------------------------------------
import hashlib

from langchain.text_splitter import RecursiveCharacterTextSplitter

# Transcripts are long-form prose, so chunks are larger than for web pages.
TEXT_SPLITTER_CHUNK_PARAMS = {
    "chunk_size": 2000,
    "chunk_overlap": 0,
    "length_function": len,
}

TEXT_SPLITTER = RecursiveCharacterTextSplitter(**TEXT_SPLITTER_CHUNK_PARAMS)


class YoutubeVideoChunker:
    """Splits YouTube-transcript records into content-addressed chunks."""

    def create_chunks(self, loader, url):
        """Load ``url`` via ``loader`` and split every record into chunks.

        Returns parallel ``documents`` / ``ids`` / ``metadatas`` lists in the
        shape expected by ``chromadb.Collection.add``.
        """
        documents = []
        ids = []
        metadatas = []
        for data in loader.load_data(url):
            chunks = TEXT_SPLITTER.split_text(data["content"])
            meta_data = data["meta_data"]
            url = meta_data["url"]  # the id hash uses the record's own url
            for chunk in chunks:
                # sha256(chunk + url) -> stable, deterministic chunk id.
                ids.append(hashlib.sha256((chunk + url).encode()).hexdigest())
                documents.append(chunk)
                metadatas.append(meta_data)
        return {
            "documents": documents,
            "ids": ids,
            "metadatas": metadatas,
        }


# ---------------------------------------------------------------------------
# embedchain/embedchain.py
# ---------------------------------------------------------------------------
import os

import chromadb
import openai
from chromadb.utils import embedding_functions
from dotenv import load_dotenv
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings

from embedchain.chunkers.pdf_file import PdfFileChunker
from embedchain.chunkers.website import WebsiteChunker
from embedchain.chunkers.youtube_video import YoutubeVideoChunker
from embedchain.loaders.pdf_file import PdfFileLoader
from embedchain.loaders.website import WebsiteLoader
from embedchain.loaders.youtube_video import YoutubeVideoLoader

load_dotenv()  # pick up OPENAI_API_KEY etc. from a local .env file

embeddings = OpenAIEmbeddings()

ABS_PATH = os.getcwd()
DB_DIR = os.path.join(ABS_PATH, "db")  # Chroma persists under ./db

openai_ef = embedding_functions.OpenAIEmbeddingFunction(
    api_key=os.getenv("OPENAI_API_KEY"),
    model_name="text-embedding-ada-002",
)


class EmbedChain:
    """Tiny RAG pipeline: add sources, embed them into Chroma, query an LLM."""

    def __init__(self):
        self.chromadb_client = self._get_or_create_db()
        self.collection = self._get_or_create_collection()
        self.user_asks = []  # history of [data_type, url] pairs the user added

    def _get_loader(self, data_type):
        """Return the loader for ``data_type``; raise ValueError if unknown."""
        loaders = {
            'youtube_video': YoutubeVideoLoader(),
            'pdf_file': PdfFileLoader(),
            'website': WebsiteLoader(),
        }
        if data_type not in loaders:
            raise ValueError(f"Unsupported data type: {data_type}")
        return loaders[data_type]

    def _get_chunker(self, data_type):
        """Return the chunker for ``data_type``; raise ValueError if unknown."""
        chunkers = {
            'youtube_video': YoutubeVideoChunker(),
            'pdf_file': PdfFileChunker(),
            'website': WebsiteChunker(),
        }
        if data_type not in chunkers:
            raise ValueError(f"Unsupported data type: {data_type}")
        return chunkers[data_type]

    def add(self, data_type, url):
        """Load, chunk and embed one data source into the vector store."""
        loader = self._get_loader(data_type)
        chunker = self._get_chunker(data_type)
        self.user_asks.append([data_type, url])
        self.load_and_embed(loader, chunker, url)

    def _get_or_create_db(self):
        """Build a Chroma client persisting duckdb+parquet files to DB_DIR."""
        client_settings = chromadb.config.Settings(
            chroma_db_impl="duckdb+parquet",
            persist_directory=DB_DIR,
            anonymized_telemetry=False,
        )
        return chromadb.Client(client_settings)

    def _get_or_create_collection(self):
        """Fetch (or create) the single collection shared by all sources."""
        return self.chromadb_client.get_or_create_collection(
            'embedchain_store', embedding_function=openai_ef,
        )

    def load_embeddings_to_db(self, loader, chunker, url):
        """Chunk ``url`` via ``chunker`` and add the result to the collection."""
        embeddings_data = chunker.create_chunks(loader, url)
        self.collection.add(
            documents=embeddings_data["documents"],
            metadatas=embeddings_data["metadatas"],
            ids=embeddings_data["ids"],
        )
        print(f"Docs count: {self.collection.count()}")

    def load_and_embed(self, loader, chunker, url):
        """Thin alias for the public flow; see load_embeddings_to_db."""
        return self.load_embeddings_to_db(loader, chunker, url)

    def _format_result(self, results):
        """Convert a raw Chroma query result into (Document, distance) pairs."""
        triples = zip(
            results["documents"][0],
            results["metadatas"][0],
            results["distances"][0],
        )
        return [
            (Document(page_content=doc, metadata=meta or {}), dist)
            for doc, meta, dist in triples
        ]

    def get_openai_answer(self, prompt):
        """Send ``prompt`` to the chat-completion API and return the reply."""
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo-0613",
            messages=[{"role": "user", "content": prompt}],
            temperature=0,
            max_tokens=1000,
            top_p=1,
        )
        return response["choices"][0]["message"]["content"]

    def get_answer_from_llm(self, query, context):
        """Answer ``query`` grounded in ``context`` via the LLM."""
        prompt = f"""Use the following pieces of context to answer the query at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
        {context}
        Query: {query}
        Helpful Answer:
        """
        return self.get_openai_answer(prompt)

    def query(self, input_query):
        """Answer ``input_query`` using the single closest stored chunk.

        NOTE(review): raises IndexError when the collection returns no
        results (empty store) -- confirm callers guard against that.
        """
        result = self.collection.query(
            query_texts=[input_query],
            n_results=1,
        )
        result_formatted = self._format_result(result)
        context = result_formatted[0][0].page_content
        return self.get_answer_from_llm(input_query, context)


class App(EmbedChain):
    """Public entry point (exported by embedchain/__init__.py in patch 2/3)."""
    pass


# ---------------------------------------------------------------------------
# embedchain/loaders/pdf_file.py
# ---------------------------------------------------------------------------
from langchain.document_loaders import PyPDFLoader

from embedchain.utils import clean_string


class PdfFileLoader:
    """Loads a PDF and yields one cleaned record per page."""

    def load_data(self, url):
        """Return ``[{"content": ..., "meta_data": {..., "url": url}}, ...]``.

        Raises:
            ValueError: when the PDF produces no pages.
        """
        pages = PyPDFLoader(url).load_and_split()
        if not pages:
            raise ValueError("No data found")
        output = []
        for page in pages:
            meta_data = page.metadata
            meta_data["url"] = url
            output.append({
                "content": clean_string(page.page_content),
                "meta_data": meta_data,
            })
        return output
# ---------------------------------------------------------------------------
# embedchain/loaders/website.py, embedchain/loaders/youtube_video.py and
# embedchain/utils.py (patch 1/3, reconstructed from a mangled patch).
# Third-party imports are deferred into load_data so this module imports
# without the heavy scraping dependencies installed.
# ---------------------------------------------------------------------------
import re


def clean_string(text):
    """Normalize scraped/extracted text.

    Collapses whitespace runs, drops backslashes, turns ``#`` into a space
    and squeezes repeated punctuation (``!!!`` -> ``!``).  The original's
    leading ``text.replace('\\n', ' ')`` was removed as redundant: the
    ``\\s+`` collapse below already covers newlines.
    """
    # NOTE: '#' replacement happens AFTER whitespace collapsing, so "c# d"
    # yields "c  d" (double space) -- preserved from the original order.
    cleaned_text = re.sub(r'\s+', ' ', text.strip())
    cleaned_text = cleaned_text.replace('\\', '')
    cleaned_text = cleaned_text.replace('#', ' ')
    cleaned_text = re.sub(r'([^\w\s])\1*', r'\1', cleaned_text)
    return cleaned_text


class WebsiteLoader:
    """Fetches a web page and returns its visible text as a single record."""

    def load_data(self, url):
        """Return ``[{"content": ..., "meta_data": {"url": url}}]``."""
        import requests
        from bs4 import BeautifulSoup

        response = requests.get(url)
        # Fail loudly on HTTP errors instead of silently embedding a 404 page.
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html.parser')
        # Blank out boilerplate / non-content tags before extracting text.
        for tag in soup([
            "nav", "aside", "form", "header",
            "noscript", "svg", "canvas",
            "footer", "script", "style",
        ]):
            tag.string = " "
        content = clean_string(soup.get_text())
        return [{
            "content": content,
            "meta_data": {"url": url},
        }]


class YoutubeVideoLoader:
    """Loads a YouTube transcript (plus video info) as a single record."""

    def load_data(self, url):
        """Return one cleaned record for the video.

        Raises:
            ValueError: when the loader yields no documents.
        """
        from langchain.document_loaders import YoutubeLoader  # lazy heavy import

        doc = YoutubeLoader.from_youtube_url(url, add_video_info=True).load()
        if not doc:
            raise ValueError("No data found")
        meta_data = doc[0].metadata
        meta_data["url"] = url
        return [{
            "content": clean_string(doc[0].page_content),
            "meta_data": meta_data,
        }]
# ---------------------------------------------------------------------------
# embedchain/chunkers/base_chunker.py (patch 3/3, "Chunkers: Refactor each
# chunker & add base class").  Patch 2/3 of the series also adds
# ``from .embedchain import App`` to embedchain/__init__.py.
# ---------------------------------------------------------------------------
import hashlib


class BaseChunker:
    """Shared chunking logic: split loader records into content-addressed chunks."""

    def __init__(self, text_splitter):
        # Any object exposing split_text(str) -> list[str]
        # (e.g. a LangChain RecursiveCharacterTextSplitter).
        self.text_splitter = text_splitter

    def create_chunks(self, loader, url):
        """Load ``url`` via ``loader`` and split every record into chunks.

        Returns parallel ``documents`` / ``ids`` / ``metadatas`` lists in the
        shape expected by ``chromadb.Collection.add``.  Fix over the original:
        identical chunks from the same source hash to the same sha256 id, and
        Chroma rejects duplicate ids in a single ``add`` call -- so duplicates
        are now skipped while preserving first-seen order.
        """
        documents = []
        ids = []
        metadatas = []
        seen = set()  # chunk ids already emitted in this call
        for data in loader.load_data(url):
            content = data["content"]
            meta_data = data["meta_data"]
            chunks = self.text_splitter.split_text(content)
            url = meta_data["url"]  # the id hash uses the record's own url
            for chunk in chunks:
                chunk_id = hashlib.sha256((chunk + url).encode()).hexdigest()
                if chunk_id in seen:
                    # Duplicate (chunk, url) pair: adding it again would make
                    # the ids list non-unique and break collection.add.
                    continue
                seen.add(chunk_id)
                ids.append(chunk_id)
                documents.append(chunk)
                metadatas.append(meta_data)
        return {
            "documents": documents,
            "ids": ids,
            "metadatas": metadatas,
        }
# ---------------------------------------------------------------------------
# embedchain/chunkers/pdf_file.py (refactored in patch 3/3 to inherit the
# shared chunking loop from BaseChunker)
# ---------------------------------------------------------------------------
from embedchain.chunkers.base_chunker import BaseChunker
from langchain.text_splitter import RecursiveCharacterTextSplitter

# PDF text reads well in fairly large chunks.
TEXT_SPLITTER_CHUNK_PARAMS = {
    "chunk_size": 1000,
    "chunk_overlap": 0,
    "length_function": len,
}


class PdfFileChunker(BaseChunker):
    """PDF chunker: BaseChunker wired to a 1000-char recursive splitter."""

    def __init__(self):
        super().__init__(RecursiveCharacterTextSplitter(**TEXT_SPLITTER_CHUNK_PARAMS))
# ---------------------------------------------------------------------------
# embedchain/chunkers/website.py and embedchain/chunkers/youtube_video.py
# (refactored in patch 3/3 to inherit from BaseChunker).  In the original
# tree each class lives in its own module with a TEXT_SPLITTER_CHUNK_PARAMS
# constant; here the parameters are passed directly to the splitter.
# ---------------------------------------------------------------------------
from embedchain.chunkers.base_chunker import BaseChunker
from langchain.text_splitter import RecursiveCharacterTextSplitter


class WebsiteChunker(BaseChunker):
    """Website chunker: 500-char chunks suit noisy scraped page text."""

    def __init__(self):
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=0,
            length_function=len,
        )
        super().__init__(splitter)


class YoutubeVideoChunker(BaseChunker):
    """YouTube chunker: 2000-char chunks suit long-form transcripts."""

    def __init__(self):
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=2000,
            chunk_overlap=0,
            length_function=len,
        )
        super().__init__(splitter)