[Feature] Gmail Loader (#841)
This commit is contained in:
22
embedchain/chunkers/gmail.py
Normal file
22
embedchain/chunkers/gmail.py
Normal file
@@ -0,0 +1,22 @@
|
||||
from typing import Optional
|
||||
|
||||
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
||||
|
||||
from embedchain.chunkers.base_chunker import BaseChunker
|
||||
from embedchain.config.add_config import ChunkerConfig
|
||||
from embedchain.helper.json_serializable import register_deserializable
|
||||
|
||||
|
||||
@register_deserializable
class GmailChunker(BaseChunker):
    """Chunker for gmail."""

    def __init__(self, config: Optional[ChunkerConfig] = None):
        """Build the chunker, falling back to a default config (1000-char chunks, no overlap)."""
        effective_config = (
            ChunkerConfig(chunk_size=1000, chunk_overlap=0, length_function=len)
            if config is None
            else config
        )
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=effective_config.chunk_size,
            chunk_overlap=effective_config.chunk_overlap,
            length_function=effective_config.length_function,
        )
        super().__init__(splitter)
|
||||
@@ -1,6 +1,7 @@
|
||||
from embedchain.chunkers.base_chunker import BaseChunker
|
||||
from embedchain.chunkers.docs_site import DocsSiteChunker
|
||||
from embedchain.chunkers.docx_file import DocxFileChunker
|
||||
from embedchain.chunkers.gmail import GmailChunker
|
||||
from embedchain.chunkers.images import ImagesChunker
|
||||
from embedchain.chunkers.json import JSONChunker
|
||||
from embedchain.chunkers.mdx import MdxChunker
|
||||
@@ -22,6 +23,7 @@ from embedchain.loaders.base_loader import BaseLoader
|
||||
from embedchain.loaders.csv import CsvLoader
|
||||
from embedchain.loaders.docs_site_loader import DocsSiteLoader
|
||||
from embedchain.loaders.docx_file import DocxFileLoader
|
||||
from embedchain.loaders.gmail import GmailLoader
|
||||
from embedchain.loaders.images import ImagesLoader
|
||||
from embedchain.loaders.json import JSONLoader
|
||||
from embedchain.loaders.local_qna_pair import LocalQnaPairLoader
|
||||
@@ -84,6 +86,7 @@ class DataFormatter(JSONSerializable):
|
||||
DataType.UNSTRUCTURED: UnstructuredLoader,
|
||||
DataType.JSON: JSONLoader,
|
||||
DataType.OPENAPI: OpenAPILoader,
|
||||
DataType.GMAIL: GmailLoader,
|
||||
}
|
||||
lazy_loaders = {DataType.NOTION}
|
||||
if data_type in loaders:
|
||||
@@ -128,6 +131,7 @@ class DataFormatter(JSONSerializable):
|
||||
DataType.UNSTRUCTURED: UnstructuredFileChunker,
|
||||
DataType.JSON: JSONChunker,
|
||||
DataType.OPENAPI: OpenAPIChunker,
|
||||
DataType.GMAIL: GmailChunker,
|
||||
}
|
||||
if data_type in chunker_classes:
|
||||
chunker_class: type = chunker_classes[data_type]
|
||||
|
||||
124
embedchain/loaders/gmail.py
Normal file
124
embedchain/loaders/gmail.py
Normal file
@@ -0,0 +1,124 @@
|
||||
import hashlib
|
||||
import logging
|
||||
import os
|
||||
import quopri
|
||||
from textwrap import dedent
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
try:
    # llama_index supplies the GmailReader used below via download_loader().
    from llama_index import download_loader
except ImportError:
    # Bug fix: the message previously said "Notion" — a copy-paste error from
    # the Notion loader. This is the Gmail loader.
    raise ImportError("Gmail requires extra dependencies. Install with `pip install embedchain[community]`") from None
|
||||
|
||||
from embedchain.loaders.base_loader import BaseLoader
|
||||
from embedchain.utils import clean_string
|
||||
|
||||
|
||||
def get_header(text: str, header: str) -> str:
    """Extract the value of an RFC-822-style *header* line from raw email *text*.

    Finds the first occurrence of *header*, takes everything between the
    following ":" and the end of that line, and returns it stripped.

    Args:
        text: The raw email text (headers plus body).
        header: The header name to look for, e.g. "Subject".

    Returns:
        The stripped header value, or "" when the header (or its ":"
        separator) is not present. The original returned garbage slices
        for missing headers and silently dropped the final character when
        the header sat on the last line without a trailing newline.
    """
    start = text.find(header)
    if start == -1:
        return ""
    colon = text.find(":", start)
    if colon == -1:
        return ""
    value_start = colon + 1
    value_end = text.find("\n", value_start)
    if value_end == -1:
        # Header is on the last line with no trailing newline.
        value_end = len(text)
    return text[value_start:value_end].strip()
|
||||
|
||||
|
||||
class GmailLoader(BaseLoader):
    """Load and clean emails matching a Gmail search query."""

    def load_data(self, query):
        """Load data from gmail.

        Args:
            query: A Gmail search query string (e.g. "from:me label:inbox").

        Returns:
            A dict with "doc_id" (sha256 over the query plus all cleaned mail
            contents) and "data", a list of {"content", "meta_data"} entries.

        Raises:
            FileNotFoundError: when `credentials.json` is not in the current
                working directory (required by the Gmail API reader).
        """
        if not os.path.isfile("credentials.json"):
            raise FileNotFoundError(
                "You must download the valid credentials file from your google \
                dev account. Refer this `https://cloud.google.com/docs/authentication/api-keys`"
            )

        GmailReader = download_loader("GmailReader")
        loader = GmailReader(query=query, service=None, results_per_page=20)
        documents = loader.load_data()
        logging.info(f"Gmail Loader: {len(documents)} mails found for query- {query}")

        data = []
        data_contents = []
        for document in documents:
            original_size = len(document.text)
            snippet = document.metadata.get("snippet")
            mail_id = document.metadata.get("id")
            meta_data = {
                "url": mail_id,
                "date": get_header(document.text, "Date"),
                "subject": get_header(document.text, "Subject"),
                "from": get_header(document.text, "From"),
                "to": get_header(document.text, "To"),
                "search_query": query,
            }

            content = self._extract_content(document.text)

            cleaned_size = len(content)
            if original_size != 0:
                # Bug fix: previously logged `id` — the leftover loop variable
                # from HTML id-stripping (always "menu-main-menu"), which also
                # shadowed the builtin — instead of the mail's own id.
                logging.info(
                    f"[{mail_id}] Cleaned page size: {cleaned_size} characters, down from {original_size} (shrunk: {original_size-cleaned_size} chars, {round((1-(cleaned_size/original_size)) * 100, 2)}%)"  # noqa:E501
                )

            # Bug fix: the original template read "content: f{content}",
            # rendering a stray literal "f" before every mail body.
            result = f"""
            email from '{meta_data.get('from')}' to '{meta_data.get('to')}'
            subject: {meta_data.get('subject')}
            date: {meta_data.get('date')}
            preview: {snippet}
            content: {content}
            """
            data_content = dedent(result)
            data.append({"content": data_content, "meta_data": meta_data})
            data_contents.append(data_content)

        doc_id = hashlib.sha256((query + ", ".join(data_contents)).encode()).hexdigest()
        return {"doc_id": doc_id, "data": data}

    @staticmethod
    def _extract_content(raw_text: str) -> str:
        """Decode a quoted-printable email body and strip boilerplate HTML.

        Returns the cleaned plain text of the mail's HTML part (or of the
        whole decoded text when no "<!DOCTYPE" marker is present).
        """
        decoded_bytes = quopri.decodestring(raw_text)
        decoded_str = decoded_bytes.decode("utf-8", errors="replace")

        # Everything before the first "<!DOCTYPE" is header/preamble noise.
        # Bug fix: when the marker is absent, find() returns -1 and the old
        # slice kept only the final character; fall back to the full text.
        mail_start = decoded_str.find("<!DOCTYPE")
        email_data = decoded_str[mail_start:] if mail_start != -1 else decoded_str

        soup = BeautifulSoup(email_data, "html.parser")

        # Structural/boilerplate elements that never carry message content.
        tags_to_exclude = [
            "nav",
            "aside",
            "form",
            "header",
            "noscript",
            "svg",
            "canvas",
            "footer",
            "script",
            "style",
        ]
        for tag in soup(tags_to_exclude):
            tag.decompose()

        # Common navigation/sidebar containers, matched by HTML id.
        for element_id in ["sidebar", "main-navigation", "menu-main-menu"]:
            for tag in soup.find_all(id=element_id):
                tag.decompose()

        # Common theme/builder wrappers, matched by CSS class.
        classes_to_exclude = [
            "elementor-location-header",
            "navbar-header",
            "nav",
            "header-sidebar-wrapper",
            "blog-sidebar-wrapper",
            "related-posts",
        ]
        for class_name in classes_to_exclude:
            for tag in soup.find_all(class_=class_name):
                tag.decompose()

        return clean_string(soup.get_text())
|
||||
@@ -28,6 +28,7 @@ class IndirectDataType(Enum):
|
||||
UNSTRUCTURED = "unstructured"
|
||||
JSON = "json"
|
||||
OPENAPI = "openapi"
|
||||
GMAIL = "gmail"
|
||||
|
||||
|
||||
class SpecialDataType(Enum):
|
||||
@@ -55,3 +56,4 @@ class DataType(Enum):
|
||||
UNSTRUCTURED = IndirectDataType.UNSTRUCTURED.value
|
||||
JSON = IndirectDataType.JSON.value
|
||||
OPENAPI = IndirectDataType.OPENAPI.value
|
||||
GMAIL = IndirectDataType.GMAIL.value
|
||||
|
||||
@@ -259,6 +259,8 @@ def detect_datatype(source: Any) -> DataType:
|
||||
else:
|
||||
# Source is not a URL.
|
||||
|
||||
# TODO: check if source is gmail query
|
||||
|
||||
# Use text as final fallback.
|
||||
logging.debug(f"Source of `{formatted_source}` detected as `text`.")
|
||||
return DataType.TEXT
|
||||
|
||||
Reference in New Issue
Block a user