[Feature] Add support for RAG evaluation (#1154)

Co-authored-by: Deven Patel <deven298@yahoo.com>
Co-authored-by: Deshraj Yadav <deshrajdry@gmail.com>
This commit is contained in:
Deven Patel
2024-01-11 20:02:47 +05:30
committed by GitHub
parent 69e83adae0
commit e2cca61cd3
18 changed files with 788 additions and 21 deletions

17
embedchain/utils/eval.py Normal file
View File

@@ -0,0 +1,17 @@
from enum import Enum
from typing import Optional
from pydantic import BaseModel
class EvalMetric(Enum):
    """Closed set of metrics supported for RAG evaluation.

    Each member's value is the snake_case identifier string used to
    refer to the metric (e.g. in configuration or reported results).
    """

    # How relevant the retrieved contexts are to the question.
    CONTEXT_RELEVANCY = "context_relevancy"
    # How relevant the generated answer is to the question.
    ANSWER_RELEVANCY = "answer_relevancy"
    # Whether the answer is grounded in the retrieved contexts.
    GROUNDEDNESS = "groundedness"
class EvalData(BaseModel):
    """A single RAG evaluation sample: a question, the retrieved
    contexts, and the generated answer to be scored.
    """

    # The query that was posed (presumably the end-user question; the
    # consumer of this model is not visible here).
    question: str
    # Retrieved context chunks associated with this question.
    contexts: list[str]
    # The generated answer to evaluate against question/contexts.
    answer: str
    ground_truth: Optional[str] = None  # Not used as of now

View File

@@ -201,7 +201,8 @@ def detect_datatype(source: Any) -> DataType:
formatted_source = format_source(str(source), 30)
if url:
from langchain.document_loaders.youtube import ALLOWED_NETLOCK as YOUTUBE_ALLOWED_NETLOCS
from langchain.document_loaders.youtube import \
ALLOWED_NETLOCK as YOUTUBE_ALLOWED_NETLOCS
if url.netloc in YOUTUBE_ALLOWED_NETLOCS:
logging.debug(f"Source of `{formatted_source}` detected as `youtube_video`.")