[Feature] Add support for RAG evaluation (#1154)
Co-authored-by: Deven Patel <deven298@yahoo.com>
Co-authored-by: Deshraj Yadav <deshrajdry@gmail.com>
embedchain/utils/eval.py (new file, 17 lines)
@@ -0,0 +1,17 @@
+from enum import Enum
+from typing import Optional
+
+from pydantic import BaseModel
+
+
+class EvalMetric(Enum):
+    CONTEXT_RELEVANCY = "context_relevancy"
+    ANSWER_RELEVANCY = "answer_relevancy"
+    GROUNDEDNESS = "groundedness"
+
+
+class EvalData(BaseModel):
+    question: str
+    contexts: list[str]
+    answer: str
+    ground_truth: Optional[str] = None  # Not used as of now
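For context, a minimal sketch of how these new types might be used when assembling evaluation inputs. The question, contexts, and answer values below are hypothetical placeholders, not part of this commit; the commit only defines the data model and metric enum.

from embedchain.utils.eval import EvalData, EvalMetric

# Build one evaluation record from a (hypothetical) RAG query result.
data = EvalData(
    question="What does this library do?",
    contexts=["Retrieved passage one.", "Retrieved passage two."],
    answer="It builds retrieval-augmented generation pipelines.",
    # ground_truth is Optional and, per the model comment, not used as of now.
)

# EvalMetric enumerates the supported metrics; .value gives the string key.
print(EvalMetric.GROUNDEDNESS.value)  # -> "groundedness"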