[Tests] add tests for evaluation metrics (#1174)

Co-authored-by: Deven Patel <deven298@yahoo.com>
Author: Deven Patel
Date: 2024-01-15 16:05:58 +05:30
Committed by: GitHub
Parent: 325e11f0de
Commit: 2784bae772
4 changed files with 476 additions and 1 deletion


@@ -21,7 +21,7 @@ class Groundedness(BaseMetric):
     def __init__(self, config: Optional[GroundednessConfig] = None):
         super().__init__(name=EvalMetric.GROUNDEDNESS.value)
         self.config = config or GroundednessConfig()
-        api_key = self.config.api_key or os.environ["OPENAI_API_KEY"]
+        api_key = self.config.api_key or os.getenv("OPENAI_API_KEY")
         if not api_key:
             raise ValueError("Please set the OPENAI_API_KEY environment variable or pass the `api_key` in config.")
         self.client = OpenAI(api_key=api_key)
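
The substance of the one-line change: os.environ["OPENAI_API_KEY"] raises a KeyError when the variable is unset, so the `if not api_key` guard and its friendlier ValueError were unreachable; os.getenv returns None instead, letting the explicit check run. Below is a minimal pytest sketch of the kind of test this behavior enables; the import path and constructor usage are assumptions for illustration, not the actual test file added in this commit.

    import pytest

    def test_groundedness_raises_without_api_key(monkeypatch):
        # Remove the key from the environment so the fallback path is exercised.
        monkeypatch.delenv("OPENAI_API_KEY", raising=False)
        # Hypothetical import path; the real module location may differ.
        from embedchain.evaluation.metrics import Groundedness
        # With no api_key in config and no env var set, the constructor should
        # raise the explicit ValueError rather than an opaque KeyError.
        with pytest.raises(ValueError):
            Groundedness()

With the old os.environ[...] lookup, this test would have died with a KeyError before the constructor's own error message could surface, which is exactly what the change fixes.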