fix(llm): consume llm base url config in a better way (#1861)

Mathew Shen
2024-09-24 12:35:09 +08:00
committed by GitHub
parent 56ceecb4e3
commit 8511eca03b
5 changed files with 40 additions and 6 deletions


@@ -1,5 +1,5 @@
from unittest.mock import Mock, patch
import os
import pytest
from mem0.configs.llms.base import BaseLlmConfig
@@ -14,6 +14,30 @@ def mock_openai_client():
    yield mock_client

def test_openai_llm_base_url():
    # case 1: default config, falling back to the official OpenAI base URL
    config = BaseLlmConfig(model="gpt-4o", temperature=0.7, max_tokens=100, top_p=1.0, api_key="api_key")
    llm = OpenAILLM(config)
    # Note: the openai client parses the raw base_url into a URL object, which carries a trailing slash
    assert str(llm.client.base_url) == "https://api.openai.com/v1/"

    # case 2: base URL taken from the OPENAI_API_BASE environment variable
    provider_base_url = "https://api.provider.com/v1"
    os.environ["OPENAI_API_BASE"] = provider_base_url
    config = BaseLlmConfig(model="gpt-4o", temperature=0.7, max_tokens=100, top_p=1.0, api_key="api_key")
    llm = OpenAILLM(config)
    # Note: the openai client parses the raw base_url into a URL object, which carries a trailing slash
    assert str(llm.client.base_url) == provider_base_url + "/"

    # case 3: base URL taken from config.openai_base_url
    # (OPENAI_API_BASE is still set from case 2, so this also checks that the config value takes precedence)
    config_base_url = "https://api.config.com/v1"
    config = BaseLlmConfig(
        model="gpt-4o", temperature=0.7, max_tokens=100, top_p=1.0, api_key="api_key",
        openai_base_url=config_base_url,
    )
    llm = OpenAILLM(config)
    # Note: the openai client parses the raw base_url into a URL object, which carries a trailing slash
    assert str(llm.client.base_url) == config_base_url + "/"

def test_generate_response_without_tools(mock_openai_client):
    config = BaseLlmConfig(model="gpt-4o", temperature=0.7, max_tokens=100, top_p=1.0)
    llm = OpenAILLM(config)