feat: openai default model uses gpt-4o-mini (#1526)
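For context on what the updated assertions exercise: the Azure test asserts `model_name=config.model or "gpt-4o-mini"`, i.e. Python's `or` fallback supplies the new default whenever no model is configured. A minimal standalone sketch of that idiom (the `DEFAULT_MODEL` constant and `resolve_model` helper are illustrative names, not part of the codebase):

# Sketch of the default-model fallback the updated tests assert.
# `DEFAULT_MODEL` and `resolve_model` are hypothetical names for illustration.
DEFAULT_MODEL = "gpt-4o-mini"  # previously "gpt-3.5-turbo"

def resolve_model(configured_model):
    # Falsy values (None, "") fall through to the default, matching
    # `model_name=config.model or "gpt-4o-mini"` in the tests below.
    return configured_model or DEFAULT_MODEL

assert resolve_model(None) == "gpt-4o-mini"
assert resolve_model("gpt-4") == "gpt-4"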
@@ -13,7 +13,7 @@ def azure_openai_llm():
     config = BaseLlmConfig(
         deployment_name="azure_deployment",
         temperature=0.7,
-        model="gpt-3.5-turbo",
+        model="gpt-4o-mini",
         max_tokens=50,
         system_prompt="System Prompt",
     )
@@ -40,7 +40,7 @@ def test_get_answer(azure_openai_llm):
     mock_chat.assert_called_once_with(
         deployment_name=azure_openai_llm.config.deployment_name,
         openai_api_version="2024-02-01",
-        model_name=azure_openai_llm.config.model or "gpt-3.5-turbo",
+        model_name=azure_openai_llm.config.model or "gpt-4o-mini",
         temperature=azure_openai_llm.config.temperature,
         max_tokens=azure_openai_llm.config.max_tokens,
         streaming=azure_openai_llm.config.stream,
@@ -60,7 +60,7 @@ def test_get_messages(azure_openai_llm):
 
 
 def test_when_no_deployment_name_provided():
-    config = BaseLlmConfig(temperature=0.7, model="gpt-3.5-turbo", max_tokens=50, system_prompt="System Prompt")
+    config = BaseLlmConfig(temperature=0.7, model="gpt-4o-mini", max_tokens=50, system_prompt="System Prompt")
     with pytest.raises(ValueError):
         llm = AzureOpenAILlm(config)
         llm.get_llm_model_answer("Test Prompt")
@@ -70,7 +70,7 @@ def test_with_api_version():
     config = BaseLlmConfig(
         deployment_name="azure_deployment",
         temperature=0.7,
-        model="gpt-3.5-turbo",
+        model="gpt-4o-mini",
         max_tokens=50,
         system_prompt="System Prompt",
         api_version="2024-02-01",
@@ -83,7 +83,7 @@ def test_with_api_version():
     mock_chat.assert_called_once_with(
         deployment_name="azure_deployment",
         openai_api_version="2024-02-01",
-        model_name="gpt-3.5-turbo",
+        model_name="gpt-4o-mini",
         temperature=0.7,
         max_tokens=50,
         streaming=False,
@@ -108,7 +108,7 @@ def test_get_llm_model_answer_with_http_client_proxies():
         max_tokens=50,
         stream=False,
         system_prompt="System prompt",
-        model="gpt-3.5-turbo",
+        model="gpt-4o-mini",
         http_client_proxies="http://testproxy.mem0.net:8000",
     )
 
@@ -118,7 +118,7 @@ def test_get_llm_model_answer_with_http_client_proxies():
     mock_chat.assert_called_once_with(
         deployment_name="azure_deployment",
         openai_api_version="2024-02-01",
-        model_name="gpt-3.5-turbo",
+        model_name="gpt-4o-mini",
         temperature=0.7,
         max_tokens=50,
         streaming=False,
@@ -144,7 +144,7 @@ def test_get_llm_model_answer_with_http_async_client_proxies():
         max_tokens=50,
         stream=False,
         system_prompt="System prompt",
-        model="gpt-3.5-turbo",
+        model="gpt-4o-mini",
         http_async_client_proxies={"http://": "http://testproxy.mem0.net:8000"},
     )
 
@@ -154,7 +154,7 @@ def test_get_llm_model_answer_with_http_async_client_proxies():
     mock_chat.assert_called_once_with(
         deployment_name="azure_deployment",
         openai_api_version="2024-02-01",
-        model_name="gpt-3.5-turbo",
+        model_name="gpt-4o-mini",
         temperature=0.7,
         max_tokens=50,
         streaming=False,

@@ -24,7 +24,7 @@ def config(env_config):
         top_p=0.8,
         stream=False,
         system_prompt="System prompt",
-        model="gpt-3.5-turbo",
+        model="gpt-4o-mini",
         http_client_proxies=None,
         http_async_client_proxies=None,
     )
@@ -211,7 +211,7 @@ def test_get_llm_model_answer_with_http_client_proxies(env_config, mocker):
         top_p=0.8,
         stream=False,
         system_prompt="System prompt",
-        model="gpt-3.5-turbo",
+        model="gpt-4o-mini",
         http_client_proxies="http://testproxy.mem0.net:8000",
     )
 
@@ -246,7 +246,7 @@ def test_get_llm_model_answer_with_http_async_client_proxies(env_config, mocker)
         top_p=0.8,
         stream=False,
         system_prompt="System prompt",
-        model="gpt-3.5-turbo",
+        model="gpt-4o-mini",
         http_async_client_proxies={"http://": "http://testproxy.mem0.net:8000"},
     )
 
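Callers who want the previous behavior can still pin the model explicitly, since an explicit `model` takes precedence over the fallback default. A minimal sketch, assuming a `BaseLlmConfig` import path consistent with these tests (the exact module path is an assumption):

# Pinning the previous default explicitly; the import path is an assumption.
from embedchain.config import BaseLlmConfig

config = BaseLlmConfig(
    deployment_name="azure_deployment",  # values mirror the test fixtures above
    temperature=0.7,
    model="gpt-3.5-turbo",  # explicit model overrides the new gpt-4o-mini default
    max_tokens=50,
)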