feat(LM Studio): Add response_format param for LM Studio to config (#2502)

This commit is contained in:
i-sun
2025-06-17 21:25:18 +09:00
committed by GitHub
parent c70dc7614b
commit 62c330e5b3
4 changed files with 34 additions and 0 deletions

View File

@@ -42,3 +42,30 @@ def test_generate_response_without_tools(mock_lm_studio_client):
)
assert response == "I'm doing well, thank you for asking!"
def test_generate_response_specifying_response_format(mock_lm_studio_client):
    """Verify that a response format set on the config is forwarded to the client.

    When ``lmstudio_response_format`` is supplied on ``BaseLlmConfig``, the
    LM Studio backend must pass it through as the ``response_format`` keyword
    of ``chat.completions.create``, alongside the usual sampling parameters.
    """
    model_id = "lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf"
    schema_format = {"type": "json_schema"}

    # Build a config that carries the response format under test.
    llm_config = BaseLlmConfig(
        model=model_id,
        temperature=0.7,
        max_tokens=100,
        top_p=1.0,
        lmstudio_response_format=schema_format,
    )
    llm = LMStudioLLM(llm_config)

    chat_messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
    ]
    response = llm.generate_response(chat_messages)

    # The mocked client must have received exactly one call, with the
    # response_format taken from the config (not a separate argument).
    mock_lm_studio_client.chat.completions.create.assert_called_once_with(
        model=model_id,
        messages=chat_messages,
        temperature=0.7,
        max_tokens=100,
        top_p=1.0,
        response_format=schema_format,
    )
    assert response == "I'm doing well, thank you for asking!"