fix/failing-unit-tests (#2486)
@@ -6,10 +6,12 @@ from mem0.configs.embeddings.base import BaseEmbedderConfig
 
 @pytest.fixture
 def mock_lm_studio_client():
-    with patch("mem0.embeddings.lmstudio.Client") as mock_lm_studio:
+    with patch("mem0.embeddings.lmstudio.OpenAI") as mock_openai:
         mock_client = Mock()
-        mock_client.list.return_value = {"models": [{"name": "nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf"}]}
-        mock_lm_studio.return_value = mock_client
+        mock_client.embeddings.create.return_value = Mock(
+            data=[Mock(embedding=[0.1, 0.2, 0.3, 0.4, 0.5])]
+        )
+        mock_openai.return_value = mock_client
         yield mock_client
 
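Note: the fixture above swaps the Ollama-style Client patch for the OpenAI client that LM Studio's OpenAI-compatible server actually speaks. For orientation, here is a minimal sketch of the embed() path these mocks now exercise; the class name, base_url, and api_key below are illustrative assumptions, not mem0's actual source.

    from openai import OpenAI

    class LMStudioEmbeddingSketch:
        def __init__(self, model, base_url="http://localhost:1234/v1"):
            # LM Studio serves an OpenAI-compatible API, so the stock OpenAI
            # client works against it; the API key is a placeholder.
            self.model = model
            self.client = OpenAI(base_url=base_url, api_key="lm-studio")

        def embed(self, text):
            # Matches the updated assertion: the input is passed as a list and
            # the first embedding in the response is unwrapped.
            response = self.client.embeddings.create(input=[text], model=self.model)
            return response.data[0].embedding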
@@ -17,25 +19,11 @@ def test_embed_text(mock_lm_studio_client):
     config = BaseEmbedderConfig(model="nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf", embedding_dims=512)
     embedder = LMStudioEmbedding(config)
 
-    mock_response = {"embedding": [0.1, 0.2, 0.3, 0.4, 0.5]}
-    mock_lm_studio_client.embeddings.return_value = mock_response
-
     text = "Sample text to embed."
     embedding = embedder.embed(text)
 
-    mock_lm_studio_client.embeddings.assert_called_once_with(model="nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf", prompt=text)
+    mock_lm_studio_client.embeddings.create.assert_called_once_with(
+        input=["Sample text to embed."], model="nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf"
+    )
 
     assert embedding == [0.1, 0.2, 0.3, 0.4, 0.5]
-
-
-def test_ensure_model_exists(mock_lm_studio_client):
-    config = BaseEmbedderConfig(model="nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf", embedding_dims=512)
-    embedder = LMStudioEmbedding(config)
-
-    mock_lm_studio_client.pull.assert_not_called()
-
-    mock_lm_studio_client.list.return_value = {"models": []}
-
-    embedder._ensure_model_exists()
-
-    mock_lm_studio_client.pull.assert_called_once_with("nomic-embed-text")
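Note: the rewritten test no longer stubs a top-level embeddings callable; it relies on Mock's auto-created attribute chain. A standalone illustration of that pattern, independent of mem0:

    from unittest.mock import Mock

    client = Mock()
    client.embeddings.create.return_value = Mock(
        data=[Mock(embedding=[0.1, 0.2, 0.3, 0.4, 0.5])]
    )

    # Attribute access on a Mock auto-creates nested mocks, so the call below
    # can be configured and asserted on without any real client object.
    result = client.embeddings.create(input=["hi"], model="any-model")
    assert result.data[0].embedding == [0.1, 0.2, 0.3, 0.4, 0.5]
    client.embeddings.create.assert_called_once_with(input=["hi"], model="any-model")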
@@ -8,27 +8,39 @@ from mem0.llms.lmstudio import LMStudioLLM
 
 @pytest.fixture
 def mock_lm_studio_client():
-    with patch("mem0.llms.lmstudio.Client") as mock_lm_studio:
+    with patch("mem0.llms.lmstudio.OpenAI") as mock_openai:  # Corrected path
         mock_client = Mock()
-        mock_client.list.return_value = {"models": [{"name": "lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf"}]}
-        mock_lm_studio.return_value = mock_client
+        mock_client.chat.completions.create.return_value = Mock(
+            choices=[
+                Mock(message=Mock(content="I'm doing well, thank you for asking!"))
+            ]
+        )
+        mock_openai.return_value = mock_client
         yield mock_client
 
 
 def test_generate_response_without_tools(mock_lm_studio_client):
-    config = BaseLlmConfig(model="lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", temperature=0.7, max_tokens=100, top_p=1.0)
+    config = BaseLlmConfig(
+        model="lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf",
+        temperature=0.7,
+        max_tokens=100,
+        top_p=1.0,
+    )
     llm = LMStudioLLM(config)
     messages = [
         {"role": "system", "content": "You are a helpful assistant."},
         {"role": "user", "content": "Hello, how are you?"},
     ]
 
-    mock_response = {"message": {"content": "I'm doing well, thank you for asking!"}}
-    mock_lm_studio_client.chat.return_value = mock_response
-
     response = llm.generate_response(messages)
 
-    mock_lm_studio_client.chat.assert_called_once_with(
-        model="lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", messages=messages, options={"temperature": 0.7, "num_predict": 100, "top_p": 1.0}
-    )
+    mock_lm_studio_client.chat.completions.create.assert_called_once_with(
+        model="lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf",
+        messages=messages,
+        temperature=0.7,
+        max_tokens=100,
+        top_p=1.0,
+        response_format={"type": "json_object"},
+    )
 
     assert response == "I'm doing well, thank you for asking!"
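Note: the new assertions imply that LMStudioLLM.generate_response forwards the sampling parameters as top-level keyword arguments (rather than an Ollama-style options={...} dict) and requests JSON output. A rough sketch of that call path, assuming nothing beyond what the test asserts; the function name and signature here are illustrative:

    def generate_response_sketch(client, model, messages,
                                 temperature=0.7, max_tokens=100, top_p=1.0):
        # Parameters go directly to chat.completions.create, and JSON output
        # is requested via response_format.
        response = client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            top_p=top_p,
            response_format={"type": "json_object"},
        )
        # The fixture returns Mock(choices=[Mock(message=Mock(content=...))]),
        # so this unwrapping is exactly what the assertion exercises.
        return response.choices[0].message.content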
@@ -38,7 +38,7 @@ def mock_clients():
     # Stub required methods on index_client.
     mock_index_client.create_or_update_index = Mock()
     mock_index_client.list_indexes = Mock()
-    mock_index_client.list_index_names = Mock(return_value=["test-index"])
+    mock_index_client.list_index_names = Mock(return_value=[])
     mock_index_client.delete_index = Mock()
     # For col_info() we assume get_index returns an object with name and fields attributes.
     fake_index = Mock()
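Note: changing list_index_names to return an empty list drives the setup code down its "index does not exist" branch, so index creation actually happens under test. A hypothetical sketch of such a check; the helper name and arguments are illustrative, not taken from mem0's AzureAISearch code:

    from unittest.mock import Mock

    def ensure_index(index_client, index_definition, name):
        # Create the index only when it is not already present.
        if name not in index_client.list_index_names():
            index_client.create_or_update_index(index_definition)

    index_client = Mock()
    index_client.list_index_names = Mock(return_value=[])
    ensure_index(index_client, index_definition={"name": "test-index"}, name="test-index")
    index_client.create_or_update_index.assert_called_once()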