From 1b1f02eb57e5116f6e92c7dec4f17c402f69628d Mon Sep 17 00:00:00 2001
From: Anusha Yella <136127167+techcontributor@users.noreply.github.com>
Date: Wed, 2 Apr 2025 10:36:02 +0530
Subject: [PATCH] fix/failing-unit-tests (#2486)

---
 tests/embeddings/test_lm_studio_embeddings.py | 28 +++++------------
 tests/llms/test_lm_studio.py                  | 30 +++++++++++++------
 tests/vector_stores/test_azure_ai_search.py   |  2 +-
 3 files changed, 30 insertions(+), 30 deletions(-)

diff --git a/tests/embeddings/test_lm_studio_embeddings.py b/tests/embeddings/test_lm_studio_embeddings.py
index 2ba81499..26d174bb 100644
--- a/tests/embeddings/test_lm_studio_embeddings.py
+++ b/tests/embeddings/test_lm_studio_embeddings.py
@@ -6,10 +6,12 @@ from mem0.configs.embeddings.base import BaseEmbedderConfig
 
 @pytest.fixture
 def mock_lm_studio_client():
-    with patch("mem0.embeddings.lmstudio.Client") as mock_lm_studio:
+    with patch("mem0.embeddings.lmstudio.OpenAI") as mock_openai:
         mock_client = Mock()
-        mock_client.list.return_value = {"models": [{"name": "nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf"}]}
-        mock_lm_studio.return_value = mock_client
+        mock_client.embeddings.create.return_value = Mock(
+            data=[Mock(embedding=[0.1, 0.2, 0.3, 0.4, 0.5])]
+        )
+        mock_openai.return_value = mock_client
         yield mock_client
 
 
@@ -17,25 +19,11 @@ def test_embed_text(mock_lm_studio_client):
     config = BaseEmbedderConfig(model="nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf", embedding_dims=512)
     embedder = LMStudioEmbedding(config)
 
-    mock_response = {"embedding": [0.1, 0.2, 0.3, 0.4, 0.5]}
-    mock_lm_studio_client.embeddings.return_value = mock_response
-
     text = "Sample text to embed."
     embedding = embedder.embed(text)
 
-    mock_lm_studio_client.embeddings.assert_called_once_with(model="nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf", prompt=text)
+    mock_lm_studio_client.embeddings.create.assert_called_once_with(
+        input=["Sample text to embed."], model="nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf"
+    )
     assert embedding == [0.1, 0.2, 0.3, 0.4, 0.5]
-
-
-def test_ensure_model_exists(mock_lm_studio_client):
-    config = BaseEmbedderConfig(model="nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf", embedding_dims=512)
-    embedder = LMStudioEmbedding(config)
-
-    mock_lm_studio_client.pull.assert_not_called()
-
-    mock_lm_studio_client.list.return_value = {"models": []}
-
-    embedder._ensure_model_exists()
-
-    mock_lm_studio_client.pull.assert_called_once_with("nomic-embed-text")
 
diff --git a/tests/llms/test_lm_studio.py b/tests/llms/test_lm_studio.py
index 13d98854..8d0e4871 100644
--- a/tests/llms/test_lm_studio.py
+++ b/tests/llms/test_lm_studio.py
@@ -8,27 +8,39 @@ from mem0.llms.lmstudio import LMStudioLLM
 @pytest.fixture
 def mock_lm_studio_client():
-    with patch("mem0.llms.lmstudio.Client") as mock_lm_studio:
+    with patch("mem0.llms.lmstudio.OpenAI") as mock_openai:  # Corrected path
         mock_client = Mock()
-        mock_client.list.return_value = {"models": [{"name": "lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf"}]}
-        mock_lm_studio.return_value = mock_client
+        mock_client.chat.completions.create.return_value = Mock(
+            choices=[
+                Mock(message=Mock(content="I'm doing well, thank you for asking!"))
+            ]
+        )
+        mock_openai.return_value = mock_client
         yield mock_client
 
 
 def test_generate_response_without_tools(mock_lm_studio_client):
-    config = BaseLlmConfig(model="lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", temperature=0.7, max_tokens=100, top_p=1.0)
+    config = BaseLlmConfig(
+        model="lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf",
+        temperature=0.7,
+        max_tokens=100,
+        top_p=1.0,
+    )
     llm = LMStudioLLM(config)
 
     messages = [
         {"role": "system", "content": "You are a helpful assistant."},
         {"role": "user", "content": "Hello, how are you?"},
     ]
 
-    mock_response = {"message": {"content": "I'm doing well, thank you for asking!"}}
-    mock_lm_studio_client.chat.return_value = mock_response
-
     response = llm.generate_response(messages)
 
-    mock_lm_studio_client.chat.assert_called_once_with(
-        model="lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", messages=messages, options={"temperature": 0.7, "num_predict": 100, "top_p": 1.0}
+    mock_lm_studio_client.chat.completions.create.assert_called_once_with(
+        model="lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf",
+        messages=messages,
+        temperature=0.7,
+        max_tokens=100,
+        top_p=1.0,
+        response_format={"type": "json_object"},
     )
+
     assert response == "I'm doing well, thank you for asking!"
diff --git a/tests/vector_stores/test_azure_ai_search.py b/tests/vector_stores/test_azure_ai_search.py
index c1e264d7..686a148e 100644
--- a/tests/vector_stores/test_azure_ai_search.py
+++ b/tests/vector_stores/test_azure_ai_search.py
@@ -38,7 +38,7 @@ def mock_clients():
     # Stub required methods on index_client.
     mock_index_client.create_or_update_index = Mock()
     mock_index_client.list_indexes = Mock()
-    mock_index_client.list_index_names = Mock(return_value=["test-index"])
+    mock_index_client.list_index_names = Mock(return_value=[])
     mock_index_client.delete_index = Mock()
     # For col_info() we assume get_index returns an object with name and fields attributes.
     fake_index = Mock()