diff --git a/docs/components/embedders/config.mdx b/docs/components/embedders/config.mdx
index 60047daa..dc84c497 100644
--- a/docs/components/embedders/config.mdx
+++ b/docs/components/embedders/config.mdx
@@ -84,6 +84,7 @@ Here's a comprehensive list of all parameters that can be used across different
| `memory_add_embedding_type` | The type of embedding to use for the add memory action | VertexAI |
| `memory_update_embedding_type` | The type of embedding to use for the update memory action | VertexAI |
| `memory_search_embedding_type` | The type of embedding to use for the search memory action | VertexAI |
+| `lmstudio_base_url` | Base URL for LM Studio API | LM Studio |
| Parameter | Description | Provider |
diff --git a/docs/components/embedders/models/lmstudio.mdx b/docs/components/embedders/models/lmstudio.mdx
new file mode 100644
index 00000000..bc767b07
--- /dev/null
+++ b/docs/components/embedders/models/lmstudio.mdx
@@ -0,0 +1,38 @@
+You can use embedding models from LM Studio to run Mem0 locally.
+
+### Usage
+
+```python
+import os
+from mem0 import Memory
+
+os.environ["OPENAI_API_KEY"] = "your_api_key" # For LLM
+
+config = {
+ "embedder": {
+ "provider": "lmstudio",
+ "config": {
+ "model": "nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf"
+ }
+ }
+}
+
+m = Memory.from_config(config)
+messages = [
+ {"role": "user", "content": "I'm planning to watch a movie tonight. Any recommendations?"},
+ {"role": "assistant", "content": "How about a thriller movies? They can be quite engaging."},
+ {"role": "user", "content": "I’m not a big fan of thriller movies but I love sci-fi movies."},
+ {"role": "assistant", "content": "Got it! I'll avoid thriller recommendations and suggest sci-fi movies in the future."}
+]
+m.add(messages, user_id="john")
+```
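+
+The settings listed under Config below can also be set explicitly. A minimal sketch, assuming the server runs at the default address and the embedding model produces 768-dimensional vectors (adjust both to your setup):
+
+```python
+config = {
+    "embedder": {
+        "provider": "lmstudio",
+        "config": {
+            "model": "nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf",
+            "embedding_dims": 768,
+            "lmstudio_base_url": "http://localhost:1234/v1",
+        }
+    }
+}
+```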
+
+### Config
+
+Here are the parameters available for configuring the LM Studio embedder:
+
+| Parameter | Description | Default Value |
+| --- | --- | --- |
+| `model` | The name of the embedding model to use | `nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf` |
+| `embedding_dims` | Dimensions of the embedding model | `1536` |
+| `lmstudio_base_url` | Base URL for LM Studio connection | `http://localhost:1234/v1` |
\ No newline at end of file
diff --git a/docs/components/embedders/overview.mdx b/docs/components/embedders/overview.mdx
index b5c57ffb..7630e731 100644
--- a/docs/components/embedders/overview.mdx
+++ b/docs/components/embedders/overview.mdx
@@ -22,6 +22,7 @@ See the list of supported embedders below.
+
## Usage
diff --git a/docs/components/llms/config.mdx b/docs/components/llms/config.mdx
index c829d29a..4c9f2281 100644
--- a/docs/components/llms/config.mdx
+++ b/docs/components/llms/config.mdx
@@ -108,6 +108,7 @@ Here's a comprehensive list of all parameters that can be used across different
| `azure_kwargs` | Azure LLM args for initialization | AzureOpenAI |
| `deepseek_base_url` | Base URL for DeepSeek API | DeepSeek |
| `xai_base_url` | Base URL for XAI API | XAI |
+ | `lmstudio_base_url` | Base URL for LM Studio API | LM Studio |
| Parameter | Description | Provider |
diff --git a/docs/components/llms/models/lmstudio.mdx b/docs/components/llms/models/lmstudio.mdx
new file mode 100644
index 00000000..f88490db
--- /dev/null
+++ b/docs/components/llms/models/lmstudio.mdx
@@ -0,0 +1,82 @@
+---
+title: LM Studio
+---
+
+To use LM Studio with Mem0, you'll need to have LM Studio running locally with its server enabled. LM Studio provides a way to run local LLMs with an OpenAI-compatible API.
+
+## Usage
+
+<CodeGroup>
+```python Python
+import os
+from mem0 import Memory
+
+os.environ["OPENAI_API_KEY"] = "your-api-key" # used for embedding model
+
+config = {
+ "llm": {
+ "provider": "lmstudio",
+ "config": {
+ "model": "lmstudio-community/Meta-Llama-3.1-70B-Instruct-GGUF/Meta-Llama-3.1-70B-Instruct-IQ2_M.gguf",
+ "temperature": 0.2,
+ "max_tokens": 2000,
+ "lmstudio_base_url": "http://localhost:1234/v1", # default LM Studio API URL
+ }
+ }
+}
+
+m = Memory.from_config(config)
+messages = [
+ {"role": "user", "content": "I'm planning to watch a movie tonight. Any recommendations?"},
+ {"role": "assistant", "content": "How about a thriller movies? They can be quite engaging."},
+ {"role": "user", "content": "I'm not a big fan of thriller movies but I love sci-fi movies."},
+ {"role": "assistant", "content": "Got it! I'll avoid thriller recommendations and suggest sci-fi movies in the future."}
+]
+m.add(messages, user_id="alice", metadata={"category": "movies"})
+```
+</CodeGroup>
+
+### Running Completely Locally
+
+You can also use LM Studio for both LLM and embedding to run Mem0 entirely locally:
+
+```python
+from mem0 import Memory
+
+# No external API keys needed!
+config = {
+ "llm": {
+ "provider": "lmstudio"
+ },
+ "embedder": {
+ "provider": "lmstudio"
+ }
+}
+
+m = Memory.from_config(config)
+messages = [
+ {"role": "user", "content": "I'm planning to watch a movie tonight. Any recommendations?"},
+ {"role": "assistant", "content": "How about a thriller movies? They can be quite engaging."},
+ {"role": "user", "content": "I'm not a big fan of thriller movies but I love sci-fi movies."},
+ {"role": "assistant", "content": "Got it! I'll avoid thriller recommendations and suggest sci-fi movies in the future."}
+]
+m.add(messages, user_id="alice123", metadata={"category": "movies"})
+```
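+
+Once memories are added, you can query them exactly as you would with hosted models. A minimal sketch (the query text is illustrative):
+
+```python
+# returns the stored memories most relevant to the query
+related = m.search("what kind of movies does alice like?", user_id="alice123")
+print(related)
+```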
+
+<Note>
+  When using LM Studio for both LLM and embedding, make sure you have:
+  1. An LLM model loaded for generating responses
+  2. An embedding model loaded for vector embeddings
+  3. The server enabled with the correct endpoints accessible
+</Note>
+
+<Note>
+  To use LM Studio, you need to:
+  1. Download and install [LM Studio](https://lmstudio.ai/)
+  2. Start a local server from the "Server" tab (a quick connectivity check is shown below)
+  3. Set the appropriate `lmstudio_base_url` in your configuration (the default is `http://localhost:1234/v1`)
+</Note>
+
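+Before wiring LM Studio into Mem0, you can confirm the local server is reachable using the OpenAI SDK, since LM Studio exposes an OpenAI-compatible API. A minimal sketch, assuming the default address:
+
+```python
+from openai import OpenAI
+
+# LM Studio ignores the API key, but the SDK requires some value
+client = OpenAI(base_url="http://localhost:1234/v1", api_key="lm-studio")
+print([model.id for model in client.models.list().data])  # models currently loaded
+```
+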
+## Config
+
+All available parameters for the `lmstudio` config are present in [Master List of All Params in Config](../config).
diff --git a/docs/components/llms/overview.mdx b/docs/components/llms/overview.mdx
index f48a22b1..1c2d8828 100644
--- a/docs/components/llms/overview.mdx
+++ b/docs/components/llms/overview.mdx
@@ -32,6 +32,7 @@ To view all supported llms, visit the [Supported LLMs](./models).
+
## Structured vs Unstructured Outputs
diff --git a/mem0/configs/embeddings/base.py b/mem0/configs/embeddings/base.py
index a234e389..b66b3339 100644
--- a/mem0/configs/embeddings/base.py
+++ b/mem0/configs/embeddings/base.py
@@ -30,6 +30,8 @@ class BaseEmbedderConfig(ABC):
memory_add_embedding_type: Optional[str] = None,
memory_update_embedding_type: Optional[str] = None,
memory_search_embedding_type: Optional[str] = None,
+ # LM Studio specific
+ lmstudio_base_url: Optional[str] = "http://localhost:1234/v1",
):
"""
Initializes a configuration class instance for the Embeddings.
@@ -58,6 +60,8 @@ class BaseEmbedderConfig(ABC):
:type memory_update_embedding_type: Optional[str], optional
:param memory_search_embedding_type: The type of embedding to use for the search memory action, defaults to None
:type memory_search_embedding_type: Optional[str], optional
+        :param lmstudio_base_url: LM Studio base URL to be used, defaults to "http://localhost:1234/v1"
+ :type lmstudio_base_url: Optional[str], optional
"""
self.model = model
@@ -82,3 +86,6 @@ class BaseEmbedderConfig(ABC):
self.memory_add_embedding_type = memory_add_embedding_type
self.memory_update_embedding_type = memory_update_embedding_type
self.memory_search_embedding_type = memory_search_embedding_type
+
+ # LM Studio specific
+ self.lmstudio_base_url = lmstudio_base_url
diff --git a/mem0/configs/llms/base.py b/mem0/configs/llms/base.py
index f55e9e9a..e71cfdbc 100644
--- a/mem0/configs/llms/base.py
+++ b/mem0/configs/llms/base.py
@@ -39,6 +39,8 @@ class BaseLlmConfig(ABC):
deepseek_base_url: Optional[str] = None,
# XAI specific
xai_base_url: Optional[str] = None,
+ # LM Studio specific
+ lmstudio_base_url: Optional[str] = "http://localhost:1234/v1",
):
"""
Initializes a configuration class instance for the LLM.
@@ -83,6 +85,8 @@ class BaseLlmConfig(ABC):
:type deepseek_base_url: Optional[str], optional
:param xai_base_url: XAI base URL to be use, defaults to None
:type xai_base_url: Optional[str], optional
+        :param lmstudio_base_url: LM Studio base URL to be used, defaults to "http://localhost:1234/v1"
+ :type lmstudio_base_url: Optional[str], optional
"""
self.model = model
@@ -116,3 +120,6 @@ class BaseLlmConfig(ABC):
# XAI specific
self.xai_base_url = xai_base_url
+
+ # LM Studio specific
+ self.lmstudio_base_url = lmstudio_base_url
diff --git a/mem0/embeddings/configs.py b/mem0/embeddings/configs.py
index b35ffba4..caf7aea7 100644
--- a/mem0/embeddings/configs.py
+++ b/mem0/embeddings/configs.py
@@ -13,7 +13,7 @@ class EmbedderConfig(BaseModel):
@field_validator("config")
def validate_config(cls, v, values):
provider = values.data.get("provider")
- if provider in ["openai", "ollama", "huggingface", "azure_openai", "gemini", "vertexai", "together"]:
+ if provider in ["openai", "ollama", "huggingface", "azure_openai", "gemini", "vertexai", "together", "lmstudio"]:
return v
else:
raise ValueError(f"Unsupported embedding provider: {provider}")
diff --git a/mem0/embeddings/lmstudio.py b/mem0/embeddings/lmstudio.py
new file mode 100644
index 00000000..0f227081
--- /dev/null
+++ b/mem0/embeddings/lmstudio.py
@@ -0,0 +1,33 @@
+from typing import Literal, Optional
+
+from openai import OpenAI
+
+from mem0.configs.embeddings.base import BaseEmbedderConfig
+from mem0.embeddings.base import EmbeddingBase
+
+
+class LMStudioEmbedding(EmbeddingBase):
+ def __init__(self, config: Optional[BaseEmbedderConfig] = None):
+ super().__init__(config)
+
+ self.config.model = self.config.model or "nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf"
+ self.config.embedding_dims = self.config.embedding_dims or 1536
+ self.config.api_key = self.config.api_key or "lm-studio"
+
+ self.client = OpenAI(base_url=self.config.lmstudio_base_url, api_key=self.config.api_key)
+
+ def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
+ """
+ Get the embedding for the given text using LM Studio.
+ Args:
+ text (str): The text to embed.
+ memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.
+ Returns:
+ list: The embedding vector.
+ """
+ text = text.replace("\n", " ")
+ return (
+ self.client.embeddings.create(input=[text], model=self.config.model)
+ .data[0]
+ .embedding
+ )
\ No newline at end of file
diff --git a/mem0/llms/configs.py b/mem0/llms/configs.py
index 40287ce1..e94ef43f 100644
--- a/mem0/llms/configs.py
+++ b/mem0/llms/configs.py
@@ -24,6 +24,7 @@ class LlmConfig(BaseModel):
"gemini",
"deepseek",
"xai",
+ "lmstudio",
):
return v
else:
diff --git a/mem0/llms/lmstudio.py b/mem0/llms/lmstudio.py
new file mode 100644
index 00000000..6cb0895e
--- /dev/null
+++ b/mem0/llms/lmstudio.py
@@ -0,0 +1,48 @@
+from typing import Dict, List, Optional
+
+from openai import OpenAI
+
+from mem0.configs.llms.base import BaseLlmConfig
+from mem0.llms.base import LLMBase
+
+
+class LMStudioLLM(LLMBase):
+ def __init__(self, config: Optional[BaseLlmConfig] = None):
+ super().__init__(config)
+
+ self.config.model = self.config.model or "lmstudio-community/Meta-Llama-3.1-70B-Instruct-GGUF/Meta-Llama-3.1-70B-Instruct-IQ2_M.gguf"
+ self.config.api_key = self.config.api_key or "lm-studio"
+
+ self.client = OpenAI(base_url=self.config.lmstudio_base_url, api_key=self.config.api_key)
+
+ def generate_response(
+ self,
+ messages: List[Dict[str, str]],
+ response_format: dict = {"type": "json_object"},
+ tools: Optional[List[Dict]] = None,
+ tool_choice: str = "auto"
+ ):
+ """
+ Generate a response based on the given messages using LM Studio.
+
+ Args:
+ messages (list): List of message dicts containing 'role' and 'content'.
+            response_format (dict, optional): Format of the response. Defaults to {"type": "json_object"}.
+ tools (list, optional): List of tools that the model can call. Defaults to None.
+ tool_choice (str, optional): Tool choice method. Defaults to "auto".
+
+ Returns:
+ str: The generated response.
+ """
+ params = {
+ "model": self.config.model,
+ "messages": messages,
+ "temperature": self.config.temperature,
+ "max_tokens": self.config.max_tokens,
+ "top_p": self.config.top_p
+ }
+ if response_format:
+ params["response_format"] = response_format
+
+ response = self.client.chat.completions.create(**params)
+ return response.choices[0].message.content
\ No newline at end of file
diff --git a/mem0/utils/factory.py b/mem0/utils/factory.py
index f8af1d68..0d7a377f 100644
--- a/mem0/utils/factory.py
+++ b/mem0/utils/factory.py
@@ -25,6 +25,7 @@ class LlmFactory:
"gemini": "mem0.llms.gemini.GeminiLLM",
"deepseek": "mem0.llms.deepseek.DeepSeekLLM",
"xai": "mem0.llms.xai.XAILLM",
+ "lmstudio": "mem0.llms.lmstudio.LMStudioLLM",
}
@classmethod
@@ -47,6 +48,7 @@ class EmbedderFactory:
"gemini": "mem0.embeddings.gemini.GoogleGenAIEmbedding",
"vertexai": "mem0.embeddings.vertexai.VertexAIEmbedding",
"together": "mem0.embeddings.together.TogetherEmbedding",
+ "lmstudio": "mem0.embeddings.lmstudio.LMStudioEmbedding",
}
@classmethod
diff --git a/tests/embeddings/test_lm_studio_embeddings.py b/tests/embeddings/test_lm_studio_embeddings.py
new file mode 100644
index 00000000..2ba81499
--- /dev/null
+++ b/tests/embeddings/test_lm_studio_embeddings.py
@@ -0,0 +1,45 @@
+import pytest
+from unittest.mock import Mock, patch
+
+from mem0.configs.embeddings.base import BaseEmbedderConfig
+from mem0.embeddings.lmstudio import LMStudioEmbedding
+
+
+@pytest.fixture
+def mock_openai_client():
+    # LMStudioEmbedding talks to the local LM Studio server through the
+    # OpenAI SDK, so the OpenAI client is what gets patched.
+    with patch("mem0.embeddings.lmstudio.OpenAI") as mock_openai:
+        mock_client = Mock()
+        mock_openai.return_value = mock_client
+        yield mock_openai, mock_client
+
+
+def test_embed_text(mock_openai_client):
+    _, mock_client = mock_openai_client
+    config = BaseEmbedderConfig(
+        model="nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf",
+        embedding_dims=512,
+    )
+    embedder = LMStudioEmbedding(config)
+
+    mock_embedding = Mock(embedding=[0.1, 0.2, 0.3, 0.4, 0.5])
+    mock_client.embeddings.create.return_value = Mock(data=[mock_embedding])
+
+    embedding = embedder.embed("Sample text\nto embed.")
+
+    # Newlines are stripped before the request is sent.
+    mock_client.embeddings.create.assert_called_once_with(
+        input=["Sample text to embed."],
+        model="nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf",
+    )
+    assert embedding == [0.1, 0.2, 0.3, 0.4, 0.5]
+
+
+def test_default_configuration(mock_openai_client):
+    mock_openai, _ = mock_openai_client
+    embedder = LMStudioEmbedding(BaseEmbedderConfig())
+
+    assert embedder.config.model == "nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.f16.gguf"
+    assert embedder.config.embedding_dims == 1536
+    mock_openai.assert_called_once_with(base_url="http://localhost:1234/v1", api_key="lm-studio")
diff --git a/tests/llms/test_lm_studio.py b/tests/llms/test_lm_studio.py
new file mode 100644
index 00000000..13d98854
--- /dev/null
+++ b/tests/llms/test_lm_studio.py
@@ -0,0 +1,45 @@
+from unittest.mock import Mock, patch
+
+import pytest
+
+from mem0.configs.llms.base import BaseLlmConfig
+from mem0.llms.lmstudio import LMStudioLLM
+
+
+@pytest.fixture
+def mock_openai_client():
+    # LMStudioLLM talks to the local LM Studio server through the OpenAI SDK.
+    with patch("mem0.llms.lmstudio.OpenAI") as mock_openai:
+        mock_client = Mock()
+        mock_openai.return_value = mock_client
+        yield mock_client
+
+
+def test_generate_response_without_tools(mock_openai_client):
+    config = BaseLlmConfig(
+        model="lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf",
+        temperature=0.7,
+        max_tokens=100,
+        top_p=1.0,
+    )
+    llm = LMStudioLLM(config)
+    messages = [
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": "Hello, how are you?"},
+    ]
+
+    mock_message = Mock()
+    mock_message.content = "I'm doing well, thank you for asking!"
+    mock_openai_client.chat.completions.create.return_value = Mock(choices=[Mock(message=mock_message)])
+
+    response = llm.generate_response(messages)
+
+    mock_openai_client.chat.completions.create.assert_called_once_with(
+        model="lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf",
+        messages=messages,
+        temperature=0.7,
+        max_tokens=100,
+        top_p=1.0,
+        response_format={"type": "json_object"},
+    )
+    assert response == "I'm doing well, thank you for asking!"