diff --git a/docs/features/custom-prompts.mdx b/docs/features/custom-prompts.mdx
new file mode 100644
index 00000000..ff6fe0e6
--- /dev/null
+++ b/docs/features/custom-prompts.mdx
@@ -0,0 +1,109 @@
+---
+title: Custom Prompts
+description: 'Enhance your product experience by adding custom prompts tailored to your needs'
+---
+
+## Introduction to Custom Prompts
+
+Custom prompts allow you to tailor the behavior of your Mem0 instance to specific use cases or domains.
+By defining a custom prompt, you can control how information is extracted, processed, and stored in your memory system.
+
+To create an effective custom prompt:
+1. Be specific about the information to extract.
+2. Provide few-shot examples to guide the LLM.
+3. Ensure examples follow the format shown below.
+
+Example of a custom prompt:
+
+```python
+custom_prompt = """
+Please only extract entities containing customer support information, order details, and user information.
+Here are some few-shot examples:
+
+Input: Hi.
+Output: {{"facts" : []}}
+
+Input: The weather is nice today.
+Output: {{"facts" : []}}
+
+Input: My order #12345 hasn't arrived yet.
+Output: {{"facts" : ["Order #12345 not received"]}}
+
+Input: I'm John Doe, and I'd like to return the shoes I bought last week.
+Output: {{"facts" : ["Customer name: John Doe", "Wants to return shoes", "Purchase made last week"]}}
+
+Input: I ordered a red shirt, size medium, but received a blue one instead.
+Output: {{"facts" : ["Ordered red shirt, size medium", "Received blue shirt instead"]}}
+
+Return the facts and customer information in JSON format as shown above.
+"""
+
+```
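+
+Whichever domain you target, keep the output format shown above: the memory pipeline expects the LLM response to be a JSON object with a `facts` key.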
+
+Next, pass the custom prompt to the config when initializing `Memory`:
+
+```python
+from mem0 import Memory
+
+config = {
+    "llm": {
+        "provider": "openai",
+        "config": {
+            "model": "gpt-4o",
+            "temperature": 0.2,
+            "max_tokens": 1500,
+        }
+    },
+    "custom_prompt": custom_prompt,
+    "version": "v1.1"
+}
+
+m = Memory.from_config(config_dict=config)
+```
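+
+When `custom_prompt` is set, it replaces the default fact-retrieval system prompt during `add`, and the parsed messages are sent to the LLM as the user prompt in the form `Input: <messages>` (see the `_add_to_vector_store` change in `mem0/memory/main.py` below).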
+
+### Example 1
+
+In this example, we add a memory of a user ordering a laptop. As shown in the output, the custom prompt extracts the relevant order details from the user's message.
+
+```python Code
+m.add("Yesterday, I ordered a laptop, the order id is 12345", user_id="alice")
+```
+
+```json Output
+{
+    "results": [
+        {
+            "memory": "Ordered a laptop",
+            "event": "ADD"
+        },
+        {
+            "memory": "Order ID: 12345",
+            "event": "ADD"
+        },
+        {
+            "memory": "Order placed yesterday",
+            "event": "ADD"
+        }
+    ],
+    "relations": []
+}
+```
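+
+The `relations` key is part of the `v1.1` output format requested in the config; it remains empty here because no graph store is configured.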
+
+### Example 2
+
+In this example, we add a memory of a user who likes going on hikes. Because this message is unrelated to the customer-support use case defined in the custom prompt,
+no memory is added.
+
+```python Code
+m.add("I like going to hikes", user_id="alice")
+```
+
+```json Output
+{
+    "results": [],
+    "relations": []
+}
+```
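+
+To verify what was stored, you can query the memory as usual. A minimal sketch, assuming the `m` instance and the `alice` user from the examples above:
+
+```python
+# List everything stored for this user
+all_memories = m.get_all(user_id="alice")
+
+# Or search memories related to a specific topic
+related = m.search("order status", user_id="alice")
+```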
+
diff --git a/docs/mint.json b/docs/mint.json
index 481b7928..a21c0cca 100644
--- a/docs/mint.json
+++ b/docs/mint.json
@@ -131,7 +131,7 @@
},
{
"group": "Features",
- "pages": ["features/openai_compatibility"]
+ "pages": ["features/openai_compatibility", "features/custom-prompts"]
}
]
},
diff --git a/mem0/configs/base.py b/mem0/configs/base.py
index 42a0a2b2..a83e00d6 100644
--- a/mem0/configs/base.py
+++ b/mem0/configs/base.py
@@ -56,6 +56,10 @@ class MemoryConfig(BaseModel):
description="The version of the API",
default="v1.0",
)
+    custom_prompt: Optional[str] = Field(
+        description="Custom prompt used for fact extraction when adding memories",
+        default=None,
+    )
class AzureConfig(BaseModel):
diff --git a/mem0/memory/main.py b/mem0/memory/main.py
index a3bb5024..5516227c 100644
--- a/mem0/memory/main.py
+++ b/mem0/memory/main.py
@@ -28,6 +28,8 @@ logger = logging.getLogger(__name__)
class Memory(MemoryBase):
def __init__(self, config: MemoryConfig = MemoryConfig()):
self.config = config
+
+        self.custom_prompt = self.config.custom_prompt
self.embedding_model = EmbedderFactory.create(
self.config.embedder.provider, self.config.embedder.config
)
@@ -131,7 +133,11 @@ class Memory(MemoryBase):
def _add_to_vector_store(self, messages, metadata, filters):
parsed_messages = parse_messages(messages)
- system_prompt, user_prompt = get_fact_retrieval_messages(parsed_messages)
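+        # A user-supplied prompt replaces the default fact-retrieval system prompt.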
+        if self.custom_prompt:
+            system_prompt = self.custom_prompt
+            user_prompt = f"Input: {parsed_messages}"
+        else:
+            system_prompt, user_prompt = get_fact_retrieval_messages(parsed_messages)
response = self.llm.generate_response(
messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
diff --git a/pyproject.toml b/pyproject.toml
index f006eb78..df07a518 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "mem0ai"
-version = "0.1.12"
+version = "0.1.13"
description = "Long-term memory for AI Agents"
authors = ["Mem0 "]
exclude = [