Formatting and Client changes (#2247)

Author: Dev Khant
Date: 2025-02-23 00:39:26 +05:30
Committed by: GitHub
Parent: 17887b5959
Commit: c42934b7fb
9 changed files with 50 additions and 100 deletions

View File

@@ -6,14 +6,10 @@ from typing import Any, Dict, List, Optional, Union
import httpx
from mem0.memory.setup import get_user_id, setup_config
from mem0.memory.telemetry import capture_client_event
logger = logging.getLogger(__name__)
# Setup user config
setup_config()
warnings.filterwarnings("default", category=DeprecationWarning)
@@ -78,17 +74,16 @@ class MemoryClient:
self.host = host or "https://api.mem0.ai"
self.org_id = org_id
self.project_id = project_id
self.user_id = get_user_id()
if not self.api_key:
raise ValueError("Mem0 API Key not provided. Please provide an API Key.")
self.client = httpx.Client(
base_url=self.host,
headers={"Authorization": f"Token {self.api_key}", "Mem0-User-ID": self.user_id},
headers={"Authorization": f"Token {self.api_key}"},
timeout=300,
)
self._validate_api_key()
self.user_email = self._validate_api_key()
capture_client_event("client.init", self)
def _validate_api_key(self):
@@ -104,6 +99,8 @@ class MemoryClient:
self.org_id = data.get("org_id")
self.project_id = data.get("project_id")
return data.get("user_email")
except httpx.HTTPStatusError as e:
try:
error_data = e.response.json()
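
Net effect of this first hunk: the client no longer sends a Mem0-User-ID header, and _validate_api_key() now returns the account email, which the constructor stores as self.user_email for telemetry. A minimal sketch of that flow, assuming a /v1/ping/-style validation endpoint and response shape (internal names here are illustrative, not mem0's exact code):

import httpx

class ClientSketch:
    def __init__(self, api_key, host="https://api.mem0.ai"):
        if not api_key:
            raise ValueError("Mem0 API Key not provided. Please provide an API Key.")
        self.client = httpx.Client(
            base_url=host,
            headers={"Authorization": f"Token {api_key}"},  # Mem0-User-ID header removed
            timeout=300,
        )
        # _validate_api_key() now returns the validated account email,
        # kept on the instance so telemetry can use it as an identity.
        self.user_email = self._validate_api_key()

    def _validate_api_key(self):
        response = self.client.get("/v1/ping/")  # endpoint path is an assumption
        response.raise_for_status()
        return response.json().get("user_email")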

View File

@@ -36,7 +36,6 @@ class OpenSearchConfig(BaseModel):
extra_fields = input_fields - allowed_fields
if extra_fields:
raise ValueError(
f"Extra fields not allowed: {', '.join(extra_fields)}. "
f"Allowed fields: {', '.join(allowed_fields)}"
f"Extra fields not allowed: {', '.join(extra_fields)}. " f"Allowed fields: {', '.join(allowed_fields)}"
)
return values
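
This hunk is cosmetic: Black joined the two f-strings onto one line, and adjacent string literals concatenate at compile time, so the error message is byte-for-byte identical. For context, a self-contained sketch of the reject-unknown-fields pattern the validator implements (Pydantic v2's model_validator is assumed; the real config declares more fields than shown):

from pydantic import BaseModel, model_validator

class StrictConfig(BaseModel):
    host: str = "localhost"
    port: int = 9200

    @model_validator(mode="before")
    @classmethod
    def validate_extra_fields(cls, values):
        allowed_fields = set(cls.model_fields)
        extra_fields = set(values) - allowed_fields
        if extra_fields:
            # Same message as the hunk above; one line or two, identical at runtime.
            raise ValueError(
                f"Extra fields not allowed: {', '.join(extra_fields)}. " f"Allowed fields: {', '.join(allowed_fields)}"
            )
        return values

StrictConfig(host="node1", porty=9200)  # raises ValueError: Extra fields not allowed: porty. ...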

View File

@@ -178,7 +178,7 @@ class Memory(MemoryBase):
retrieved_old_memory.append({"id": mem.id, "text": mem.payload["data"]})
unique_data = {}
for item in retrieved_old_memory:
unique_data[item['id']] = item
unique_data[item["id"]] = item
retrieved_old_memory = list(unique_data.values())
logging.info(f"Total existing memories: {len(retrieved_old_memory)}")

View File

@@ -29,7 +29,7 @@ class AnonymousTelemetry:
if not MEM0_TELEMETRY:
self.posthog.disabled = True
def capture_event(self, event_name, properties=None):
def capture_event(self, event_name, properties=None, user_email=None):
if properties is None:
properties = {}
properties = {
@@ -43,7 +43,8 @@ class AnonymousTelemetry:
"machine": platform.machine(),
**properties,
}
self.posthog.capture(distinct_id=self.user_id, event=event_name, properties=properties)
distinct_id = self.user_id if user_email is None else user_email
self.posthog.capture(distinct_id=distinct_id, event=event_name, properties=properties)
def close(self):
self.posthog.shutdown()
@@ -82,4 +83,4 @@ def capture_client_event(event_name, instance, additional_data=None):
if additional_data:
event_data.update(additional_data)
telemetry.capture_event(event_name, event_data)
telemetry.capture_event(event_name, event_data, instance.user_email)
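
This is the behavioral half of the commit: capture_event() grows an optional user_email parameter, and when capture_client_event() forwards instance.user_email (set during API-key validation above), that email replaces the anonymous machine-level id as the PostHog distinct_id. A minimal sketch of the selection logic, with the PostHog client passed in rather than constructed:

def capture_event(posthog, anonymous_user_id, event_name, properties=None, user_email=None):
    properties = dict(properties or {})
    # Prefer the validated account email as the identity; fall back to the
    # anonymous per-machine id when no email is available.
    distinct_id = anonymous_user_id if user_email is None else user_email
    posthog.capture(distinct_id=distinct_id, event=event_name, properties=properties)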

View File

@@ -55,8 +55,11 @@ def get_image_description(image_url):
{
"role": "user",
"content": [
{"type": "text", "text": "Provide a description of the image and do not include any additional text."},
{"type": "image_url", "image_url": {"url": image_url}}
{
"type": "text",
"text": "Provide a description of the image and do not include any additional text.",
},
{"type": "image_url", "image_url": {"url": image_url}},
],
},
],
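
The message reflow above is purely cosmetic. For readers without the surrounding file, this is the call shape it belongs to: an OpenAI-style multimodal chat completion with one text part and one image_url part. Client construction and model name below are placeholders, not necessarily mem0's exact choices:

from openai import OpenAI

def get_image_description(image_url):
    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    response = client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model
        messages=[
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": "Provide a description of the image and do not include any additional text.",
                    },
                    {"type": "image_url", "image_url": {"url": image_url}},
                ],
            },
        ],
    )
    return response.choices[0].message.content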

View File

@@ -68,7 +68,7 @@ class VectorStoreFactory:
"azure_ai_search": "mem0.vector_stores.azure_ai_search.AzureAISearch",
"redis": "mem0.vector_stores.redis.RedisDB",
"elasticsearch": "mem0.vector_stores.elasticsearch.ElasticsearchDB",
"opensearch": "mem0.vector_stores.opensearch.OpenSearchDB"
"opensearch": "mem0.vector_stores.opensearch.OpenSearchDB",
}
@classmethod
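
Only a trailing comma is added here, which keeps the provider map diff-stable under Black when new entries land. For context, factories keyed by dotted paths like these typically resolve the class lazily; an illustrative sketch of that pattern (not mem0's exact create() implementation):

import importlib

provider_to_class = {
    "elasticsearch": "mem0.vector_stores.elasticsearch.ElasticsearchDB",
    "opensearch": "mem0.vector_stores.opensearch.OpenSearchDB",
}

def load_class(provider):
    # Split "package.module.ClassName" into its module and class parts,
    # import the module, and pull the class off it.
    module_path, class_name = provider_to_class[provider].rsplit(".", 1)
    module = importlib.import_module(module_path)
    return getattr(module, class_name)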

View File

@@ -118,8 +118,7 @@ class AzureAISearch(VectorStoreBase):
logger.info(f"Inserting {len(vectors)} vectors into index {self.index_name}")
documents = [
self._generate_document(vector, payload, id)
for id, vector, payload in zip(ids, vectors, payloads)
self._generate_document(vector, payload, id) for id, vector, payload in zip(ids, vectors, payloads)
]
self.search_client.upload_documents(documents)
@@ -133,7 +132,7 @@ class AzureAISearch(VectorStoreBase):
condition = f"{key} eq {value}"
filter_conditions.append(condition)
# Use 'and' to join multiple conditions
filter_expression = ' and '.join(filter_conditions)
filter_expression = " and ".join(filter_conditions)
return filter_expression
def search(self, query, limit=5, filters=None):
@@ -152,14 +151,8 @@ class AzureAISearch(VectorStoreBase):
if filters:
filter_expression = self._build_filter_expression(filters)
vector_query = VectorizedQuery(
vector=query, k_nearest_neighbors=limit, fields="vector"
)
search_results = self.search_client.search(
vector_queries=[vector_query],
filter=filter_expression,
top=limit
)
vector_query = VectorizedQuery(vector=query, k_nearest_neighbors=limit, fields="vector")
search_results = self.search_client.search(vector_queries=[vector_query], filter=filter_expression, top=limit)
results = []
for result in search_results:
@@ -245,11 +238,7 @@ class AzureAISearch(VectorStoreBase):
if filters:
filter_expression = self._build_filter_expression(filters)
search_results = self.search_client.search(
search_text="*",
filter=filter_expression,
top=limit
)
search_results = self.search_client.search(search_text="*", filter=filter_expression, top=limit)
results = []
for result in search_results:
payload = json.loads(result["payload"])
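
All four Azure AI Search hunks are reflows with no behavior change. The pattern they compact deserves one sketch: term filters become an OData expression of 'eq' clauses joined with " and ", passed alongside a VectorizedQuery. VectorizedQuery is the real azure-search-documents class; the field names and the string-quoting rule below are simplifying assumptions:

from azure.search.documents.models import VectorizedQuery

def build_filter_expression(filters):
    filter_conditions = []
    for key, value in filters.items():
        value = f"'{value}'" if isinstance(value, str) else value  # quote strings for OData
        filter_conditions.append(f"{key} eq {value}")
    return " and ".join(filter_conditions)

def search(search_client, query_vector, limit=5, filters=None):
    filter_expression = build_filter_expression(filters) if filters else None
    vector_query = VectorizedQuery(vector=query_vector, k_nearest_neighbors=limit, fields="vector")
    return search_client.search(vector_queries=[vector_query], filter=filter_expression, top=limit)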

View File

@@ -49,30 +49,14 @@ class ElasticsearchDB(VectorStoreBase):
def create_index(self) -> None:
"""Create Elasticsearch index with proper mappings if it doesn't exist"""
index_settings = {
"settings": {
"index": {
"number_of_replicas": 1,
"number_of_shards": 5,
"refresh_interval": "1s"
}
},
"settings": {"index": {"number_of_replicas": 1, "number_of_shards": 5, "refresh_interval": "1s"}},
"mappings": {
"properties": {
"text": {"type": "text"},
"vector": {
"type": "dense_vector",
"dims": self.vector_dim,
"index": True,
"similarity": "cosine"
},
"metadata": {
"type": "object",
"properties": {
"user_id": {"type": "keyword"}
}
}
"vector": {"type": "dense_vector", "dims": self.vector_dim, "index": True, "similarity": "cosine"},
"metadata": {"type": "object", "properties": {"user_id": {"type": "keyword"}}},
}
}
},
}
if not self.client.indices.exists(index=self.collection_name):
@@ -114,8 +98,8 @@ class ElasticsearchDB(VectorStoreBase):
"_id": id_,
"_source": {
"vector": vec,
"metadata": payloads[i] # Store all metadata in the metadata field
}
"metadata": payloads[i], # Store all metadata in the metadata field
},
}
actions.append(action)
@@ -127,7 +111,7 @@ class ElasticsearchDB(VectorStoreBase):
OutputData(
id=id_,
score=1.0, # Default score for inserts
payload=payloads[i]
payload=payloads[i],
)
)
return results
@@ -136,35 +120,20 @@ class ElasticsearchDB(VectorStoreBase):
"""Search for similar vectors using KNN search with pre-filtering."""
if not filters:
# If no filters, just do KNN search
search_query = {
"knn": {
"field": "vector",
"query_vector": query,
"k": limit,
"num_candidates": limit * 2
}
}
search_query = {"knn": {"field": "vector", "query_vector": query, "k": limit, "num_candidates": limit * 2}}
else:
# If filters exist, apply them with KNN search
filter_conditions = []
for key, value in filters.items():
filter_conditions.append({
"term": {
f"metadata.{key}": value
}
})
filter_conditions.append({"term": {f"metadata.{key}": value}})
search_query = {
"knn": {
"field": "vector",
"query_vector": query,
"k": limit,
"num_candidates": limit * 2,
"filter": {
"bool": {
"must": filter_conditions
}
}
"filter": {"bool": {"must": filter_conditions}},
}
}
@@ -173,11 +142,7 @@ class ElasticsearchDB(VectorStoreBase):
results = []
for hit in response["hits"]["hits"]:
results.append(
OutputData(
id=hit["_id"],
score=hit["_score"],
payload=hit.get("_source", {}).get("metadata", {})
)
OutputData(id=hit["_id"], score=hit["_score"], payload=hit.get("_source", {}).get("metadata", {}))
)
return results
@@ -203,7 +168,7 @@ class ElasticsearchDB(VectorStoreBase):
return OutputData(
id=response["_id"],
score=1.0, # Default score for direct get
payload=response["_source"].get("metadata", {})
payload=response["_source"].get("metadata", {}),
)
except KeyError as e:
logger.warning(f"Missing key in Elasticsearch response: {e}")
@@ -234,16 +199,8 @@ class ElasticsearchDB(VectorStoreBase):
if filters:
filter_conditions = []
for key, value in filters.items():
filter_conditions.append({
"term": {
f"metadata.{key}": value
}
})
query["query"] = {
"bool": {
"must": filter_conditions
}
}
filter_conditions.append({"term": {f"metadata.{key}": value}})
query["query"] = {"bool": {"must": filter_conditions}}
if limit:
query["size"] = limit
@@ -256,7 +213,7 @@ class ElasticsearchDB(VectorStoreBase):
OutputData(
id=hit["_id"],
score=1.0, # Default score for list operation
payload=hit.get("_source", {}).get("metadata", {})
payload=hit.get("_source", {}).get("metadata", {}),
)
)
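
Every Elasticsearch hunk in this file is likewise a Black reflow. The query shape being compacted is the useful part: a top-level knn clause, with term filters on metadata.* keyword fields folded into knn.filter when present. A faithful, dependency-free reconstruction of that construction:

def build_knn_query(query_vector, limit=5, filters=None):
    knn = {
        "field": "vector",
        "query_vector": query_vector,
        "k": limit,
        "num_candidates": limit * 2,  # over-fetch candidates, then keep the top k
    }
    if filters:
        filter_conditions = [{"term": {f"metadata.{key}": value}} for key, value in filters.items()]
        knn["filter"] = {"bool": {"must": filter_conditions}}
    return {"knn": knn}

# e.g. client.search(index=collection_name, body=build_knn_query(vec, filters={"user_id": "u1"}))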

View File

@@ -50,10 +50,7 @@ class OpenSearchDB(VectorStoreBase):
"mappings": {
"properties": {
"text": {"type": "text"},
"vector": {
"type": "knn_vector",
"dimension": self.vector_dim
},
"vector": {"type": "knn_vector", "dimension": self.vector_dim},
"metadata": {"type": "object", "properties": {"user_id": {"type": "keyword"}}},
}
},
@@ -73,7 +70,7 @@ class OpenSearchDB(VectorStoreBase):
"vector": {
"type": "knn_vector",
"dimension": vector_size,
"method": { "engine": "lucene", "name": "hnsw", "space_type": "cosinesimil"},
"method": {"engine": "lucene", "name": "hnsw", "space_type": "cosinesimil"},
},
"payload": {"type": "object"},
"id": {"type": "keyword"},
@@ -125,12 +122,12 @@ class OpenSearchDB(VectorStoreBase):
"k": limit,
}
}
}
},
}
if filters:
filter_conditions = [{"term": {f"metadata.{key}": value}} for key, value in filters.items()]
search_query["query"]["knn"]["vector"]["filter"] = { "bool": {"filter": filter_conditions} }
search_query["query"]["knn"]["vector"]["filter"] = {"bool": {"filter": filter_conditions}}
response = self.client.search(index=self.collection_name, body=search_query)
@@ -180,10 +177,17 @@ class OpenSearchDB(VectorStoreBase):
query = {"query": {"match_all": {}}}
if filters:
query["query"] = {"bool": {"must": [{"term": {f"metadata.{key}": value}} for key, value in filters.items()]}}
query["query"] = {
"bool": {"must": [{"term": {f"metadata.{key}": value}} for key, value in filters.items()]}
}
if limit:
query["size"] = limit
response = self.client.search(index=self.collection_name, body=query)
return [[OutputData(id=hit["_id"], score=1.0, payload=hit["_source"].get("metadata", {})) for hit in response["hits"]["hits"]]]
return [
[
OutputData(id=hit["_id"], score=1.0, payload=hit["_source"].get("metadata", {}))
for hit in response["hits"]["hits"]
]
]
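
The last hunk splits an over-long one-liner; the nested result shape and contents are unchanged. The list-query construction it reflows, as a standalone sketch:

def build_list_query(filters=None, limit=None):
    query = {"query": {"match_all": {}}}
    if filters:
        # Replace match_all with exact-match term filters on metadata fields
        query["query"] = {
            "bool": {"must": [{"term": {f"metadata.{key}": value}} for key, value in filters.items()]}
        }
    if limit:
        query["size"] = limit
    return query

# build_list_query({"user_id": "u1"}, limit=10)
# -> {'query': {'bool': {'must': [{'term': {'metadata.user_id': 'u1'}}]}}, 'size': 10}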