Update max_token and formatting (#2273)

Author: Dev Khant
Date: 2025-02-28 15:59:34 +05:30
Committed by: GitHub
Parent: 6acb00731d
Commit: b131c4bfc4
25 changed files with 31 additions and 32 deletions


@@ -26,7 +26,7 @@ class AzureOpenAIEmbedding(EmbeddingBase):
default_headers=default_headers,
)
-def embed(self, text, memory_action:Optional[Literal["add", "search", "update"]] = None):
+def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
"""
Get the embedding for the given text using OpenAI.


@@ -18,7 +18,7 @@ class EmbeddingBase(ABC):
self.config = config
@abstractmethod
-def embed(self, text, memory_action:Optional[Literal["add", "search", "update"]]):
+def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]]):
"""
Get the embedding for the given text.
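
A minimal standalone sketch (not part of this commit or the repo) of how the updated abstract signature above might be satisfied by a concrete embedder; the class name, constructor, and zero-vector return are placeholders for illustration only:

from typing import Literal, Optional

class DummyEmbedding:
    def __init__(self, embedding_dims: int = 768):
        # Placeholder config; real embedders read this from their provider config.
        self.embedding_dims = embedding_dims

    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
        # A real provider would call its SDK here; this stub accepts but ignores memory_action.
        return [0.0] * self.embedding_dims

print(len(DummyEmbedding().embed("hello", memory_action="search")))  # 768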


@@ -18,7 +18,7 @@ class GoogleGenAIEmbedding(EmbeddingBase):
genai.configure(api_key=api_key)
-def embed(self, text, memory_action:Optional[Literal["add", "search", "update"]] = None):
+def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
"""
Get the embedding for the given text using Google Generative AI.
Args:


@@ -16,7 +16,7 @@ class HuggingFaceEmbedding(EmbeddingBase):
self.config.embedding_dims = self.config.embedding_dims or self.model.get_sentence_embedding_dimension()
-def embed(self, text, memory_action:Optional[Literal["add", "search", "update"]] = None):
+def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
"""
Get the embedding for the given text using Hugging Face.


@@ -39,7 +39,7 @@ class OllamaEmbedding(EmbeddingBase):
if not any(model.get("name") == self.config.model for model in local_models):
self.client.pull(self.config.model)
-def embed(self, text, memory_action:Optional[Literal["add", "search", "update"]] = None):
+def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
"""
Get the embedding for the given text using Ollama.


@@ -18,7 +18,7 @@ class OpenAIEmbedding(EmbeddingBase):
base_url = self.config.openai_base_url or os.getenv("OPENAI_API_BASE")
self.client = OpenAI(api_key=api_key, base_url=base_url)
-def embed(self, text, memory_action:Optional[Literal["add", "search", "update"]] = None):
+def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
"""
Get the embedding for the given text using OpenAI.


@@ -17,7 +17,7 @@ class TogetherEmbedding(EmbeddingBase):
self.config.embedding_dims = self.config.embedding_dims or 768
self.client = Together(api_key=api_key)
-def embed(self, text, memory_action:Optional[Literal["add", "search", "update"]] = None):
+def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
"""
Get the embedding for the given text using OpenAI.


@@ -13,13 +13,13 @@ class VertexAIEmbedding(EmbeddingBase):
self.config.model = self.config.model or "text-embedding-004"
self.config.embedding_dims = self.config.embedding_dims or 256
self.embedding_types = {
"add": self.config.memory_add_embedding_type or "RETRIEVAL_DOCUMENT",
"update": self.config.memory_update_embedding_type or "RETRIEVAL_DOCUMENT",
"search": self.config.memory_search_embedding_type or "RETRIEVAL_QUERY"
"search": self.config.memory_search_embedding_type or "RETRIEVAL_QUERY",
}
credentials_path = self.config.vertex_credentials_json
if credentials_path:
@@ -31,7 +31,7 @@ class VertexAIEmbedding(EmbeddingBase):
self.model = TextEmbeddingModel.from_pretrained(self.config.model)
-def embed(self, text, memory_action:Optional[Literal["add", "search", "update"]] = None):
+def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
"""
Get the embedding for the given text using Vertex AI.
@@ -45,9 +45,9 @@ class VertexAIEmbedding(EmbeddingBase):
if memory_action is not None:
if memory_action not in self.embedding_types:
raise ValueError(f"Invalid memory action: {memory_action}")
embedding_type = self.embedding_types[memory_action]
text_input = TextEmbeddingInput(text=text, task_type=embedding_type)
embeddings = self.model.get_embeddings(texts=[text_input], output_dimensionality=self.config.embedding_dims)
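
The Vertex AI hunks above are the only place in this commit where memory_action actually changes behaviour: it selects a task type from self.embedding_types before calling get_embeddings. A minimal standalone sketch (independent of the diff) of that selection logic, using the default mapping shown above; the function name and module-level dict are illustrative only:

from typing import Literal, Optional

EMBEDDING_TYPES = {
    "add": "RETRIEVAL_DOCUMENT",
    "update": "RETRIEVAL_DOCUMENT",
    "search": "RETRIEVAL_QUERY",
}

def pick_task_type(memory_action: Optional[Literal["add", "search", "update"]] = None) -> Optional[str]:
    # Mirrors the validation in the diff: None skips task typing, unknown actions raise.
    if memory_action is None:
        return None
    if memory_action not in EMBEDDING_TYPES:
        raise ValueError(f"Invalid memory action: {memory_action}")
    return EMBEDDING_TYPES[memory_action]

assert pick_task_type("search") == "RETRIEVAL_QUERY"
assert pick_task_type() is None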