Update max_token and formatting (#2273)

Dev Khant authored on 2025-02-28 15:59:34 +05:30 (committed by GitHub)
parent 6acb00731d
commit b131c4bfc4
25 changed files with 31 additions and 32 deletions


@@ -24,7 +24,7 @@ config = {
         "config": {
             "model": "arn:aws:bedrock:us-east-1:123456789012:model/your-model-name",
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
         }
     }
 }


@@ -19,7 +19,7 @@ config = {
         "config": {
             "model": "deepseek-chat", # default model
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
             "top_p": 1.0
         }
     }


@@ -19,7 +19,7 @@ config = {
         "config": {
             "model": "gemini-1.5-flash-latest",
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
         }
     }
 }


@@ -19,7 +19,7 @@ config = {
         "config": {
             "model": "gemini/gemini-pro",
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
         }
     }
 }


@@ -17,7 +17,7 @@ config = {
         "config": {
             "model": "mixtral-8x7b-32768",
             "temperature": 0.1,
-            "max_tokens": 1000,
+            "max_tokens": 2000,
         }
     }
 }


@@ -14,7 +14,7 @@ config = {
         "config": {
             "model": "gpt-4o-mini",
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
         }
     }
 }


@@ -18,7 +18,7 @@ config = {
         "config": {
             "model": "gpt-4o",
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
         }
     }
 }


@@ -15,7 +15,7 @@ config = {
         "config": {
             "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
         }
     }
 }


@@ -21,7 +21,7 @@ config = {
         "config": {
             "model": "grok-2-latest",
             "temperature": 0.1,
-            "max_tokens": 1000,
+            "max_tokens": 2000,
         }
     }
 }


@@ -37,7 +37,7 @@ config = {
         "config": {
             "model": "llama3.1:latest",
             "temperature": 0,
-            "max_tokens": 8000,
+            "max_tokens": 2000,
             "ollama_base_url": "http://localhost:11434", # Ensure this URL is correct
         },
     },


@@ -53,7 +53,7 @@ config = {
         "config": {
             "model": "gpt-4o",
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
         }
     },
     "custom_prompt": custom_prompt,


@@ -80,7 +80,7 @@ config = {
         "config": {
             "model": "gpt-4o",
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
         },
     },
     "embedder": {


@@ -81,7 +81,7 @@ config = {
         "config": {
             "model": "gpt-4o",
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
         }
     },
     "graph_store": {


@@ -16,7 +16,7 @@ class BaseLlmConfig(ABC):
         model: Optional[str] = None,
         temperature: float = 0.1,
         api_key: Optional[str] = None,
-        max_tokens: int = 3000,
+        max_tokens: int = 2000,
         top_p: float = 0.1,
         top_k: int = 1,
         # Openrouter specific
@@ -48,7 +48,7 @@ class BaseLlmConfig(ABC):
         :type temperature: float, optional
         :param api_key: OpenAI API key to be use, defaults to None
         :type api_key: Optional[str], optional
-        :param max_tokens: Controls how many tokens are generated, defaults to 3000
+        :param max_tokens: Controls how many tokens are generated, defaults to 2000
         :type max_tokens: int, optional
         :param top_p: Controls the diversity of words. Higher values (closer to 1) make word selection more diverse,
             defaults to 1
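
With this change, any config that leaves max_tokens unset falls back to 2000 instead of 3000. A minimal sketch of how the new default surfaces through the config-dict pattern used in the docs above (the provider and model shown are illustrative, not part of this commit):

from mem0 import Memory

config = {
    "llm": {
        "provider": "openai",
        "config": {
            "model": "gpt-4o",
            "temperature": 0.2,
            # No "max_tokens" key here: BaseLlmConfig now fills in 2000 (previously 3000).
        }
    }
}

m = Memory.from_config(config)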


@@ -17,7 +17,7 @@ class VertexAIEmbedding(EmbeddingBase):
         self.embedding_types = {
             "add": self.config.memory_add_embedding_type or "RETRIEVAL_DOCUMENT",
             "update": self.config.memory_update_embedding_type or "RETRIEVAL_DOCUMENT",
-            "search": self.config.memory_search_embedding_type or "RETRIEVAL_QUERY"
+            "search": self.config.memory_search_embedding_type or "RETRIEVAL_QUERY",
         }
         credentials_path = self.config.vertex_credentials_json


@@ -63,7 +63,6 @@ class OpenAILLM(LLMBase):
         response_format=None,
         tools: Optional[List[Dict]] = None,
         tool_choice: str = "auto",
-        max_tokens: int = 100,
     ):
         """
         Generate a response based on the given messages using OpenAI.
@@ -81,7 +80,7 @@ class OpenAILLM(LLMBase):
             "model": self.config.model,
             "messages": messages,
             "temperature": self.config.temperature,
-            "max_tokens": max_tokens,
+            "max_tokens": self.config.max_tokens,
             "top_p": self.config.top_p,
         }
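
After this change, generate_response no longer accepts its own max_tokens argument (previously defaulting to a hard-coded 100); the request now uses whatever max_tokens the LLM config carries, 2000 by default. A minimal usage sketch, assuming the public mem0 API and the config-dict pattern from the docs above (the model, limit, and user_id are illustrative):

from mem0 import Memory

config = {
    "llm": {
        "provider": "openai",
        "config": {
            "model": "gpt-4o-mini",
            "max_tokens": 2000,  # now flows through self.config.max_tokens into the OpenAI request
        }
    }
}

m = Memory.from_config(config)
# Internal LLM calls made while adding a memory now honor the configured limit
# instead of the old per-call default of 100 tokens.
m.add("I prefer vegetarian restaurants.", user_id="alice")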