Feature: Custom Ollama endpoint base_url (#1301)
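This change threads an optional base_url through the LLM configuration so the Ollama provider can talk to a server other than the default http://localhost:11434.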
@@ -5,6 +5,7 @@ llm:
     temperature: 0.5
     top_p: 1
     stream: true
+    base_url: http://localhost:11434
 
 embedder:
   provider: huggingface
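With this in place, a config file can point the Ollama LLM at a remote server. A minimal sketch of such a config, assuming the usual provider/config nesting; the host and model name below are placeholders, not part of this commit:

llm:
  provider: ollama
  config:
    model: llama2
    temperature: 0.5
    top_p: 1
    stream: true
    base_url: http://192.168.1.10:11434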
@@ -96,6 +96,7 @@ class BaseLlmConfig(BaseConfig):
         endpoint: Optional[str] = None,
         model_kwargs: Optional[dict[str, Any]] = None,
         local: Optional[bool] = False,
+        base_url: Optional[str] = None,
     ):
         """
         Initializes a configuration class instance for the LLM.
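The keyword can also be passed when constructing the config in code. A minimal sketch, assuming BaseLlmConfig is importable from embedchain.config and that the other keyword arguments shown in the YAML above exist on the constructor:

from embedchain.config import BaseLlmConfig

# base_url defaults to None, in which case the Ollama client's own
# default (http://localhost:11434) applies.
config = BaseLlmConfig(
    temperature=0.5,
    top_p=1,
    stream=True,
    base_url="http://192.168.1.10:11434",
)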
@@ -169,6 +170,7 @@ class BaseLlmConfig(BaseConfig):
         self.endpoint = endpoint
         self.model_kwargs = model_kwargs
         self.local = local
+        self.base_url = base_url
 
         if isinstance(prompt, str):
             prompt = Template(prompt)
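After initialization the value is plain instance state, so any provider that receives the config object can read it; a quick illustration:

config = BaseLlmConfig(base_url="http://192.168.1.10:11434")
print(config.base_url)  # http://192.168.1.10:11434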
@@ -31,6 +31,7 @@ class OllamaLlm(BaseLlm):
             temperature=config.temperature,
             top_p=config.top_p,
             callback_manager=CallbackManager(callback_manager),
+            base_url=config.base_url,
         )
 
         return llm.invoke(prompt)
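Downstream, the value is simply forwarded to LangChain's Ollama wrapper, which accepts base_url. A standalone sketch of the equivalent direct call; the import path and model name are assumptions, not taken from this diff:

from langchain_community.llms.ollama import Ollama

llm = Ollama(model="llama2", base_url="http://192.168.1.10:11434")
print(llm.invoke("Why is the sky blue?"))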
@@ -427,6 +427,7 @@ def validate_config(config_data):
                 Optional("endpoint"): str,
                 Optional("model_kwargs"): dict,
                 Optional("local"): bool,
+                Optional("base_url"): str,
             },
         },
         Optional("vectordb"): {
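validate_config builds on the schema package, so the new key only has to be a string when present. A reduced, self-contained illustration of that check (not the project's full schema):

from schema import Optional, Schema, SchemaError

llm_config_schema = Schema({Optional("base_url"): str, Optional("local"): bool})

llm_config_schema.validate({"base_url": "http://192.168.1.10:11434"})  # passes

try:
    llm_config_schema.validate({"base_url": 11434})  # wrong type
except SchemaError as err:
    print(err)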