Reverting the tools commit (#2404)
This commit restores the tool-calling path that an earlier commit had removed from mem0's OllamaLLM: the tools and tool_choice parameters return to generate_response, along with the _parse_response helper that unpacks tool calls.
@@ -3,56 +3,77 @@ from typing import Dict, List, Optional
 try:
     from ollama import Client
 except ImportError:
-    raise ImportError(
-        "The 'ollama' library is required. Please install it using 'pip install ollama'."
-    )
+    raise ImportError("The 'ollama' library is required. Please install it using 'pip install ollama'.")
 
 from mem0.configs.llms.base import BaseLlmConfig
 from mem0.llms.base import LLMBase
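The try/except guard above turns a missing optional dependency into an actionable install hint rather than a bare ModuleNotFoundError. A minimal sketch of the same pattern, with "somelib" standing in for any optional dependency:

try:
    import somelib  # "somelib" is a placeholder, not a real requirement
except ImportError:
    # Re-raise with an install hint so users know how to fix their environment
    raise ImportError("The 'somelib' library is required. Please install it using 'pip install somelib'.")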
 class OllamaLLM(LLMBase):
-    """
-    A class for interacting with Ollama's language models using the specified configuration.
-    """
-
     def __init__(self, config: Optional[BaseLlmConfig] = None):
-        """
-        Initializes the OllamaLLM instance with the given configuration.
-
-        Args:
-            config (Optional[BaseLlmConfig]): Configuration settings for the language model.
-        """
         super().__init__(config)
 
         if not self.config.model:
             self.config.model = "llama3.1:70b"
-
         self.client = Client(host=self.config.ollama_base_url)
         self._ensure_model_exists()
 
     def _ensure_model_exists(self):
         """
-        Ensures the specified model exists locally. If not, pulls it from Ollama.
+        Ensure the specified model exists locally. If not, pull it from Ollama.
         """
         local_models = self.client.list()["models"]
         if not any(model.get("name") == self.config.model for model in local_models):
             self.client.pull(self.config.model)
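Because the pull-if-missing check only touches the client, it can be exercised standalone. A sketch assuming an older ollama-python release where Client.list() returns a plain dict, as the subscript access in the diff implies (newer releases return typed response objects); the helper name and host are illustrative:

from ollama import Client

def ensure_model(client: Client, model: str) -> None:
    # Older ollama-python releases: list() -> {"models": [{"name": ...}, ...]}
    local_models = client.list()["models"]
    if not any(m.get("name") == model for m in local_models):
        # Model is absent locally, so download it from the Ollama registry
        client.pull(model)

client = Client(host="http://localhost:11434")
ensure_model(client, "llama3.1:70b")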
+    def _parse_response(self, response, tools):
+        """
+        Process the response based on whether tools are used or not.
+
+        Args:
+            response: The raw response from API.
+            tools: The list of tools provided in the request.
+
+        Returns:
+            str or dict: The processed response.
+        """
+        if tools:
+            processed_response = {
+                "content": response["message"]["content"],
+                "tool_calls": [],
+            }
+
+            if response["message"].get("tool_calls"):
+                for tool_call in response["message"]["tool_calls"]:
+                    processed_response["tool_calls"].append(
+                        {
+                            "name": tool_call["function"]["name"],
+                            "arguments": tool_call["function"]["arguments"],
+                        }
+                    )
+
+            return processed_response
+        else:
+            return response["message"]["content"]
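The restored helper flattens Ollama's nested tool-call structure into plain {"name", "arguments"} dicts. A worked example of that mapping; the get_weather payload is invented for illustration, though it follows the shape the Ollama chat API returns:

raw = {
    "message": {
        "content": "",
        "tool_calls": [
            {"function": {"name": "get_weather", "arguments": {"city": "Paris"}}}
        ],
    }
}

# With a tools list supplied, _parse_response(raw, tools) returns:
#   {"content": "", "tool_calls": [{"name": "get_weather", "arguments": {"city": "Paris"}}]}
# With tools=None it returns raw["message"]["content"] unchanged.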
     def generate_response(
         self,
         messages: List[Dict[str, str]],
-        response_format: Optional[str] = None,
-    ) -> str:
+        response_format=None,
+        tools: Optional[List[Dict]] = None,
+        tool_choice: str = "auto",
+    ):
         """
-        Generates a response using Ollama based on the provided messages.
+        Generate a response based on the given messages using OpenAI.
 
         Args:
-            messages (List[Dict[str, str]]): A list of dictionaries, each containing a 'role' and 'content' key.
-            response_format (Optional[str]): The desired format of the response. Defaults to None.
+            messages (list): List of message dicts containing 'role' and 'content'.
+            response_format (str or object, optional): Format of the response. Defaults to "text".
+            tools (list, optional): List of tools that the model can call. Defaults to None.
+            tool_choice (str, optional): Tool choice method. Defaults to "auto".
 
         Returns:
-            str: The generated response from the model.
+            str: The generated response.
         """
         params = {
             "model": self.config.model,
@@ -66,5 +87,8 @@ class OllamaLLM(LLMBase):
         if response_format:
             params["format"] = "json"
 
+        if tools:
+            params["tools"] = tools
+
         response = self.client.chat(**params)
-        return response["message"]["content"]
+        return self._parse_response(response, tools)
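End to end, tool calling through the restored interface looks roughly like this. The snippet is a sketch rather than code from the commit: the BaseLlmConfig keyword and the weather tool are assumptions, though the tool follows the OpenAI-style function schema that Ollama accepts:

from mem0.configs.llms.base import BaseLlmConfig
from mem0.llms.ollama import OllamaLLM

# Hypothetical tool definition in the OpenAI-style function schema
weather_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}

llm = OllamaLLM(BaseLlmConfig(model="llama3.1:70b"))  # config keyword assumed
result = llm.generate_response(
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    tools=[weather_tool],
)
# With tools, result is a dict with "content" and "tool_calls"; without, a plain string.

Note that tool_choice is accepted by the new signature but never forwarded to params in the hunks shown; only tools reaches the client.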