Rename embedchain to mem0 and open-source code for long-term memory (#1474)
Co-authored-by: Deshraj Yadav <deshrajdry@gmail.com>
mem0/llms/ollama.py (new file, 29 lines)
@@ -0,0 +1,29 @@
import ollama

from mem0.llms.base import LLMBase


class OllamaLLM(LLMBase):
    def __init__(self, model="llama3"):
        self.model = model
        # Pull the model locally on first use if it is not already available.
        self._ensure_model_exists()

    def _ensure_model_exists(self):
        """
        Ensure the specified model exists locally. If not, pull it from Ollama.
        """
        model_list = [m["name"] for m in ollama.list()["models"]]
        if not any(m.startswith(self.model) for m in model_list):
            ollama.pull(self.model)

    def generate_response(self, messages):
        """
        Generate a response based on the given messages using Ollama.

        Args:
            messages (list): List of message dicts containing 'role' and 'content'.

        Returns:
            str: The generated response.
        """
        response = ollama.chat(model=self.model, messages=messages)
        return response["message"]["content"]
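As a usage sketch (not part of this commit), the class could be exercised as follows. It assumes the `ollama` Python package is installed, a local Ollama server is running, and that `OllamaLLM` is importable from `mem0.llms.ollama`, matching the file path shown in this diff:

# Usage sketch: assumes `pip install ollama`, a running local Ollama server,
# and that OllamaLLM is importable from the path shown above.
from mem0.llms.ollama import OllamaLLM

llm = OllamaLLM(model="llama3")  # pulls "llama3" locally if it is missing

reply = llm.generate_response(
    messages=[
        {"role": "system", "content": "You are a concise assistant."},
        {"role": "user", "content": "What does long-term memory mean for an LLM app?"},
    ]
)
print(reply)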