Rename embedchain to mem0 and open-source code for long-term memory (#1474)

Co-authored-by: Deshraj Yadav <deshrajdry@gmail.com>
This commit is contained in:
Taranjeet Singh
2024-07-12 07:51:33 -07:00
committed by GitHub
parent 83e8c97295
commit f842a92e25
665 changed files with 9427 additions and 6592 deletions

mem0/llms/ollama.py Normal file

@@ -0,0 +1,29 @@
import ollama

from mem0.llms.base import LLMBase


class OllamaLLM(LLMBase):
    def __init__(self, model="llama3"):
        self.model = model
        self._ensure_model_exists()

    def _ensure_model_exists(self):
        """
        Ensure the specified model exists locally. If not, pull it from Ollama.
        """
        model_list = [m["name"] for m in ollama.list()["models"]]
        if not any(m.startswith(self.model) for m in model_list):
            ollama.pull(self.model)

    def generate_response(self, messages):
        """
        Generate a response based on the given messages using Ollama.

        Args:
            messages (list): List of message dicts containing 'role' and 'content'.

        Returns:
            str: The generated response.
        """
        response = ollama.chat(model=self.model, messages=messages)
        return response["message"]["content"]
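
A minimal usage sketch for this class follows. It assumes a local Ollama server is running, the `ollama` Python package is installed, and the import path matches the new `mem0/llms/` layout; the message contents are illustrative only.

# Hypothetical usage example, not part of this commit.
from mem0.llms.ollama import OllamaLLM

llm = OllamaLLM(model="llama3")  # pulls the model on first use if it is missing
reply = llm.generate_response(
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Explain long-term memory for LLM apps in one sentence."},
    ]
)
print(reply)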