t6_mem0/mem0/embeddings/ollama.py
import subprocess
import sys
from typing import Optional

from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.base import EmbeddingBase

try:
    from ollama import Client
except ImportError:
    # The `ollama` client is an optional dependency; offer to install it
    # into the current interpreter before giving up.
    user_input = input("The 'ollama' library is required. Install it now? [y/N]: ")
    if user_input.lower() == "y":
        try:
            subprocess.check_call([sys.executable, "-m", "pip", "install", "ollama"])
            from ollama import Client
        except subprocess.CalledProcessError:
            print("Failed to install 'ollama'. Please install it manually using 'pip install ollama'.")
            sys.exit(1)
    else:
        print("The required 'ollama' library is not installed.")
        sys.exit(1)

class OllamaEmbedding(EmbeddingBase):
    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        super().__init__(config)

        # Fall back to defaults for any values the config leaves unset.
        if not self.config.model:
            self.config.model = "nomic-embed-text"
        if not self.config.embedding_dims:
            self.config.embedding_dims = 512

        self.client = Client(host=self.config.ollama_base_url)
        self._ensure_model_exists()

    def _ensure_model_exists(self):
        """
        Ensure the specified model exists locally. If not, pull it from Ollama.
        """
        local_models = self.client.list()["models"]
        if not any(model.get("name") == self.config.model for model in local_models):
            self.client.pull(self.config.model)

    def embed(self, text):
        """
        Get the embedding for the given text using Ollama.

        Args:
            text (str): The text to embed.

        Returns:
            list: The embedding vector.
        """
        response = self.client.embeddings(model=self.config.model, prompt=text)
        return response["embedding"]
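
# A minimal usage sketch, assuming an Ollama server is reachable at its default
# address (http://localhost:11434) and that BaseEmbedderConfig accepts `model`
# and `ollama_base_url` keyword arguments; adjust to the actual config fields.
if __name__ == "__main__":
    config = BaseEmbedderConfig(model="nomic-embed-text", ollama_base_url="http://localhost:11434")
    embedder = OllamaEmbedding(config)
    vector = embedder.embed("Ollama serves embeddings locally.")
    print(f"Got an embedding of dimension {len(vector)}")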