diff --git a/docs/components/embedding-models.mdx b/docs/components/embedding-models.mdx
index 4e362fb3..af24b89c 100644
--- a/docs/components/embedding-models.mdx
+++ b/docs/components/embedding-models.mdx
@@ -15,6 +15,7 @@ Embedchain supports several embedding models from the following providers:
+
 ## OpenAI
@@ -357,4 +358,31 @@ embedder:
     vector_dimension: 768
 ```
+
+
+## Ollama
+
+Ollama enables the use of embedding models, allowing you to generate high-quality embeddings directly on your local machine. Make sure to install [Ollama](https://ollama.com/download) and keep it running before using the embedding model.
+
+You can find the list of models at [Ollama Embedding Models](https://ollama.com/blog/embedding-models).
+
+Below is an example of how to use an Ollama embedding model:
+
+<CodeGroup>
+
+```python main.py
+import os
+from embedchain import App
+
+# load embedding model configuration from config.yaml file
+app = App.from_config(config_path="config.yaml")
+```
+
+```yaml config.yaml
+embedder:
+  provider: ollama
+  config:
+    model: 'all-minilm:latest'
+```
+</CodeGroup>
\ No newline at end of file
diff --git a/embedchain/embedder/ollama.py b/embedchain/embedder/ollama.py
index 41001114..a70e402e 100644
--- a/embedchain/embedder/ollama.py
+++ b/embedchain/embedder/ollama.py
@@ -1,16 +1,28 @@
+import logging
 from typing import Optional
 
+try:
+    import ollama
+except ImportError:
+    raise ImportError("Ollama Embedder requires extra dependencies. Install with `pip install ollama`") from None
+
 from langchain_community.embeddings import OllamaEmbeddings
 
 from embedchain.config import OllamaEmbedderConfig
 from embedchain.embedder.base import BaseEmbedder
 from embedchain.models import VectorDimensions
 
+logger = logging.getLogger(__name__)
+
 
 class OllamaEmbedder(BaseEmbedder):
     def __init__(self, config: Optional[OllamaEmbedderConfig] = None):
         super().__init__(config=config)
+        local_models = ollama.list()["models"]
+        if not any(model.get("name") == self.config.model for model in local_models):
+            logger.info(f"Pulling {self.config.model} from Ollama!")
+            ollama.pull(self.config.model)
 
         embeddings = OllamaEmbeddings(model=self.config.model, base_url=self.config.base_url)
         embedding_fn = BaseEmbedder._langchain_default_concept(embeddings)
         self.set_embedding_fn(embedding_fn=embedding_fn)
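
As a quick aid for trying the change locally, here is a minimal sketch of how the new embedder can be exercised outside of `App.from_config`. It is not part of the diff: the `OllamaEmbedderConfig(model=..., base_url=...)` keyword arguments are assumptions inferred from the attributes the constructor reads (`self.config.model`, `self.config.base_url`), and the endpoint `http://localhost:11434` is the usual Ollama default rather than anything this diff sets.

```python
# Hypothetical local smoke test for the new OllamaEmbedder (not part of the diff).
# Assumes `pip install ollama` has been run and an Ollama server is running locally.
from embedchain.config import OllamaEmbedderConfig
from embedchain.embedder.ollama import OllamaEmbedder

# Keyword arguments are assumed from the attributes used in OllamaEmbedder.__init__.
config = OllamaEmbedderConfig(model="all-minilm:latest", base_url="http://localhost:11434")

# On construction, the embedder now lists local models via ollama.list() and,
# if the requested model is missing, pulls it before wiring up langchain's
# OllamaEmbeddings as the embedding function.
embedder = OllamaEmbedder(config=config)
```

Pulling at construction time is what lets the documented `config.yaml` example work on a machine that has never downloaded `all-minilm:latest`, at the cost of a potentially slow first run; the `logger.info` call is the only signal that a download is in progress.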