Download Ollama model if not present (#1397)
This commit makes OllamaLlm verify at construction time that the configured model exists on the local Ollama server, pulling it automatically when missing. The `ollama` client package becomes a dependency, and the tests stub the new network call.

Makefile (2 changed lines)
@@ -11,7 +11,7 @@ install:

 install_all:
 	poetry install --all-extras
-	poetry run pip install pinecone-text pinecone-client langchain-anthropic "unstructured[local-inference, all-docs]"
+	poetry run pip install pinecone-text pinecone-client langchain-anthropic "unstructured[local-inference, all-docs]" ollama

 install_es:
 	poetry install --extras elasticsearch
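The `ollama` Python client appended to the pip install line is what the LLM class below uses for model management. A quick smoke test for the new dependency (a sketch, assuming a local Ollama server on its default port 11434) could be:

# Sketch: confirm the newly installed `ollama` client can reach a local server.
# The host URL is an assumption (Ollama's default); adjust to your setup.
from ollama import Client

client = Client(host="http://localhost:11434")
print([m.get("name") for m in client.list()["models"]])  # e.g. ['llama2:latest']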
embedchain/llm/ollama.py
@@ -1,3 +1,4 @@
+import logging
 from collections.abc import Iterable
 from typing import Optional, Union

@@ -5,11 +6,14 @@ from langchain.callbacks.manager import CallbackManager
 from langchain.callbacks.stdout import StdOutCallbackHandler
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 from langchain_community.llms.ollama import Ollama
+from ollama import Client

 from embedchain.config import BaseLlmConfig
 from embedchain.helpers.json_serializable import register_deserializable
 from embedchain.llm.base import BaseLlm

+logger = logging.getLogger(__name__)
+

 @register_deserializable
 class OllamaLlm(BaseLlm):
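Note that two Ollama interfaces now coexist in this module: the langchain_community `Ollama` wrapper handles text generation, while the lower-level `ollama.Client` is used only for model management. A sketch of the split (host values are assumptions):

from langchain_community.llms.ollama import Ollama
from ollama import Client

# Generation goes through the langchain wrapper...
llm = Ollama(model="llama2", base_url="http://localhost:11434")
# ...while listing and pulling models uses the raw client.
client = Client(host="http://localhost:11434")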
@@ -18,6 +22,12 @@ class OllamaLlm(BaseLlm):
         if self.config.model is None:
             self.config.model = "llama2"

+        client = Client(host=config.base_url)
+        local_models = client.list()["models"]
+        if not any(model.get("name") == self.config.model for model in local_models):
+            logger.info(f"Pulling {self.config.model} from Ollama!")
+            client.pull(self.config.model)
+
     def get_llm_model_answer(self, prompt):
         return self._get_answer(prompt=prompt, config=self.config)
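This is the heart of the change: on construction, the client lists the locally available models and pulls the configured one if it is absent. Factored out as a standalone sketch (the helper name and the tag-matching caveat are mine, not part of the diff):

import logging

from ollama import Client

logger = logging.getLogger(__name__)


def ensure_model_available(model: str, base_url: str) -> None:
    # Hypothetical helper mirroring the new __init__ logic.
    client = Client(host=base_url)
    local_models = client.list()["models"]
    if not any(m.get("name") == model for m in local_models):
        # Caveat: names from list() usually carry a tag (e.g. "llama2:latest"),
        # so an exact match against a bare "llama2" can trigger a redundant pull.
        logger.info(f"Pulling {model} from Ollama!")
        client.pull(model)  # blocks until the download finishes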
tests/llm/test_ollama.py
@@ -11,6 +11,7 @@ def ollama_llm_config():


 def test_get_llm_model_answer(ollama_llm_config, mocker):
+    mocker.patch("embedchain.llm.ollama.Client.list", return_value={"models": [{"name": "llama2"}]})
     mocker.patch("embedchain.llm.ollama.OllamaLlm._get_answer", return_value="Test answer")

     llm = OllamaLlm(ollama_llm_config)
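Since `OllamaLlm.__init__` now calls `Client.list()`, any test that constructs the class must stub that call or it will try to reach a live server, which is exactly what the added `mocker.patch` lines do. A reusable fixture (hypothetical, not part of this diff) could centralize the stub:

import pytest


@pytest.fixture
def stub_ollama_models(mocker):
    # Pretend the server already has llama2 so __init__ never calls pull().
    return mocker.patch(
        "embedchain.llm.ollama.Client.list",
        return_value={"models": [{"name": "llama2"}]},
    )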
@@ -20,6 +21,7 @@ def test_get_llm_model_answer(ollama_llm_config, mocker):


 def test_get_answer_mocked_ollama(ollama_llm_config, mocker):
+    mocker.patch("embedchain.llm.ollama.Client.list", return_value={"models": [{"name": "llama2"}]})
     mocked_ollama = mocker.patch("embedchain.llm.ollama.Ollama")
     mock_instance = mocked_ollama.return_value
     mock_instance.invoke.return_value = "Mocked answer"
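The second test replaces the langchain `Ollama` class and asserts against `invoke`, which implies the answer path looks roughly like this (a hedged reconstruction; the real `_get_answer` likely also wires up streaming callbacks and other kwargs):

from langchain_community.llms.ollama import Ollama


def _get_answer_sketch(prompt: str, model: str, base_url: str) -> str:
    # Assumption: generation instantiates the wrapper and calls invoke().
    llm = Ollama(model=model, base_url=base_url)
    return llm.invoke(prompt)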