[docs]: Revamp embedchain docs (#799)
@@ -5,9 +5,9 @@ from langchain.memory import ConversationBufferMemory
 from langchain.schema import BaseMessage
 
 from embedchain.config import BaseLlmConfig
-from embedchain.config.llm.base_llm_config import (
-    DEFAULT_PROMPT, DEFAULT_PROMPT_WITH_HISTORY_TEMPLATE,
-    DOCS_SITE_PROMPT_TEMPLATE)
+from embedchain.config.llm.base import (DEFAULT_PROMPT,
+                                        DEFAULT_PROMPT_WITH_HISTORY_TEMPLATE,
+                                        DOCS_SITE_PROMPT_TEMPLATE)
 from embedchain.helper.json_serializable import JSONSerializable
 
 
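The prompt constants move from embedchain.config.llm.base_llm_config to embedchain.config.llm.base. A minimal sketch of the new import path, taken directly from the added lines above:

    from embedchain.config.llm.base import (DEFAULT_PROMPT,
                                            DEFAULT_PROMPT_WITH_HISTORY_TEMPLATE,
                                            DOCS_SITE_PROMPT_TEMPLATE)

    # The old module path, embedchain.config.llm.base_llm_config, no longer resolves.
    print(DEFAULT_PROMPT)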
@@ -174,7 +174,7 @@ class BaseLlm(JSONSerializable):
         :type input_query: str
         :param contexts: Embeddings retrieved from the database to be used as context.
         :type contexts: List[str]
-        :param config: The `LlmConfig` instance to use as configuration options. This is used for one method call.
+        :param config: The `BaseLlmConfig` instance to use as configuration options. This is used for one method call.
         To persistently use a config, declare it during app init., defaults to None
         :type config: Optional[BaseLlmConfig], optional
         :param dry_run: A dry run does everything except send the resulting prompt to
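The docstring now names the real class: the per-call override is a BaseLlmConfig, not an LlmConfig. A minimal sketch of the one-off usage the docstring describes; the method name `query`, the `temperature` keyword, and the `llm`/`retrieved_chunks` placeholders are assumptions, since the hunk shows only the docstring:

    from embedchain.config import BaseLlmConfig

    # Applies to this single call only; a config declared at app init
    # would persist instead, per the docstring above.
    one_off = BaseLlmConfig(temperature=0.2)  # keyword assumed
    answer = llm.query(
        input_query="What is embedchain?",
        contexts=retrieved_chunks,  # List[str] retrieved from the database
        config=one_off,
    )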
@@ -230,7 +230,7 @@ class BaseLlm(JSONSerializable):
         :type input_query: str
         :param contexts: Embeddings retrieved from the database to be used as context.
         :type contexts: List[str]
-        :param config: The `LlmConfig` instance to use as configuration options. This is used for one method call.
+        :param config: The `BaseLlmConfig` instance to use as configuration options. This is used for one method call.
         To persistently use a config, declare it during app init., defaults to None
         :type config: Optional[BaseLlmConfig], optional
         :param dry_run: A dry run does everything except send the resulting prompt to
@@ -30,11 +30,11 @@ class GPT4ALLLlm(BaseLlm):
     def _get_answer(self, prompt: str, config: BaseLlmConfig) -> Union[str, Iterable]:
         if config.model and config.model != self.config.model:
             raise RuntimeError(
-                "OpenSourceApp does not support switching models at runtime. Please create a new app instance."
+                "GPT4ALLLlm does not support switching models at runtime. Please create a new app instance."
             )
 
         if config.system_prompt:
-            raise ValueError("OpenSourceApp does not support `system_prompt`")
+            raise ValueError("GPT4ALLLlm does not support `system_prompt`")
 
         response = self.instance.generate(
             prompt=prompt,
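Besides the corrected messages, the guard itself is the point: GPT4ALLLlm binds its local model when the instance is created, so a per-call config naming a different model cannot be honored. A sketch of the behavior; the module path and the model filenames are placeholders, and only the class name and the config.model/system_prompt checks come from the hunk:

    from embedchain.config import BaseLlmConfig
    from embedchain.llm.gpt4all import GPT4ALLLlm  # module path assumed

    llm = GPT4ALLLlm(config=BaseLlmConfig(model="some-local-model.bin"))  # placeholder
    try:
        # A different model in a per-call config now raises the corrected error.
        llm._get_answer("Hello", BaseLlmConfig(model="another-model.bin"))
    except RuntimeError as err:
        print(err)  # GPT4ALLLlm does not support switching models at runtime. ...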
@@ -10,10 +10,10 @@ from embedchain.llm.base import BaseLlm
 
 
 @register_deserializable
-class HuggingFaceHubLlm(BaseLlm):
+class HuggingFaceLlm(BaseLlm):
     def __init__(self, config: Optional[BaseLlmConfig] = None):
-        if "HUGGINGFACEHUB_ACCESS_TOKEN" not in os.environ:
-            raise ValueError("Please set the HUGGINGFACEHUB_ACCESS_TOKEN environment variable.")
+        if "HUGGINGFACE_ACCESS_TOKEN" not in os.environ:
+            raise ValueError("Please set the HUGGINGFACE_ACCESS_TOKEN environment variable.")
 
         try:
             importlib.import_module("huggingface_hub")
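Both the class and its environment variable drop the Hub suffix, so callers must export the new variable name before constructing the LLM. A sketch; the embedchain.llm.huggingface module path is an assumption, everything else is from the hunk:

    import os

    # New name; HUGGINGFACEHUB_ACCESS_TOKEN is no longer read.
    os.environ["HUGGINGFACE_ACCESS_TOKEN"] = "hf_..."  # placeholder token

    from embedchain.llm.huggingface import HuggingFaceLlm  # module path assumed

    llm = HuggingFaceLlm()  # raises ValueError if the variable is unset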
@@ -27,8 +27,8 @@ class HuggingFaceHubLlm(BaseLlm):
 
     def get_llm_model_answer(self, prompt):
         if self.config.system_prompt:
-            raise ValueError("HuggingFaceHubLlm does not support `system_prompt`")
-        return HuggingFaceHubLlm._get_answer(prompt=prompt, config=self.config)
+            raise ValueError("HuggingFaceLlm does not support `system_prompt`")
+        return HuggingFaceLlm._get_answer(prompt=prompt, config=self.config)
 
     @staticmethod
     def _get_answer(prompt: str, config: BaseLlmConfig) -> str:
@@ -43,7 +43,7 @@ class HuggingFaceHubLlm(BaseLlm):
             raise ValueError("`top_p` must be > 0.0 and < 1.0")
 
         llm = HuggingFaceHub(
-            huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_ACCESS_TOKEN"],
+            huggingfacehub_api_token=os.environ["HUGGINGFACE_ACCESS_TOKEN"],
             repo_id=config.model or "google/flan-t5-xxl",
             model_kwargs=model_kwargs,
         )
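The token rename reaches the langchain HuggingFaceHub wrapper as well. The unchanged line below it documents the fallback: with no config.model set, the repo defaults to google/flan-t5-xxl. A sketch of picking a different repo through the config; passing `model` to the BaseLlmConfig constructor is an assumption (the hunk only reads config.model):

    from embedchain.config import BaseLlmConfig
    from embedchain.llm.huggingface import HuggingFaceLlm  # module path assumed

    # config.model feeds repo_id; leaving it unset selects google/flan-t5-xxl.
    llm = HuggingFaceLlm(config=BaseLlmConfig(model="google/flan-t5-small"))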
@@ -7,12 +7,12 @@ from embedchain.llm.base import BaseLlm
 
 
 @register_deserializable
-class VertexAiLlm(BaseLlm):
+class VertexAILlm(BaseLlm):
     def __init__(self, config: Optional[BaseLlmConfig] = None):
         super().__init__(config=config)
 
     def get_llm_model_answer(self, prompt):
-        return VertexAiLlm._get_answer(prompt=prompt, config=self.config)
+        return VertexAILlm._get_answer(prompt=prompt, config=self.config)
 
     @staticmethod
     def _get_answer(prompt: str, config: BaseLlmConfig) -> str:
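VertexAiLlm becomes VertexAILlm, matching the capitalization of the other providers (GPT4ALLLlm, HuggingFaceLlm). Since the class is decorated with @register_deserializable, the rename presumably also changes the name under which it deserializes, so serialized apps referencing the old spelling are affected. A usage sketch; the embedchain.llm.vertex_ai module path is an assumption:

    from embedchain.llm.vertex_ai import VertexAILlm  # module path assumed

    llm = VertexAILlm()  # the old spelling, VertexAiLlm, no longer exists
    print(llm.get_llm_model_answer("Say hello"))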