refactor: app design concept (#305)
embedchain/apps/App.py (new file, 49 lines)
@@ -0,0 +1,49 @@
import openai

from embedchain.config import AppConfig, ChatConfig
from embedchain.embedchain import EmbedChain


class App(EmbedChain):
    """
    The EmbedChain app.
    Has three functions: add, query and dry_run.

    add(data_type, url): adds the data from the given URL to the vector db.
    query(query): finds an answer to the given query using the vector database and the LLM.
    dry_run(query): tests your prompt without consuming tokens.
    """

    def __init__(self, config: AppConfig = None):
        """
        :param config: AppConfig instance to load as configuration. Optional.
        """
        if config is None:
            config = AppConfig()

        super().__init__(config)

    def get_llm_model_answer(self, prompt, config: ChatConfig):
        messages = [{"role": "user", "content": prompt}]
        response = openai.ChatCompletion.create(
            model=config.model,
            messages=messages,
            temperature=config.temperature,
            max_tokens=config.max_tokens,
            top_p=config.top_p,
            stream=config.stream,
        )

        if config.stream:
            return self._stream_llm_model_response(response)
        else:
            return response["choices"][0]["message"]["content"]

    def _stream_llm_model_response(self, response):
        """
        Generator that streams the response from the OpenAI chat
        completions API chunk by chunk.
        """
        for line in response:
            chunk = line["choices"][0].get("delta", {}).get("content", "")
            yield chunk
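Because get_llm_model_answer returns a generator when config.stream is True and a plain string otherwise, callers must handle both shapes. A minimal usage sketch, assuming an OpenAI API key is set in the environment, that add/query/chat behave as in the base EmbedChain class, and a hypothetical URL:

    from embedchain.apps.App import App
    from embedchain.config import ChatConfig

    app = App()  # falls back to the default AppConfig
    app.add("web_page", "https://example.com/article")  # hypothetical URL

    # Non-streaming: the full answer comes back as a single string.
    print(app.query("What is the article about?"))

    # Streaming: the LLM call yields chunks; print them as they arrive.
    for chunk in app.chat("Summarize it in one sentence.", ChatConfig(stream=True)):
        print(chunk, end="", flush=True)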
embedchain/apps/OpenSourceApp.py (new file, 39 lines)
@@ -0,0 +1,39 @@
import logging

from embedchain.config import ChatConfig, OpenSourceAppConfig
from embedchain.embedchain import EmbedChain

# Module-level cache so the gpt4all model is loaded at most once per process.
gpt4all_model = None


class OpenSourceApp(EmbedChain):
    """
    The open source app.
    Same as App, but uses an open source embedding model and LLM.

    Has two functions: add and query.

    add(data_type, url): adds the data from the given URL to the vector db.
    query(query): finds an answer to the given query using the vector database and the LLM.
    """

    def __init__(self, config: OpenSourceAppConfig = None):
        """
        :param config: OpenSourceAppConfig instance to load as configuration. Optional.
        `ef` defaults to the open source embedding function.
        """
        logging.info("Loading open source embedding model. This may take some time...")  # noqa:E501
        if not config:
            config = OpenSourceAppConfig()

        logging.info("Successfully loaded open source embedding model.")
        super().__init__(config)

    def get_llm_model_answer(self, prompt, config: ChatConfig):
        from gpt4all import GPT4All

        global gpt4all_model
        if gpt4all_model is None:
            gpt4all_model = GPT4All("orca-mini-3b.ggmlv3.q4_0.bin")
        response = gpt4all_model.generate(prompt=prompt, streaming=config.stream)
        return response
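The GPT4All import is deferred into get_llm_model_answer and the loaded model is cached in a module-level variable, so the slow weight load happens on the first query rather than at import time, and only once per process. A minimal sketch, assuming the gpt4all package is installed and the orca-mini weights download on first use:

    from embedchain.apps.OpenSourceApp import OpenSourceApp

    app = OpenSourceApp()  # loads the open source embedding model
    app.add("web_page", "https://example.com/article")  # hypothetical URL

    # First query triggers the one-time GPT4All load; later queries reuse it.
    print(app.query("What is the article about?"))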
embedchain/apps/PersonApp.py (new file, 65 lines)
@@ -0,0 +1,65 @@
from string import Template

from embedchain.apps.App import App
from embedchain.apps.OpenSourceApp import OpenSourceApp
from embedchain.config import ChatConfig, QueryConfig
from embedchain.config.apps.BaseAppConfig import BaseAppConfig
from embedchain.config.QueryConfig import (DEFAULT_PROMPT,
                                           DEFAULT_PROMPT_WITH_HISTORY)


class EmbedChainPersonApp:
    """
    Base class to create a person bot.
    This bot behaves and speaks like a person.

    :param person: name of the person; works best if it is a well-known person.
    :param config: BaseAppConfig instance to load as configuration.
    """

    def __init__(self, person, config: BaseAppConfig = None):
        self.person = person
        self.person_prompt = f"You are {person}. Whatever you say, you will always say it in {person} style."  # noqa:E501
        if config is None:
            config = BaseAppConfig()
        # Cooperative super() call: in PersonApp(EmbedChainPersonApp, App),
        # this resolves to App.__init__ via the MRO.
        super().__init__(config)


class PersonApp(EmbedChainPersonApp, App):
    """
    The Person app.
    Extends functionality from EmbedChainPersonApp and App.
    """

    def query(self, input_query, config: QueryConfig = None):
        self.template = Template(self.person_prompt + " " + DEFAULT_PROMPT)
        query_config = QueryConfig(
            template=self.template,
        )
        return super().query(input_query, query_config)

    def chat(self, input_query, config: ChatConfig = None):
        self.template = Template(self.person_prompt + " " + DEFAULT_PROMPT_WITH_HISTORY)  # noqa:E501
        chat_config = ChatConfig(
            template=self.template,
        )
        return super().chat(input_query, chat_config)


class PersonOpenSourceApp(EmbedChainPersonApp, OpenSourceApp):
    """
    The Person open source app.
    Extends functionality from EmbedChainPersonApp and OpenSourceApp.
    """

    def query(self, input_query, config: QueryConfig = None):
        # Build the persona template here too; it is not set in __init__.
        self.template = Template(self.person_prompt + " " + DEFAULT_PROMPT)
        query_config = QueryConfig(
            template=self.template,
        )
        return super().query(input_query, query_config)

    def chat(self, input_query, config: ChatConfig = None):
        self.template = Template(self.person_prompt + " " + DEFAULT_PROMPT_WITH_HISTORY)  # noqa:E501
        chat_config = ChatConfig(
            template=self.template,
        )
        return super().chat(input_query, chat_config)
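PersonApp leans on Python's method resolution order: in PersonApp(EmbedChainPersonApp, App), the super().__init__(config) inside EmbedChainPersonApp resolves to App.__init__, so one config flows through both bases, and each query rebuilds the template with the persona prefixed to the default prompt. A minimal usage sketch, assuming an OpenAI API key is configured and a hypothetical persona and URL:

    from embedchain.apps.PersonApp import PersonApp

    app = PersonApp("Yoda")  # hypothetical persona, injected ahead of DEFAULT_PROMPT
    app.add("web_page", "https://example.com/article")  # hypothetical URL

    # The effective template is: "You are Yoda. ..." + DEFAULT_PROMPT
    print(app.query("What is the article about?"))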
embedchain/apps/__init__.py (new file, empty)
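The empty __init__.py makes embedchain/apps an importable package, so the refactored classes are reachable by module path. A sketch of the resulting import surface, assuming no additional re-exports are set up elsewhere:

    from embedchain.apps.App import App
    from embedchain.apps.OpenSourceApp import OpenSourceApp
    from embedchain.apps.PersonApp import PersonApp, PersonOpenSourceApp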