feat: History API (client) (#204)

Author: cachho
Date: 2023-07-10 20:15:20 +02:00
Committed by: GitHub
parent b3cf834186
commit 65414af098
4 changed files with 111 additions and 41 deletions


@@ -11,6 +11,7 @@ from langchain.memory import ConversationBufferMemory
 from embedchain.config import InitConfig, AddConfig, QueryConfig, ChatConfig
+from embedchain.config.QueryConfig import DEFAULT_PROMPT
 from embedchain.data_formatter import DataFormatter
 from string import Template
 gpt4all_model = None
@@ -144,16 +145,19 @@ class EmbedChain:
             content = ""
         return content
-    def generate_prompt(self, input_query, context, template: Template = None):
+    def generate_prompt(self, input_query, context, config: QueryConfig):
         """
         Generates a prompt based on the given query and context, ready to be passed to an LLM
         :param input_query: The query to use.
         :param context: Similar documents to the query used as context.
-        :param template: Optional. The `Template` instance to use as a template for prompt.
+        :param config: Optional. The `QueryConfig` instance to use as configuration options.
         :return: The prompt
         """
-        prompt = template.substitute(context = context, query = input_query)
+        if not config.history:
+            prompt = config.template.substitute(context = context, query = input_query)
+        else:
+            prompt = config.template.substitute(context = context, query = input_query, history = config.history)
         return prompt
     def get_answer_from_llm(self, prompt, config: ChatConfig):
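
With this change, generate_prompt folds the stored conversation into the template substitution whenever config.history is set. Below is a minimal sketch of that behaviour using only string.Template, the same mechanism the diff relies on; the template strings and the build_prompt helper are illustrative, not the library's DEFAULT_PROMPT, and the history-aware template selection that the config classes handle is simplified here into two explicit templates.

from string import Template

# Illustrative templates; the real defaults live in embedchain.config.QueryConfig.
DEFAULT_TEMPLATE = Template("Context: $context\nQuery: $query\nAnswer:")
HISTORY_TEMPLATE = Template(
    "Use the context and the chat history to answer the query.\n"
    "History: $history\nContext: $context\nQuery: $query\nAnswer:"
)

def build_prompt(query, context, history=None):
    # Mirrors the branch above: history is passed to substitute() only when it is set.
    # Template.substitute() raises KeyError for unfilled placeholders, so the history
    # branch needs a template that actually contains $history; extra keyword
    # arguments on a template without the placeholder are simply ignored.
    if not history:
        return DEFAULT_TEMPLATE.substitute(context=context, query=query)
    return HISTORY_TEMPLATE.substitute(context=context, query=query, history=history)

print(build_prompt("Who maintains embedchain?", "embedchain is an open-source RAG framework."))
print(build_prompt(
    "Does it support chat history?",
    "chat() stores each turn in ConversationBufferMemory.",
    history="Human: Who maintains embedchain?\nAI: The embedchain community.",
))
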
@@ -181,30 +185,12 @@ class EmbedChain:
         if config is None:
             config = QueryConfig()
         context = self.retrieve_from_database(input_query)
-        prompt = self.generate_prompt(input_query, context, config.template)
+        prompt = self.generate_prompt(input_query, context, config)
         logging.info(f"Prompt: {prompt}")
         answer = self.get_answer_from_llm(prompt, config)
         logging.info(f"Answer: {answer}")
         return answer
-    def generate_chat_prompt(self, input_query, context, chat_history=''):
-        """
-        Generates a prompt based on the given query, context and chat history
-        for chat interface. This is then passed to an LLM.
-        :param input_query: The query to use.
-        :param context: Similar documents to the query used as context.
-        :param chat_history: User and bot conversation that happened before.
-        :return: The prompt
-        """
-        prefix_prompt = f"""You are a chatbot having a conversation with a human. You are given chat history and context. You need to answer the query considering context, chat history and your knowledge base. If you don't know the answer or the answer is neither contained in the context nor in history, then simply say "I don't know"."""
-        chat_history_prompt = f"""\n----\nChat History: {chat_history}\n----"""
-        suffix_prompt = f"""\n####\nContext: {context}\n####\nQuery: {input_query}\nHelpful Answer:"""
-        prompt = prefix_prompt
-        if chat_history:
-            prompt += chat_history_prompt
-        prompt += suffix_prompt
-        return prompt
     def chat(self, input_query, config: ChatConfig = None):
         """
@@ -217,18 +203,19 @@ class EmbedChain:
         :param config: Optional. The `ChatConfig` instance to use as configuration options.
         :return: The answer to the query.
         """
-        if config is None:
-            config = ChatConfig()
         context = self.retrieve_from_database(input_query)
         global memory
         chat_history = memory.load_memory_variables({})["history"]
-        prompt = self.generate_chat_prompt(
-            input_query,
-            context,
-            chat_history=chat_history,
-        )
+        if config is None:
+            config = ChatConfig()
+        if chat_history:
+            config.set_history(chat_history)
+        prompt = self.generate_prompt(input_query, context, config)
         logging.info(f"Prompt: {prompt}")
         answer = self.get_answer_from_llm(prompt, config)
         memory.chat_memory.add_user_message(input_query)
         if isinstance(answer, str):
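
chat() now gets its history from the module-level ConversationBufferMemory and hands it to the config via set_history(). The sketch below shows that memory round-trip in isolation, assuming langchain's ConversationBufferMemory as imported at the top of the file; the messages are placeholders, and add_ai_message is the assumed counterpart of the add_user_message call shown above for the lines not included in this hunk.

from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory()

# Before any turn is stored, "history" is an empty string, so chat() takes the
# no-history branch of generate_prompt.
print(repr(memory.load_memory_variables({})["history"]))  # ''

# chat() records each turn after answering.
memory.chat_memory.add_user_message("Who maintains embedchain?")
memory.chat_memory.add_ai_message("The embedchain community.")

# On the next call, this string is what gets passed to config.set_history(...)
# and substituted into the $history placeholder.
print(memory.load_memory_variables({})["history"])
# e.g. "Human: Who maintains embedchain?\nAI: The embedchain community."
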
@@ -264,7 +251,7 @@ class EmbedChain:
         if config is None:
             config = QueryConfig()
         context = self.retrieve_from_database(input_query)
-        prompt = self.generate_prompt(input_query, context, config.template)
+        prompt = self.generate_prompt(input_query, context, config)
         logging.info(f"Prompt: {prompt}")
         return prompt
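
Taken together, the client-facing effect is that a second chat() call sees the earlier turns with no extra work by the caller. A rough end-to-end sketch follows, assuming the App entry point, the data-type-first add() signature, and the OpenAI key requirement described in the project's README of the time; the URL, questions, and key are placeholders.

import os

from embedchain import App

os.environ["OPENAI_API_KEY"] = "sk-..."  # placeholder; required by the default LLM

naval_chat_bot = App()
naval_chat_bot.add("web_page", "https://nav.al/feedback")

# First turn: no history yet, so the default prompt template is used.
print(naval_chat_bot.chat("What unique capacity does Naval argue humans possess?"))

# Second turn: the previous exchange is loaded from memory, set on the ChatConfig
# via set_history(), and substituted into the $history placeholder of the prompt.
print(naval_chat_bot.chat("Can you elaborate on that?"))
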