feat: system prompt (#448)

cachho authored 2023-08-16 21:57:01 +02:00, committed by GitHub
parent 7585bc557b
commit 849de5e8ab
9 changed files with 50 additions and 10 deletions
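
The change threads an optional system prompt from the chat config through to the LangChain message list. The hunks below only show the CustomApp side; the config side (among the other changed files, not shown here) is assumed to expose `system_prompt`. A hedged usage sketch under that assumption:

# Usage sketch, not part of the diff: assumes ChatConfig gained a
# `system_prompt` keyword argument in one of the other files changed
# by this commit.
from embedchain.config import ChatConfig

config = ChatConfig(system_prompt="You are a senior Python reviewer.")
# app.chat("Is this function thread-safe?", config=config) would then
# prepend a SystemMessage built from config.system_prompt.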


@@ -1,5 +1,5 @@
 import logging
-from typing import List
+from typing import List, Optional
 
 from langchain.schema import BaseMessage
 
@@ -84,7 +84,7 @@ class CustomApp(EmbedChain):
         if config.top_p and config.top_p != 1:
             logging.warning("Config option `top_p` is not supported by this model.")
 
-        messages = CustomApp._get_messages(prompt)
+        messages = CustomApp._get_messages(prompt, system_prompt=config.system_prompt)
 
         return chat(messages).content
 
@@ -97,7 +97,7 @@ class CustomApp(EmbedChain):
         if config.max_tokens and config.max_tokens != 1000:
             logging.warning("Config option `max_tokens` is not supported by this model.")
 
-        messages = CustomApp._get_messages(prompt)
+        messages = CustomApp._get_messages(prompt, system_prompt=config.system_prompt)
 
         return chat(messages).content
 
@@ -110,7 +110,7 @@ class CustomApp(EmbedChain):
         if config.top_p and config.top_p != 1:
             logging.warning("Config option `top_p` is not supported by this model.")
 
-        messages = CustomApp._get_messages(prompt)
+        messages = CustomApp._get_messages(prompt, system_prompt=config.system_prompt)
 
         return chat(messages).content
 
@@ -133,15 +133,19 @@ class CustomApp(EmbedChain):
         if config.top_p and config.top_p != 1:
             logging.warning("Config option `top_p` is not supported by this model.")
 
-        messages = CustomApp._get_messages(prompt)
+        messages = CustomApp._get_messages(prompt, system_prompt=config.system_prompt)
 
         return chat(messages).content
 
     @staticmethod
-    def _get_messages(prompt: str) -> List[BaseMessage]:
+    def _get_messages(prompt: str, system_prompt: Optional[str] = None) -> List[BaseMessage]:
         from langchain.schema import HumanMessage, SystemMessage
 
-        return [SystemMessage(content="You are a helpful assistant."), HumanMessage(content=prompt)]
+        messages = []
+        if system_prompt:
+            messages.append(SystemMessage(content=system_prompt))
+        messages.append(HumanMessage(content=prompt))
+        return messages
 
     def _stream_llm_model_response(self, response):
         """