diff --git a/README.md b/README.md
index 7f10b3f5..0dd0983f 100644
--- a/README.md
+++ b/README.md
@@ -495,6 +495,10 @@ _coming soon_
 |template|custom template for prompt|Template|Template("Use the following pieces of context to answer the query at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. \$context Query: \$query Helpful Answer:")|
 |history|include conversation history from your client or database|any (recommendation: list[str])|None
 |stream|control if response is streamed back to the user|bool|False|
+|model|OpenAI model|string|gpt-3.5-turbo-0613|
+|temperature|creativity of the model (0-1)|float|0|
+|max_tokens|limit maximum tokens used|int|1000|
+|top_p|diversity of words used by the model (0-1)|float|1|
 
 #### **Chat Config**
 
diff --git a/embedchain/config/ChatConfig.py b/embedchain/config/ChatConfig.py
index 4e751745..0530aaa2 100644
--- a/embedchain/config/ChatConfig.py
+++ b/embedchain/config/ChatConfig.py
@@ -23,26 +23,26 @@ class ChatConfig(QueryConfig):
     """
     Config for the `chat` method, inherits from `QueryConfig`.
     """
-
-    def __init__(self, template: Template = None, stream: bool = False):
+    def __init__(self, template: Template = None, model=None, temperature=None, max_tokens=None, top_p=None, stream: bool = False):
         """
         Initializes the ChatConfig instance.
 
-        :param template: Optional. The `Template` instance to use as a
-        template for prompt.
-        :param stream: Optional. Control if response is streamed back to the
-        user
-        :raises ValueError: If the template is not valid as template should
-        contain $context and $query and $history
+        :param template: Optional. The `Template` instance to use as a template for the prompt.
+        :param model: Optional. The OpenAI model to use.
+        :param temperature: Optional. Controls the randomness of the model's output.
+        Higher values (closer to 1) make the output more random, lower values make it more deterministic.
+        :param max_tokens: Optional. Limits the number of tokens in the generated response.
+        :param top_p: Optional. Controls the diversity of word choice. Higher values (closer to 1) make word selection more diverse, lower values make it less diverse.
+        :param stream: Optional. Control if the response is streamed back to the user.
+        :raises ValueError: If the template is not valid; it must contain $context, $query and $history.
         """
         if template is None:
             template = DEFAULT_PROMPT_TEMPLATE
-        # History is set as 0 to ensure that there is always a history, that
-        # way, there don't have to be two templates.
-        # Having two templates would make it complicated because the history
-        # is not user controlled.
-        super().__init__(template, history=[0], stream=stream)
+
+        # History is set as [0] to ensure that there is always a history, so that there don't have to be two templates.
+        # Having two templates would make it complicated because the history is not user controlled.
+        super().__init__(template, model=model, temperature=temperature, max_tokens=max_tokens, top_p=top_p, history=[0], stream=stream)
 
     def set_history(self, history):
         """
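With these changes, the generation parameters can be tuned per call instead of being hard-coded. A minimal usage sketch, assuming the README's usual `App` flow; the import path, data source, and query are illustrative, not taken from this diff:

```python
from embedchain import App
from embedchain.config import ChatConfig

app = App()  # assumes OPENAI_API_KEY is set in the environment
app.add("web_page", "https://example.com/post")  # placeholder data source

# Parameters left as None fall back to the QueryConfig defaults:
# model="gpt-3.5-turbo-0613", temperature=0, max_tokens=1000, top_p=1.
config = ChatConfig(temperature=0.7, max_tokens=500)
print(app.chat("Summarize the post in two sentences.", config))
```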
""" - def __init__(self, template: Template = None, history=None, stream: bool = False): + def __init__(self, template: Template = None, model = None, temperature = None, max_tokens = None, top_p = None, history = None, stream: bool = False): """ Initializes the QueryConfig instance. - :param template: Optional. The `Template` instance to use as a - template for prompt. + :param template: Optional. The `Template` instance to use as a template for prompt. + :param model: Optional. Controls the OpenAI model used. + :param temperature: Optional. Controls the randomness of the model's output. + Higher values (closer to 1) make output more random, lower values make it more deterministic. + :param max_tokens: Optional. Controls how many tokens are generated. + :param top_p: Optional. Controls the diversity of words. Higher values (closer to 1) make word selection more diverse, lower values make words less diverse. :param history: Optional. A list of strings to consider as history. :param stream: Optional. Control if response is streamed back to user :raises ValueError: If the template is not valid as template should @@ -65,6 +68,12 @@ class QueryConfig(BaseConfig): else: template = DEFAULT_PROMPT_WITH_HISTORY_TEMPLATE + + self.temperature = temperature if temperature else 0 + self.max_tokens = max_tokens if max_tokens else 1000 + self.model = model if model else "gpt-3.5-turbo-0613" + self.top_p = top_p if top_p else 1 + if self.validate_template(template): self.template = template else: diff --git a/embedchain/embedchain.py b/embedchain/embedchain.py index f98d4ace..0e7f60ee 100644 --- a/embedchain/embedchain.py +++ b/embedchain/embedchain.py @@ -112,11 +112,9 @@ class EmbedChain: documents, metadatas = zip(*data_dict.values()) chunks_before_addition = self.count() - self.collection.add(documents=documents, metadatas=list(metadatas), ids=ids) print( - f"Successfully saved {src}. New chunks count: {self.count() - chunks_before_addition}" # noqa:E501 - ) + f"Successfully saved {src}. New chunks count: {self.count() - chunks_before_addition}") # noqa:E501 def _format_result(self, results): return [ @@ -305,12 +303,12 @@ class App(EmbedChain): messages = [] messages.append({"role": "user", "content": prompt}) response = openai.ChatCompletion.create( - model="gpt-3.5-turbo-0613", + model = config.model, messages=messages, - temperature=0, - max_tokens=1000, - top_p=1, - stream=config.stream, + temperature = config.temperature, + max_tokens = config.max_tokens, + top_p=config.top_p, + stream=config.stream ) if config.stream: