feat: add logging (#206)

Author: cachho
Date: 2023-07-10 19:58:58 +02:00
Committed by: GitHub
Parent: c597b1939d
Commit: b3cf834186

3 changed files with 28 additions and 3 deletions


@@ -444,6 +444,7 @@ This section describes all possible config options.
 |option|description|type|default|
 |---|---|---|---|
+|log_level|log level|string|WARNING|
 |ef|embedding function|chromadb.utils.embedding_functions|{text-embedding-ada-002}|
 |db|vector database (experimental)|BaseVectorDB|ChromaDB|
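For orientation, here is a minimal usage sketch of the new option. The `App`/`InitConfig` import paths and the `App(config)` signature are assumptions about the surrounding package, not shown in this diff:

```python
# Hypothetical usage of the new log_level option; the import paths and the
# App(config) signature are assumptions, not taken from this diff.
from embedchain import App
from embedchain.config import InitConfig

config = InitConfig(log_level="DEBUG")
app = App(config)  # prompts and answers now show up in the log output
```

Since the default stays at WARNING, the `logging.info` calls added to `EmbedChain` below remain silent unless `log_level` is set to `INFO` or `DEBUG`.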


@@ -1,4 +1,5 @@
 import os
+import logging
 from embedchain.config.BaseConfig import BaseConfig
@@ -6,11 +7,15 @@ class InitConfig(BaseConfig):
     """
     Config to initialize an embedchain `App` instance.
     """
-    def __init__(self, ef=None, db=None):
+    def __init__(self, log_level=None, ef=None, db=None):
         """
+        :param log_level: Optional. (String) Debug level ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'].
         :param ef: Optional. Embedding function to use.
         :param db: Optional. (Vector) database to use for embeddings.
         """
+        self._setup_logging(log_level)
         # Embedding Function
         if ef is None:
             from chromadb.utils import embedding_functions
@@ -30,7 +35,18 @@ class InitConfig(BaseConfig):
         return

     def _set_embedding_function(self, ef):
         self.ef = ef
         return
+
+    def _setup_logging(self, debug_level):
+        level = logging.WARNING  # Default level
+        if debug_level is not None:
+            level = getattr(logging, debug_level.upper(), None)
+            if not isinstance(level, int):
+                raise ValueError(f'Invalid log level: {debug_level}')
+        logging.basicConfig(format="%(asctime)s [%(name)s] [%(levelname)s] %(message)s",
+                            level=level)
+        self.logger = logging.getLogger(__name__)
+        return
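The validation in `_setup_logging` leans on the fact that the `logging` module exposes its named levels as integer constants (`logging.DEBUG == 10` through `logging.CRITICAL == 50`), so `getattr` plus an `isinstance(..., int)` check rejects unknown names. A standalone sketch of that resolution logic (`resolve_level` is a hypothetical helper, not part of this commit):

```python
import logging

def resolve_level(name, default=logging.WARNING):
    """Map a level name like 'debug' to its integer constant, or fail loudly."""
    if name is None:
        return default
    level = getattr(logging, name.upper(), None)  # e.g. logging.DEBUG == 10
    if not isinstance(level, int):
        raise ValueError(f"Invalid log level: {name}")
    return level

print(resolve_level("debug"))  # 10 (lookup is case-insensitive)
print(resolve_level(None))     # 30 (falls back to WARNING)
```

One caveat worth knowing: `logging.basicConfig` configures the root logger and is a no-op if the root logger already has handlers, so applications that set up logging themselves will not have their format or level overridden by `InitConfig`.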


@@ -1,5 +1,6 @@
 import openai
 import os
+import logging
 from string import Template
 from chromadb.utils import embedding_functions
@@ -181,7 +182,9 @@ class EmbedChain:
             config = QueryConfig()
         context = self.retrieve_from_database(input_query)
         prompt = self.generate_prompt(input_query, context, config.template)
+        logging.info(f"Prompt: {prompt}")
         answer = self.get_answer_from_llm(prompt, config)
+        logging.info(f"Answer: {answer}")
         return answer

     def generate_chat_prompt(self, input_query, context, chat_history=''):
@@ -224,13 +227,16 @@ class EmbedChain:
             context,
             chat_history=chat_history,
         )
+        logging.info(f"Prompt: {prompt}")
         answer = self.get_answer_from_llm(prompt, config)
         memory.chat_memory.add_user_message(input_query)
         if isinstance(answer, str):
             memory.chat_memory.add_ai_message(answer)
+            logging.info(f"Answer: {answer}")
             return answer
         else:
-            #this is a streamed response and needs to be handled differently
+            #this is a streamed response and needs to be handled differently.
             return self._stream_chat_response(answer)

     def _stream_chat_response(self, answer):
@@ -239,6 +245,7 @@ class EmbedChain:
             streamed_answer.join(chunk)
             yield chunk
         memory.chat_memory.add_ai_message(streamed_answer)
+        logging.info(f"Answer: {streamed_answer}")

     def dry_run(self, input_query, config: QueryConfig = None):
@@ -258,6 +265,7 @@ class EmbedChain:
            config = QueryConfig()
        context = self.retrieve_from_database(input_query)
        prompt = self.generate_prompt(input_query, context, config.template)
+        logging.info(f"Prompt: {prompt}")
        return prompt

    def count(self):
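The new log statements use f-strings, which are evaluated eagerly even when the record is filtered out at the default WARNING level; `logging`'s `%`-style arguments defer formatting until a handler actually emits the record. A small sketch of the difference:

```python
import logging

logging.basicConfig(level=logging.WARNING)
prompt = "some very long prompt..."

# Eager: the f-string is built even though INFO is filtered out here.
logging.info(f"Prompt: {prompt}")

# Lazy: "Prompt: %s" is only formatted if the record is actually emitted.
logging.info("Prompt: %s", prompt)
```

For short strings the cost is negligible, so the f-string form is a reasonable readability trade-off here. Separately, the pre-existing context line `streamed_answer.join(chunk)` looks like a latent bug unrelated to this commit: `str.join` returns a new string rather than appending, so accumulating the stream would need `streamed_answer += chunk`.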