Show details for query tokens (#1392)

Dev Khant committed 2024-07-05 00:10:56 +05:30 (committed by GitHub)
parent ea09b5f7f0
commit 4880557d51
25 changed files with 1825 additions and 517 deletions


@@ -1,6 +1,6 @@
 import os
 from collections.abc import Iterable
-from typing import Optional, Union
+from typing import Any, Optional, Union

 from langchain.callbacks.manager import CallbackManager
 from langchain.callbacks.stdout import StdOutCallbackHandler
@@ -25,8 +25,27 @@ class NvidiaLlm(BaseLlm):
         if not self.config.api_key and "NVIDIA_API_KEY" not in os.environ:
             raise ValueError("Please set the NVIDIA_API_KEY environment variable or pass it in the config.")

-    def get_llm_model_answer(self, prompt):
-        return self._get_answer(prompt=prompt, config=self.config)
+    def get_llm_model_answer(self, prompt) -> tuple[str, Optional[dict[str, Any]]]:
+        if self.config.token_usage:
+            response, token_info = self._get_answer(prompt, self.config)
+            model_name = "nvidia/" + self.config.model
+            if model_name not in self.config.model_pricing_map:
+                raise ValueError(
+                    f"Model {model_name} not found in `model_prices_and_context_window.json`. \
+                    You can disable token usage by setting `token_usage` to False."
+                )
+            total_cost = (
+                self.config.model_pricing_map[model_name]["input_cost_per_token"] * token_info["input_tokens"]
+            ) + self.config.model_pricing_map[model_name]["output_cost_per_token"] * token_info["output_tokens"]
+            response_token_info = {
+                "prompt_tokens": token_info["input_tokens"],
+                "completion_tokens": token_info["output_tokens"],
+                "total_tokens": token_info["input_tokens"] + token_info["output_tokens"],
+                "total_cost": round(total_cost, 10),
+                "cost_currency": "USD",
+            }
+            return response, response_token_info
+        return self._get_answer(prompt, self.config)

     @staticmethod
     def _get_answer(prompt: str, config: BaseLlmConfig) -> Union[str, Iterable]:
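For context on the arithmetic in this hunk: the new branch multiplies the provider-reported token counts by per-token prices from the pricing map, then rounds the sum. A minimal standalone sketch of that calculation, assuming a pricing entry shaped like the `model_prices_and_context_window.json` records the diff reads from `model_pricing_map` (the `build_token_info` helper, the model name, the prices, and the token counts below are invented for illustration):

    from typing import Any

    # Hypothetical pricing entry, mirroring the input_cost_per_token /
    # output_cost_per_token fields that get_llm_model_answer reads from
    # config.model_pricing_map (these prices are made up).
    model_pricing_map: dict[str, dict[str, Any]] = {
        "nvidia/example-model": {
            "input_cost_per_token": 2e-07,
            "output_cost_per_token": 6e-07,
        }
    }

    def build_token_info(model_name: str, token_info: dict[str, int]) -> dict[str, Any]:
        # Weight the provider-reported counts by the per-token prices, as in the diff.
        pricing = model_pricing_map[model_name]
        total_cost = (
            pricing["input_cost_per_token"] * token_info["input_tokens"]
            + pricing["output_cost_per_token"] * token_info["output_tokens"]
        )
        return {
            "prompt_tokens": token_info["input_tokens"],
            "completion_tokens": token_info["output_tokens"],
            "total_tokens": token_info["input_tokens"] + token_info["output_tokens"],
            "total_cost": round(total_cost, 10),
            "cost_currency": "USD",
        }

    # 12 input + 40 output tokens -> (12 * 2e-07) + (40 * 6e-07) = 2.64e-05 USD
    print(build_token_info("nvidia/example-model", {"input_tokens": 12, "output_tokens": 40}))

The `round(total_cost, 10)` keeps sub-cent per-query costs readable without long floating-point tails.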
@@ -43,4 +62,7 @@ class NvidiaLlm(BaseLlm):
         if labels:
             params["labels"] = labels
         llm = ChatNVIDIA(**params, callback_manager=CallbackManager(callback_manager))
-        return llm.invoke(prompt).content if labels is None else llm.invoke(prompt, labels=labels).content
+        chat_response = llm.invoke(prompt) if labels is None else llm.invoke(prompt, labels=labels)
+        if config.token_usage:
+            return chat_response.content, chat_response.response_metadata["token_usage"]
+        return chat_response.content
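With this change, `get_llm_model_answer` returns either a bare string or a `(response, token_info)` pair depending on `config.token_usage`. A rough caller-side sketch of both shapes, assuming the import paths and the `BaseLlmConfig` constructor arguments shown here (neither is part of this diff) and a valid `NVIDIA_API_KEY` in the environment:

    import os

    # Import paths are an assumption; adjust to where NvidiaLlm lives in this repo.
    from embedchain.config import BaseLlmConfig
    from embedchain.llm.nvidia import NvidiaLlm

    os.environ.setdefault("NVIDIA_API_KEY", "nvapi-...")  # placeholder key

    # token_usage=True switches on the (response, token_info) return shape;
    # the model value is illustrative, not taken from the diff.
    config = BaseLlmConfig(model="...", token_usage=True)
    llm = NvidiaLlm(config=config)

    answer, token_info = llm.get_llm_model_answer("What did my last query cost?")
    print(answer)
    print(token_info["total_tokens"], token_info["total_cost"], token_info["cost_currency"])

    # With token_usage disabled (the default path), only the string comes back.
    config.token_usage = False
    answer = llm.get_llm_model_answer("What did my last query cost?")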