Fixes OpenAI pytests: args change and pathlib reference for the pricing file (#1602)

This commit is contained in:
Pranav Puranik
2024-07-31 09:56:23 -05:00
committed by GitHub
parent 47afe52296
commit abd4ec64eb
3 changed files with 25 additions and 38 deletions

@@ -3,6 +3,7 @@ import logging
 import re
 from string import Template
 from typing import Any, Mapping, Optional, Dict, Union
+from pathlib import Path
 import httpx
@@ -234,8 +235,8 @@ class BaseLlmConfig(BaseConfig):
         self.api_version = api_version
         if token_usage:
-            f = open("embedchain/config/model_prices_and_context_window.json")
-            self.model_pricing_map = json.load(f)
+            f = Path(__file__).resolve().parent.parent / "model_prices_and_context_window.json"
+            self.model_pricing_map = json.load(f.open())
         if isinstance(prompt, str):
             prompt = Template(prompt)

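Context for the change above: the old code opened the pricing JSON with a path relative to the process working directory, so it only worked when run from the repository root. The new code resolves the file relative to the module itself. A minimal sketch of the pattern, assuming the same file layout as the diff (the helper name below is illustrative, not part of the codebase):

```python
import json
from pathlib import Path

# __file__ is the path of this module; .resolve() makes it absolute, and
# .parent.parent walks up two directories, matching the diff's layout where
# the JSON file sits one level above the package containing this module.
PRICING_FILE = Path(__file__).resolve().parent.parent / "model_prices_and_context_window.json"

def load_model_pricing_map() -> dict:
    # A context manager closes the handle explicitly; the committed
    # one-liner json.load(f.open()) leaves closing to garbage collection.
    with PRICING_FILE.open() as f:
        return json.load(f)
```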
embedchain/poetry.lock (generated)

@@ -1020,7 +1020,7 @@ tests = ["dj-database-url", "dj-email-url", "django-cache-url", "pytest"]
name = "eval-type-backport" name = "eval-type-backport"
version = "0.2.0" version = "0.2.0"
description = "Like `typing._eval_type`, but lets older Python versions use newer typing features." description = "Like `typing._eval_type`, but lets older Python versions use newer typing features."
optional = false optional = true
python-versions = ">=3.8" python-versions = ">=3.8"
files = [ files = [
{file = "eval_type_backport-0.2.0-py3-none-any.whl", hash = "sha256:ac2f73d30d40c5a30a80b8739a789d6bb5e49fdffa66d7912667e2015d9c9933"}, {file = "eval_type_backport-0.2.0-py3-none-any.whl", hash = "sha256:ac2f73d30d40c5a30a80b8739a789d6bb5e49fdffa66d7912667e2015d9c9933"},
@@ -1762,25 +1762,6 @@ files = [
docs = ["Sphinx", "furo"] docs = ["Sphinx", "furo"]
test = ["objgraph", "psutil"] test = ["objgraph", "psutil"]
[[package]]
name = "groq"
version = "0.9.0"
description = "The official Python library for the groq API"
optional = false
python-versions = ">=3.7"
files = [
{file = "groq-0.9.0-py3-none-any.whl", hash = "sha256:d0e46f4ad645504672bb09c8100af3ced3a7db0d5119dc13e4aca535fc455874"},
{file = "groq-0.9.0.tar.gz", hash = "sha256:130ed5e35d3acfaab46b9e7a078eeaebf91052f4a9d71f86f87fb319b5fec332"},
]
[package.dependencies]
anyio = ">=3.5.0,<5"
distro = ">=1.7.0,<2"
httpx = ">=0.23.0,<1"
pydantic = ">=1.9.0,<3"
sniffio = "*"
typing-extensions = ">=4.7,<5"
[[package]] [[package]]
name = "grpc-google-iam-v1" name = "grpc-google-iam-v1"
version = "0.13.1" version = "0.13.1"
@@ -2721,23 +2702,20 @@ files = [
 [[package]]
 name = "mem0ai"
-version = "0.0.5"
+version = "0.0.9"
 description = "Long-term memory for AI Agents"
 optional = false
 python-versions = "<4.0,>=3.8"
 files = [
-    {file = "mem0ai-0.0.5-py3-none-any.whl", hash = "sha256:6f6e5356fd522adf0510322cd581476ea456fd7ccefca11b5ac050e9a6f00f36"},
-    {file = "mem0ai-0.0.5.tar.gz", hash = "sha256:f2ac35d15e4e620becb8d06b8ebeb1ffa85fac0b7cb2d3138056babec48dd5dd"},
+    {file = "mem0ai-0.0.9-py3-none-any.whl", hash = "sha256:d4de435729af4fd3d597d022ffb2af89a0630d6c3b4769792bbe27d2ce816858"},
+    {file = "mem0ai-0.0.9.tar.gz", hash = "sha256:e4374d5d04aa3f543cd3325f700e4b62f5358ae1c6fa5c44b2ff790c10c4e5f1"},
 ]
 
 [package.dependencies]
-boto3 = ">=1.34.144,<2.0.0"
-groq = ">=0.9.0,<0.10.0"
 openai = ">=1.33.0,<2.0.0"
 posthog = ">=3.5.0,<4.0.0"
 pydantic = ">=2.7.3,<3.0.0"
 qdrant-client = ">=1.9.1,<2.0.0"
-together = ">=1.2.1,<2.0.0"
 
 [[package]]
 name = "milvus-lite"
@@ -2748,6 +2726,7 @@ python-versions = ">=3.7"
 files = [
     {file = "milvus_lite-2.4.8-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:b7e90b34b214884cd44cdc112ab243d4cb197b775498355e2437b6cafea025fe"},
     {file = "milvus_lite-2.4.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:519dfc62709d8f642d98a1c5b1dcde7080d107e6e312d677fef5a3412a40ac08"},
+    {file = "milvus_lite-2.4.8-py3-none-manylinux2014_aarch64.whl", hash = "sha256:b21f36d24cbb0e920b4faad607019bb28c1b2c88b4d04680ac8c7697a4ae8a4d"},
     {file = "milvus_lite-2.4.8-py3-none-manylinux2014_x86_64.whl", hash = "sha256:08332a2b9abfe7c4e1d7926068937e46f8fb81f2707928b7bc02c9dc99cebe41"},
 ]
@@ -3260,6 +3239,7 @@ description = "Nvidia JIT LTO Library"
 optional = true
 python-versions = ">=3"
 files = [
+    {file = "nvidia_nvjitlink_cu12-12.5.82-py3-none-manylinux2014_aarch64.whl", hash = "sha256:98103729cc5226e13ca319a10bbf9433bbbd44ef64fe72f45f067cacc14b8d27"},
     {file = "nvidia_nvjitlink_cu12-12.5.82-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f9b37bc5c8cf7509665cb6ada5aaa0ce65618f2332b7d3e78e9790511f111212"},
     {file = "nvidia_nvjitlink_cu12-12.5.82-py3-none-win_amd64.whl", hash = "sha256:e782564d705ff0bf61ac3e1bf730166da66dd2fe9012f111ede5fc49b64ae697"},
 ]
@@ -3726,7 +3706,7 @@ files = [
name = "pillow" name = "pillow"
version = "10.4.0" version = "10.4.0"
description = "Python Imaging Library (Fork)" description = "Python Imaging Library (Fork)"
optional = false optional = true
python-versions = ">=3.8" python-versions = ">=3.8"
files = [ files = [
{file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"},
@@ -4108,7 +4088,7 @@ files = [
name = "pyarrow" name = "pyarrow"
version = "15.0.0" version = "15.0.0"
description = "Python library for Apache Arrow" description = "Python library for Apache Arrow"
optional = false optional = true
python-versions = ">=3.8" python-versions = ">=3.8"
files = [ files = [
{file = "pyarrow-15.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:0a524532fd6dd482edaa563b686d754c70417c2f72742a8c990b322d4c03a15d"}, {file = "pyarrow-15.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:0a524532fd6dd482edaa563b686d754c70417c2f72742a8c990b322d4c03a15d"},
@@ -5555,7 +5535,7 @@ blobfile = ["blobfile (>=2)"]
name = "together" name = "together"
version = "1.2.1" version = "1.2.1"
description = "Python client for Together's Cloud Platform!" description = "Python client for Together's Cloud Platform!"
optional = false optional = true
python-versions = "<4.0,>=3.8" python-versions = "<4.0,>=3.8"
files = [ files = [
{file = "together-1.2.1-py3-none-any.whl", hash = "sha256:a94408074e0e50b3dab1d4001cb36a3fdbd0e4d6a0e659ecaae6b7b6355f5369"}, {file = "together-1.2.1-py3-none-any.whl", hash = "sha256:a94408074e0e50b3dab1d4001cb36a3fdbd0e4d6a0e659ecaae6b7b6355f5369"},
@@ -6605,4 +6585,4 @@ weaviate = ["weaviate-client"]
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.9,<=3.13"
-content-hash = "7857242f71bf8dd9374a9b244a2c0ca88ff455c5b83b478f65ed59b1958a69e6"
+content-hash = "8197f676b36fed2bf02f33cd15e83c3e6640ae5ba216210af5777ab2dc139480"

@@ -114,7 +114,8 @@ def test_get_llm_model_answer_without_system_prompt(config, mocker):
         model=config.model,
         temperature=config.temperature,
         max_tokens=config.max_tokens,
-        model_kwargs={"top_p": config.top_p},
+        model_kwargs={},
+        top_p=config.top_p,
         api_key=os.environ["OPENAI_API_KEY"],
         base_url=os.environ["OPENAI_API_BASE"],
         http_client=None,
@@ -133,7 +134,8 @@ def test_get_llm_model_answer_with_special_headers(config, mocker):
         model=config.model,
         temperature=config.temperature,
         max_tokens=config.max_tokens,
-        model_kwargs={"top_p": config.top_p},
+        model_kwargs={},
+        top_p=config.top_p,
         api_key=os.environ["OPENAI_API_KEY"],
         base_url=os.environ["OPENAI_API_BASE"],
         default_headers={"test": "test"},
@@ -153,7 +155,8 @@ def test_get_llm_model_answer_with_model_kwargs(config, mocker):
         model=config.model,
         temperature=config.temperature,
         max_tokens=config.max_tokens,
-        model_kwargs={"top_p": config.top_p, "response_format": {"type": "json_object"}},
+        model_kwargs={"response_format": {"type": "json_object"}},
+        top_p=config.top_p,
         api_key=os.environ["OPENAI_API_KEY"],
         base_url=os.environ["OPENAI_API_BASE"],
         http_client=None,
@@ -181,7 +184,8 @@ def test_get_llm_model_answer_with_tools(config, mocker, mock_return, expected):
         model=config.model,
         temperature=config.temperature,
         max_tokens=config.max_tokens,
-        model_kwargs={"top_p": config.top_p},
+        model_kwargs={},
+        top_p=config.top_p,
         api_key=os.environ["OPENAI_API_KEY"],
         base_url=os.environ["OPENAI_API_BASE"],
         http_client=None,
@@ -218,7 +222,8 @@ def test_get_llm_model_answer_with_http_client_proxies(env_config, mocker):
         model=config.model,
         temperature=config.temperature,
         max_tokens=config.max_tokens,
-        model_kwargs={"top_p": config.top_p},
+        model_kwargs={},
+        top_p=config.top_p,
         api_key=os.environ["OPENAI_API_KEY"],
         base_url=os.environ["OPENAI_API_BASE"],
         http_client=mock_http_client_instance,
@@ -252,7 +257,8 @@ def test_get_llm_model_answer_with_http_async_client_proxies(env_config, mocker)
         model=config.model,
         temperature=config.temperature,
         max_tokens=config.max_tokens,
-        model_kwargs={"top_p": config.top_p},
+        model_kwargs={},
+        top_p=config.top_p,
         api_key=os.environ["OPENAI_API_KEY"],
         base_url=os.environ["OPENAI_API_BASE"],
         http_client=None,
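The test updates above track a signature change in langchain-openai's `ChatOpenAI`: `top_p` is accepted as a named constructor argument, and recent releases warn when sampling parameters are smuggled through `model_kwargs` instead. A minimal sketch of the construction the updated assertions expect (the model name and values are illustrative, not from the diff; the tests use `config.model` and friends):

```python
from langchain_openai import ChatOpenAI

# Old style, which newer langchain-openai versions flag with a
# "should be specified explicitly" warning:
#   ChatOpenAI(model="gpt-4o-mini", model_kwargs={"top_p": 0.9})

# New style: top_p is passed directly; model_kwargs is reserved for
# parameters ChatOpenAI does not expose, e.g. response_format.
llm = ChatOpenAI(
    model="gpt-4o-mini",  # illustrative model name
    temperature=0.7,
    max_tokens=100,
    top_p=0.9,
    model_kwargs={"response_format": {"type": "json_object"}},
)
```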