From fee3c27af3693eff0c49a8eb3812016a7bb3bc1c Mon Sep 17 00:00:00 2001
From: Pranav Puranik <54378813+PranavPuranik@users.noreply.github.com>
Date: Thu, 29 Aug 2024 04:48:50 -0500
Subject: [PATCH] Adding proxy server settings to azure openai (#1753)

---
 docs/components/embedders/config.mdx |  1 +
 docs/components/llms/config.mdx      |  1 +
 mem0/configs/embeddings/base.py      | 11 ++++++++++-
 mem0/configs/llms/base.py            | 12 +++++++++++-
 mem0/embeddings/azure_openai.py      |  2 +-
 mem0/llms/azure_openai.py            |  3 +--
 tests/llms/test_azure_openai.py      | 22 +++++++++++++++++++++-
 7 files changed, 46 insertions(+), 6 deletions(-)

diff --git a/docs/components/embedders/config.mdx b/docs/components/embedders/config.mdx
index 32ffc287..1057bb39 100644
--- a/docs/components/embedders/config.mdx
+++ b/docs/components/embedders/config.mdx
@@ -48,6 +48,7 @@ Here's a comprehensive list of all parameters that can be used across different
 | `model` | Embedding model to use |
 | `api_key` | API key of the provider |
 | `embedding_dims` | Dimensions of the embedding model |
+| `http_client_proxies` | Proxy server settings for the HTTP client |
 | `ollama_base_url` | Base URL for the Ollama embedding model |
 | `model_kwargs` | Key-Value arguments for the Huggingface embedding model |
 
diff --git a/docs/components/llms/config.mdx b/docs/components/llms/config.mdx
index 2cf296ba..89f9562c 100644
--- a/docs/components/llms/config.mdx
+++ b/docs/components/llms/config.mdx
@@ -53,6 +53,7 @@ Here's the table based on the provided parameters:
 | `max_tokens` | Tokens to generate | All |
 | `top_p` | Probability threshold for nucleus sampling | All |
 | `top_k` | Number of highest probability tokens to keep | All |
+| `http_client_proxies`| Proxy server settings for the HTTP client | AzureOpenAI |
 | `models` | List of models | Openrouter |
 | `route` | Routing strategy | Openrouter |
 | `openrouter_base_url`| Base URL for Openrouter API | Openrouter |
diff --git a/mem0/configs/embeddings/base.py b/mem0/configs/embeddings/base.py
index 20083e1e..68cfb033 100644
--- a/mem0/configs/embeddings/base.py
+++ b/mem0/configs/embeddings/base.py
@@ -1,5 +1,7 @@
 from abc import ABC
-from typing import Optional
+from typing import Optional, Union, Dict
+
+import httpx
 
 
 class BaseEmbedderConfig(ABC):
@@ -18,6 +20,8 @@ class BaseEmbedderConfig(ABC):
         openai_base_url: Optional[str] = None,
         # Huggingface specific
         model_kwargs: Optional[dict] = None,
+        # AzureOpenAI specific
+        http_client_proxies: Optional[Union[Dict, str]] = None,
     ):
         """
         Initializes a configuration class instance for the Embeddings.
@@ -34,6 +38,8 @@ class BaseEmbedderConfig(ABC):
         :type model_kwargs: Optional[Dict[str, Any]], defaults a dict inside init
         :param openai_base_url: Openai base URL to be use, defaults to "https://api.openai.com/v1"
         :type openai_base_url: Optional[str], optional
+        :param http_client_proxies: The proxy server settings used to create self.http_client, defaults to None
+        :type http_client_proxies: Optional[Union[Dict, str]], optional
         """
 
         self.model = model
@@ -41,6 +47,9 @@ class BaseEmbedderConfig(ABC):
         self.openai_base_url = openai_base_url
         self.embedding_dims = embedding_dims
 
+        # AzureOpenAI specific
+        self.http_client = httpx.Client(proxies=http_client_proxies) if http_client_proxies else None
+
         # Ollama specific
         self.ollama_base_url = ollama_base_url
 
diff --git a/mem0/configs/llms/base.py b/mem0/configs/llms/base.py
index 3bc67b2e..32a7096c 100644
--- a/mem0/configs/llms/base.py
+++ b/mem0/configs/llms/base.py
@@ -1,5 +1,7 @@
 from abc import ABC
-from typing import Optional
+from typing import Optional, Union, Dict
+
+import httpx
 
 
 class BaseLlmConfig(ABC):
@@ -25,6 +27,9 @@ class BaseLlmConfig(ABC):
         app_name: Optional[str] = None,
         # Ollama specific
         ollama_base_url: Optional[str] = None,
+
+        # AzureOpenAI specific
+        http_client_proxies: Optional[Union[Dict, str]] = None,
     ):
         """
         Initializes a configuration class instance for the LLM.
@@ -57,6 +62,8 @@ class BaseLlmConfig(ABC):
         :type ollama_base_url: Optional[str], optional
         :param openai_base_url: Openai base URL to be use, defaults to "https://api.openai.com/v1"
         :type openai_base_url: Optional[str], optional
+        :param http_client_proxies: The proxy server settings used to create self.http_client, defaults to None
+        :type http_client_proxies: Optional[Union[Dict, str]], optional
         """
 
         self.model = model
@@ -66,6 +73,9 @@ class BaseLlmConfig(ABC):
         self.top_p = top_p
         self.top_k = top_k
 
+        # AzureOpenAI specific
+        self.http_client = httpx.Client(proxies=http_client_proxies) if http_client_proxies else None
+
         # Openrouter specific
         self.models = models
         self.route = route
diff --git a/mem0/embeddings/azure_openai.py b/mem0/embeddings/azure_openai.py
index 10c804ec..533d63ec 100644
--- a/mem0/embeddings/azure_openai.py
+++ b/mem0/embeddings/azure_openai.py
@@ -17,7 +17,7 @@ class AzureOpenAIEmbedding(EmbeddingBase):
             self.config.embedding_dims = 1536
 
         api_key = os.getenv("AZURE_OPENAI_API_KEY") or self.config.api_key
-        self.client = AzureOpenAI(api_key=api_key)
+        self.client = AzureOpenAI(api_key=api_key, http_client=self.config.http_client)
 
     def embed(self, text):
         """
diff --git a/mem0/llms/azure_openai.py b/mem0/llms/azure_openai.py
index 3de9cedc..0cc8d102 100644
--- a/mem0/llms/azure_openai.py
+++ b/mem0/llms/azure_openai.py
@@ -15,10 +15,9 @@ class AzureOpenAILLM(LLMBase):
         # Model name should match the custom deployment name chosen for it.
         if not self.config.model:
             self.config.model = "gpt-4o"
 
-        self.client = AzureOpenAI()
         api_key = os.getenv("AZURE_OPENAI_API_KEY") or self.config.api_key
-        self.client = AzureOpenAI(api_key=api_key)
+        self.client = AzureOpenAI(api_key=api_key, http_client=self.config.http_client)
 
     def _parse_response(self, response, tools):
         """
diff --git a/tests/llms/test_azure_openai.py b/tests/llms/test_azure_openai.py
index 95b73d80..4da64862 100644
--- a/tests/llms/test_azure_openai.py
+++ b/tests/llms/test_azure_openai.py
@@ -1,5 +1,8 @@
+
+import httpx
 import pytest
 from unittest.mock import Mock, patch
+
 from mem0.llms.azure_openai import AzureOpenAILLM
 from mem0.configs.llms.base import BaseLlmConfig
 
@@ -91,4 +94,21 @@ def test_generate_response_with_tools(mock_openai_client):
     assert len(response["tool_calls"]) == 1
     assert response["tool_calls"][0]["name"] == "add_memory"
     assert response["tool_calls"][0]["arguments"] == {'data': 'Today is a sunny day.'}
-    
\ No newline at end of file
+
+def test_generate_with_http_proxies():
+    mock_http_client = Mock(spec=httpx.Client)
+    mock_http_client_instance = Mock(spec=httpx.Client)
+    mock_http_client.return_value = mock_http_client_instance
+
+    with (patch("mem0.llms.azure_openai.AzureOpenAI") as mock_azure_openai,
+          patch("httpx.Client", new=mock_http_client)):
+        config = BaseLlmConfig(model=MODEL, temperature=TEMPERATURE, max_tokens=MAX_TOKENS, top_p=TOP_P,
+                               api_key="test", http_client_proxies="http://testproxy.mem0.net:8000")
+
+        _ = AzureOpenAILLM(config)
+
+        mock_azure_openai.assert_called_once_with(
+            api_key="test",
+            http_client=mock_http_client_instance
+        )
+        mock_http_client.assert_called_once_with(proxies="http://testproxy.mem0.net:8000")
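
Reviewer note: a minimal usage sketch of the new `http_client_proxies` option, not part of
the patch itself. The deployment name and proxy URL are placeholders, and it assumes an
httpx release where `httpx.Client(proxies=...)` is still accepted (the `proxies` argument
was removed in httpx 0.28 in favor of `proxy`/`mounts`):

    from mem0.configs.llms.base import BaseLlmConfig
    from mem0.llms.azure_openai import AzureOpenAILLM

    config = BaseLlmConfig(
        model="my-gpt-4o-deployment",  # custom Azure deployment name (placeholder)
        api_key="test",                # or set AZURE_OPENAI_API_KEY in the environment
        # A single proxy URL routes all traffic through one proxy; a scheme -> proxy
        # mapping such as {"http://": "http://proxy.example.com:8080"} also works,
        # since the value is passed straight to httpx.Client(proxies=...).
        http_client_proxies="http://proxy.example.com:8080",
    )
    llm = AzureOpenAILLM(config)  # AzureOpenAI is constructed with http_client=config.http_client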