DeepSeek Integration (#2173)
@@ -33,6 +33,8 @@ class BaseLlmConfig(ABC):
        azure_kwargs: Optional[AzureConfig] = {},
        # AzureOpenAI specific
        http_client_proxies: Optional[Union[Dict, str]] = None,
        # DeepSeek specific
        deepseek_base_url: Optional[str] = None,
    ):
        """
        Initializes a configuration class instance for the LLM.
@@ -69,6 +71,8 @@ class BaseLlmConfig(ABC):
        :type azure_kwargs: Optional[Dict[str, Any]], defaults to a dict inside init
        :param http_client_proxies: The proxy server(s) settings used to create self.http_client, defaults to None
        :type http_client_proxies: Optional[Dict | str], optional
        :param deepseek_base_url: DeepSeek base URL to be used, defaults to None
        :type deepseek_base_url: Optional[str], optional
        """

        self.model = model
@@ -92,5 +96,8 @@ class BaseLlmConfig(ABC):
        # Ollama specific
        self.ollama_base_url = ollama_base_url

        # DeepSeek specific
        self.deepseek_base_url = deepseek_base_url

        # AzureOpenAI specific
        self.azure_kwargs = AzureConfig(**azure_kwargs) or {}
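For context, a minimal sketch of how the new deepseek_base_url field can be supplied through mem0's dict-based configuration; the top-level config shape here is assumed from mem0's usual pattern and is not part of this diff:

    # Sketch, assuming mem0's usual dict-based config shape (not part of this diff)
    config = {
        "llm": {
            "provider": "deepseek",
            "config": {
                "model": "deepseek-chat",
                "temperature": 0.2,
                "deepseek_base_url": "https://api.deepseek.com",  # the field added above
            },
        }
    }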
@@ -22,6 +22,7 @@ class LlmConfig(BaseModel):
            "openai_structured",
            "azure_openai_structured",
            "gemini",
            "deepseek",
        ):
            return v
        else:
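The validator change above only whitelists the new provider string. A minimal sketch of the effect, assuming LlmConfig lives in mem0.llms.configs (import path assumed) and that the truncated else branch raises a validation error:

    from mem0.llms.configs import LlmConfig  # import path assumed

    LlmConfig(provider="deepseek")   # accepted after this change
    LlmConfig(provider="no_such")    # still rejected by the else branch above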
mem0/llms/deepseek.py (new file, 84 lines)
@@ -0,0 +1,84 @@
import json
import os
from typing import Dict, List, Optional

from openai import OpenAI

from mem0.configs.llms.base import BaseLlmConfig
from mem0.llms.base import LLMBase


class DeepSeekLLM(LLMBase):
    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config)

        if not self.config.model:
            self.config.model = "deepseek-chat"

        api_key = self.config.api_key or os.getenv("DEEPSEEK_API_KEY")
        base_url = self.config.deepseek_base_url or os.getenv("DEEPSEEK_API_BASE") or "https://api.deepseek.com"
        self.client = OpenAI(api_key=api_key, base_url=base_url)

    def _parse_response(self, response, tools):
        """
        Process the response based on whether tools are used or not.

        Args:
            response: The raw response from API.
            tools: The list of tools provided in the request.

        Returns:
            str or dict: The processed response.
        """
        if tools:
            processed_response = {
                "content": response.choices[0].message.content,
                "tool_calls": [],
            }

            if response.choices[0].message.tool_calls:
                for tool_call in response.choices[0].message.tool_calls:
                    processed_response["tool_calls"].append(
                        {
                            "name": tool_call.function.name,
                            "arguments": json.loads(tool_call.function.arguments),
                        }
                    )

            return processed_response
        else:
            return response.choices[0].message.content

    def generate_response(
        self,
        messages: List[Dict[str, str]],
        response_format=None,
        tools: Optional[List[Dict]] = None,
        tool_choice: str = "auto",
    ):
        """
        Generate a response based on the given messages using DeepSeek.

        Args:
            messages (list): List of message dicts containing 'role' and 'content'.
            response_format (str or object, optional): Format of the response. Defaults to None.
            tools (list, optional): List of tools that the model can call. Defaults to None.
            tool_choice (str, optional): Tool choice method. Defaults to "auto".

        Returns:
            str: The generated response.
        """
        params = {
            "model": self.config.model,
            "messages": messages,
            "temperature": self.config.temperature,
            "max_tokens": self.config.max_tokens,
            "top_p": self.config.top_p,
        }

        if tools:
            params["tools"] = tools
            params["tool_choice"] = tool_choice

        response = self.client.chat.completions.create(**params)
        return self._parse_response(response, tools)
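A short usage sketch of the new class; the API key value is a placeholder, and BaseLlmConfig is assumed to be directly instantiable (it declares no abstract methods in this diff):

    import os

    from mem0.configs.llms.base import BaseLlmConfig
    from mem0.llms.deepseek import DeepSeekLLM

    os.environ["DEEPSEEK_API_KEY"] = "sk-..."  # placeholder; DEEPSEEK_API_BASE is optional

    llm = DeepSeekLLM(BaseLlmConfig(model="deepseek-chat", temperature=0.2))
    reply = llm.generate_response(
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Say hello."},
        ]
    )
    print(reply)  # plain string; when tools are passed, a dict with "content" and "tool_calls"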
@@ -23,6 +23,7 @@ class LlmFactory:
        "anthropic": "mem0.llms.anthropic.AnthropicLLM",
        "azure_openai_structured": "mem0.llms.azure_openai_structured.AzureOpenAIStructuredLLM",
        "gemini": "mem0.llms.gemini.GeminiLLM",
        "deepseek": "mem0.llms.deepseek.DeepSeekLLM",
    }

    @classmethod
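With the registry entry in place, the provider can also be resolved through the factory; the create signature below is assumed from how the registry is used elsewhere in mem0 and is not shown in this diff:

    from mem0.utils.factory import LlmFactory  # import path assumed

    llm = LlmFactory.create("deepseek", {"model": "deepseek-chat"})
    print(llm.generate_response(messages=[{"role": "user", "content": "ping"}]))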