Add TogetherAI support (#1485)
mem0/llms/together.py (new file, 40 lines)
@@ -0,0 +1,40 @@
from typing import Dict, List, Optional

from together import Together

from mem0.llms.base import LLMBase


class TogetherLLM(LLMBase):
    def __init__(self, model="mistralai/Mixtral-8x7B-Instruct-v0.1"):
        self.client = Together()
        self.model = model

    def generate_response(
        self,
        messages: List[Dict[str, str]],
        response_format=None,
        tools: Optional[List[Dict]] = None,
        tool_choice: str = "auto",
    ):
        """
        Generate a response based on the given messages using TogetherAI.

        Args:
            messages (list): List of message dicts containing 'role' and 'content'.
            response_format (str or object, optional): Format of the response. Defaults to "text".
            tools (list, optional): List of tools that the model can call. Defaults to None.
            tool_choice (str, optional): Tool choice method. Defaults to "auto".

        Returns:
            str: The generated response.
        """
        params = {"model": self.model, "messages": messages}
        if response_format:
            params["response_format"] = response_format
        if tools:
            params["tools"] = tools
            params["tool_choice"] = tool_choice

        response = self.client.chat.completions.create(**params)
        return response
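For context, a minimal usage sketch of the class added above. It assumes a valid TOGETHER_API_KEY is exported in the environment (the Together() client reads it automatically); the message contents are illustrative only.

from mem0.llms.together import TogetherLLM

# Assumes TOGETHER_API_KEY is set in the environment.
llm = TogetherLLM()  # default model: mistralai/Mixtral-8x7B-Instruct-v0.1
response = llm.generate_response(
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello in one short sentence."},
    ]
)
# generate_response returns the raw chat completion object,
# so the generated text lives on the first choice's message.
print(response.choices[0].message.content)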
@@ -11,7 +11,8 @@ class LlmFactory:
     provider_to_class = {
         "ollama": "mem0.llms.ollama.OllamaLLM",
         "openai": "mem0.llms.openai.OpenAILLM",
-        "groq": "mem0.llms.groq.GroqLLM"
+        "groq": "mem0.llms.groq.GroqLLM",
+        "together": "mem0.llms.together.TogetherLLM"
     }

     @classmethod
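The factory change just registers the dotted class path under a new "together" key. As a rough sketch of the resolution step this enables (illustrative only; the factory's actual create() signature is not shown in this diff), the lookup amounts to an importlib-based load:

import importlib

# Illustrative resolution of the new mapping; the variable names here are
# assumptions, not taken from this diff.
class_path = "mem0.llms.together.TogetherLLM"
module_path, class_name = class_path.rsplit(".", 1)
together_cls = getattr(importlib.import_module(module_path), class_name)
llm = together_cls()  # instantiates TogetherLLM with its default model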