Add: Json Parsing to solve Hallucination Errors (#3013)
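Across the provider classes touched below (AWSBedrockLLM, AzureOpenAILLM, DeepSeekLLM, GroqLLM, LiteLLM, OpenAILLM, TogetherLLM, VllmLLM), every bare `json.loads(...)` on model output is wrapped as `json.loads(extract_json(...))`, with `extract_json` imported from `mem0.memory.utils`. This guards against "hallucinated" formatting, where the model wraps otherwise valid JSON in markdown fences or surrounding prose and `json.loads` raises `JSONDecodeError`. The real helper lives in `mem0.memory.utils`; a minimal sketch of the idea — an illustrative approximation, not the mem0 implementation — assuming the goal is simply to strip fences and surrounding text before parsing:

```python
import re


def extract_json(text: str) -> str:
    """Best-effort sketch: pull a JSON payload out of noisy LLM output.

    Illustrative only; the actual implementation is in mem0.memory.utils.
    """
    text = text.strip()
    # Prefer content inside a ```json ... ``` (or plain ```) fence if present.
    fenced = re.search(r"```(?:json)?\s*(.*?)\s*```", text, re.DOTALL)
    if fenced:
        return fenced.group(1)
    # Otherwise fall back to the outermost {...} span, if any.
    start, end = text.find("{"), text.rfind("}")
    if start != -1 and end > start:
        return text[start : end + 1]
    # Nothing recognizable: return the input and let json.loads report the error.
    return text
```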
@@ -10,6 +10,7 @@ except ImportError:
 
 from mem0.configs.llms.base import BaseLlmConfig
 from mem0.llms.base import LLMBase
+from mem0.memory.utils import extract_json
 
 PROVIDERS = ["ai21", "amazon", "anthropic", "cohere", "meta", "mistral", "stability", "writer"]
 
@@ -101,7 +102,7 @@ class AWSBedrockLLM(LLMBase):
             return processed_response
 
         response_body = response.get("body").read().decode()
-        response_json = json.loads(response_body)
+        response_json = json.loads(extract_json(response_body))
         return response_json.get("content", [{"text": ""}])[0].get("text", "")
 
     def _prepare_input(
@@ -6,6 +6,7 @@ from openai import AzureOpenAI
 
 from mem0.configs.llms.base import BaseLlmConfig
 from mem0.llms.base import LLMBase
+from mem0.memory.utils import extract_json
 
 
 class AzureOpenAILLM(LLMBase):
@@ -53,7 +54,7 @@ class AzureOpenAILLM(LLMBase):
                 processed_response["tool_calls"].append(
                     {
                         "name": tool_call.function.name,
-                        "arguments": json.loads(tool_call.function.arguments),
+                        "arguments": json.loads(extract_json(tool_call.function.arguments)),
                     }
                 )
 
@@ -6,6 +6,7 @@ from openai import OpenAI
 
 from mem0.configs.llms.base import BaseLlmConfig
 from mem0.llms.base import LLMBase
+from mem0.memory.utils import extract_json
 
 
 class DeepSeekLLM(LLMBase):
@@ -41,7 +42,7 @@ class DeepSeekLLM(LLMBase):
                 processed_response["tool_calls"].append(
                     {
                         "name": tool_call.function.name,
-                        "arguments": json.loads(tool_call.function.arguments),
+                        "arguments": json.loads(extract_json(tool_call.function.arguments)),
                     }
                 )
 
@@ -9,6 +9,7 @@ except ImportError:
 
 from mem0.configs.llms.base import BaseLlmConfig
 from mem0.llms.base import LLMBase
+from mem0.memory.utils import extract_json
 
 
 class GroqLLM(LLMBase):
@@ -43,7 +44,7 @@ class GroqLLM(LLMBase):
                 processed_response["tool_calls"].append(
                     {
                         "name": tool_call.function.name,
-                        "arguments": json.loads(tool_call.function.arguments),
+                        "arguments": json.loads(extract_json(tool_call.function.arguments)),
                     }
                 )
 
@@ -8,6 +8,7 @@ except ImportError:
 
 from mem0.configs.llms.base import BaseLlmConfig
 from mem0.llms.base import LLMBase
+from mem0.memory.utils import extract_json
 
 
 class LiteLLM(LLMBase):
@@ -39,7 +40,7 @@ class LiteLLM(LLMBase):
                 processed_response["tool_calls"].append(
                     {
                         "name": tool_call.function.name,
-                        "arguments": json.loads(tool_call.function.arguments),
+                        "arguments": json.loads(extract_json(tool_call.function.arguments)),
                     }
                 )
 
@@ -7,6 +7,7 @@ from openai import OpenAI
 
 from mem0.configs.llms.base import BaseLlmConfig
 from mem0.llms.base import LLMBase
+from mem0.memory.utils import extract_json
 
 
 class OpenAILLM(LLMBase):
@@ -62,7 +63,7 @@ class OpenAILLM(LLMBase):
                 processed_response["tool_calls"].append(
                     {
                         "name": tool_call.function.name,
-                        "arguments": json.loads(tool_call.function.arguments),
+                        "arguments": json.loads(extract_json(tool_call.function.arguments)),
                     }
                 )
 
@@ -9,6 +9,7 @@ except ImportError:
 
 from mem0.configs.llms.base import BaseLlmConfig
 from mem0.llms.base import LLMBase
+from mem0.memory.utils import extract_json
 
 
 class TogetherLLM(LLMBase):
@@ -43,7 +44,7 @@ class TogetherLLM(LLMBase):
                 processed_response["tool_calls"].append(
                     {
                         "name": tool_call.function.name,
-                        "arguments": json.loads(tool_call.function.arguments),
+                        "arguments": json.loads(extract_json(tool_call.function.arguments)),
                     }
                 )
 
@@ -4,6 +4,7 @@ from typing import Dict, List, Optional
 
 from mem0.configs.llms.base import BaseLlmConfig
 from mem0.llms.base import LLMBase
+from mem0.memory.utils import extract_json
 
 
 class VllmLLM(LLMBase):
@@ -39,7 +40,7 @@ class VllmLLM(LLMBase):
             for tool_call in response.choices[0].message.tool_calls:
                 processed_response["tool_calls"].append({
                     "name": tool_call.function.name,
-                    "arguments": json.loads(tool_call.function.arguments),
+                    "arguments": json.loads(extract_json(tool_call.function.arguments)),
                 })
 
         return processed_response
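The practical effect, assuming `extract_json` strips fences roughly as in the sketch near the top: tool-call arguments or response bodies that arrive wrapped in markdown no longer break parsing in these `_parse_response` paths. A small illustrative usage:

```python
import json

from mem0.memory.utils import extract_json

# Hypothetical hallucinated output: valid JSON wrapped in a markdown fence.
raw = '```json\n{"name": "add_memory", "arguments": {"text": "hello"}}\n```'

# json.loads(raw) would raise json.JSONDecodeError on the backticks.
payload = json.loads(extract_json(raw))
print(payload["name"])  # -> add_memory
```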