Fix langchain llm and update changelog (#2508)
This commit is contained in:
2
Makefile
2
Makefile
@@ -13,7 +13,7 @@ install:
|
||||
install_all:
|
||||
poetry install
|
||||
poetry run pip install groq together boto3 litellm ollama chromadb weaviate weaviate-client sentence_transformers vertexai \
|
||||
google-generativeai elasticsearch opensearch-py vecs pinecone pinecone-text faiss-cpu
|
||||
google-generativeai elasticsearch opensearch-py vecs pinecone pinecone-text faiss-cpu langchain-community
|
||||
|
||||
# Format code with ruff
|
||||
format:
|
||||
|
||||
@@ -6,6 +6,25 @@ mode: "wide"
|
||||
<Tabs>
|
||||
<Tab title="Python">
|
||||
|
||||
<Update label="2025-04-07" description="v0.1.82">
|
||||
|
||||
**New Features:**
|
||||
- **LLM Integrations:** Added support for Langchain LLMs, Google as new LLM and embedder
|
||||
- **Development:** Added development docker compose
|
||||
|
||||
**Improvements:**
|
||||
- **Output Format:** Set output_format='v1.1' and updated documentation
|
||||
|
||||
**Documentation:**
|
||||
- **Integrations:** Added LMStudio and Together.ai documentation
|
||||
- **API Reference:** Updated output_format documentation
|
||||
- **Integrations:** Added PipeCat integration documentation
|
||||
- **Integrations:** Added Flowise integration documentation for Mem0 memory setup
|
||||
|
||||
**Bug Fixes:**
|
||||
- **Tests:** Fixed failing unit tests
|
||||
</Update>
|
||||
|
||||
<Update label="2025-04-02" description="v0.1.79">
|
||||
|
||||
**New Features:**
|
||||
@@ -56,6 +75,17 @@ mode: "wide"
|
||||
|
||||
<Tab title="TypeScript">
|
||||
|
||||
<Update label="2025-04-01" description="v2.1.14">
|
||||
**New Features:**
|
||||
- **Mastra Example:** Added Mastra example
|
||||
- **Integrations:** Added Flowise integration documentation for Mem0 memory setup
|
||||
|
||||
**Improvements:**
|
||||
- **Demo:** Updated Demo Mem0AI
|
||||
- **Client:** Enhanced Ping method in Mem0 Client
|
||||
- **AI SDK:** Updated AI SDK implementation
|
||||
</Update>
|
||||
|
||||
<Update label="2025-03-29" description="v2.1.13">
|
||||
**Improvements:**
|
||||
- **Introduced `ping` method to check if API key is valid and populate org/project id**
|
||||
|
||||
@@ -12,7 +12,7 @@ except ImportError:
|
||||
|
||||
# Provider-specific package mapping
|
||||
PROVIDER_PACKAGES = {
|
||||
# "Anthropic": "langchain_anthropic", # Special handling for Anthropic with Pydantic v2
|
||||
"Anthropic": "langchain_anthropic",
|
||||
"MistralAI": "langchain_mistralai",
|
||||
"Fireworks": "langchain_fireworks",
|
||||
"AzureOpenAI": "langchain_openai",
|
||||
@@ -135,6 +135,13 @@ class LangchainLLM(LLMBase):
|
||||
try:
|
||||
# Check if this provider needs a specialized package
|
||||
if provider in PROVIDER_PACKAGES:
|
||||
if provider == "Anthropic": # Special handling for Anthropic with Pydantic v2
|
||||
try:
|
||||
from langchain_anthropic import ChatAnthropic
|
||||
model_class = ChatAnthropic
|
||||
except ImportError:
|
||||
raise ImportError("langchain_anthropic not found. Please install it with `pip install langchain-anthropic`")
|
||||
else:
|
||||
package_name = PROVIDER_PACKAGES[provider]
|
||||
try:
|
||||
# Import the model class directly from the package
|
||||
@@ -158,8 +165,7 @@ class LangchainLLM(LLMBase):
|
||||
self.langchain_model = model_class(
|
||||
model=self.config.model,
|
||||
temperature=self.config.temperature,
|
||||
max_tokens=self.config.max_tokens,
|
||||
api_key=self.config.api_key,
|
||||
max_tokens=self.config.max_tokens
|
||||
)
|
||||
except (ImportError, AttributeError, ValueError) as e:
|
||||
raise ImportError(f"Error setting up langchain model for provider {provider}: {str(e)}")
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[tool.poetry]
|
||||
name = "mem0ai"
|
||||
version = "0.1.82"
|
||||
version = "0.1.83"
|
||||
description = "Long-term memory for AI Agents"
|
||||
authors = ["Mem0 <founders@mem0.ai>"]
|
||||
exclude = [
|
||||
|
||||
Reference in New Issue
Block a user