Update Config params when using Local Ollama models (#1690)

Samuel Devdas
2024-08-13 08:28:14 +02:00
committed by GitHub
parent 5cea47947c
commit 31ef9135e7

@@ -206,18 +206,32 @@ import os
 from mem0 import Memory
 
 config = {
-    "vector_store":{
+    "vector_store": {
         "provider": "qdrant",
         "config": {
-            "embedding_model_dims": 768 # change according to embedding model
-        }
+            "collection_name": "test",
+            "host": "localhost",
+            "port": 6333,
+            "embedding_model_dims": 768,  # 768 for Nomic; change according to your local embedding model's dimensions
+        },
     },
     "llm": {
-        "provider": "ollama"
+        "provider": "ollama",
+        "config": {
+            "model": "llama3.1:latest",
+            "temperature": 0,
+            "max_tokens": 8000,
+            "ollama_base_url": "http://localhost:11434",  # Ensure this URL is correct
+        },
     },
     "embedder": {
-        "provider": "ollama"
-    }
+        "provider": "ollama",
+        "config": {
+            "model": "nomic-embed-text:latest",
+            # "model": "snowflake-arctic-embed:latest",
+            "ollama_base_url": "http://localhost:11434",
+        },
+    },
 }
 
 m = Memory.from_config(config)
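
For context, a minimal usage sketch of the updated config (not part of this commit): it reuses the config dict shown in the hunk above and assumes Qdrant is reachable at localhost:6333, Ollama at localhost:11434, and that the referenced models have been pulled; the memory text and user_id are illustrative.

# Usage sketch (not part of this commit): reuses the `config` dict above.
# Assumes Qdrant and Ollama are running locally and the models have been
# pulled, e.g. `ollama pull llama3.1` and `ollama pull nomic-embed-text`.
m = Memory.from_config(config)

# Store a memory; the text and user_id here are illustrative.
m.add("I prefer hiking on weekends", user_id="alice")

# Query memories; embedding and retrieval run against the local stack.
results = m.search("What do I do on weekends?", user_id="alice")
print(results)
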
@@ -325,4 +339,4 @@ print(history)
 
 If you have any questions, please feel free to reach out to us using one of the following methods:
 
-<Snippet file="get-help.mdx" />
\ No newline at end of file
+<Snippet file="get-help.mdx" />