Rename embedchain to mem0 and open sourcing code for long term memory (#1474)

Co-authored-by: Deshraj Yadav <deshrajdry@gmail.com>
This commit is contained in:
Taranjeet Singh
2024-07-12 07:51:33 -07:00
committed by GitHub
parent 83e8c97295
commit f842a92e25
665 changed files with 9427 additions and 6592 deletions

View File

@@ -0,0 +1,8 @@
# Anthropic LLM settings — model options are nested under llm.config.
llm:
  provider: anthropic
  config:
    model: 'claude-instant-1'
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
    stream: false

View File

@@ -0,0 +1,19 @@
# Azure OpenAI app: LLM and embedder both use azure_openai and need a
# deployment_name in addition to the model id.
app:
  config:
    id: azure-openai-app
llm:
  provider: azure_openai
  config:
    model: gpt-35-turbo
    deployment_name: your_llm_deployment_name
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
    stream: false
embedder:
  provider: azure_openai
  config:
    model: text-embedding-ada-002
    # fixed placeholder typo: "you_" -> "your_"
    deployment_name: your_embedding_model_deployment_name

View File

@@ -0,0 +1,24 @@
# OpenAI LLM + Chroma vector store app configuration.
app:
  config:
    id: 'my-app'
llm:
  provider: openai
  config:
    model: 'gpt-3.5-turbo'
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
    stream: false
vectordb:
  provider: chroma
  config:
    collection_name: 'my-app'
    dir: db
    allow_reset: true
embedder:
  provider: openai
  config:
    model: 'text-embedding-ada-002'

View File

@@ -0,0 +1,4 @@
# Text chunker settings; chunk_overlap must stay below chunk_size.
chunker:
  chunk_size: 100
  chunk_overlap: 20
  length_function: 'len'

View File

@@ -0,0 +1,12 @@
# Clarifai LLM + embedder; models are addressed by full Clarifai URL,
# and generation options go under model_kwargs.
llm:
  provider: clarifai
  config:
    model: "https://clarifai.com/mistralai/completion/models/mistral-7B-Instruct"
    model_kwargs:
      temperature: 0.5
      max_tokens: 1000
embedder:
  provider: clarifai
  config:
    model: "https://clarifai.com/clarifai/main/models/BAAI-bge-base-en-v15"

View File

@@ -0,0 +1,7 @@
# Cohere LLM settings.
llm:
  provider: cohere
  config:
    model: large
    temperature: 0.5
    max_tokens: 1000
    top_p: 1

View File

@@ -0,0 +1,40 @@
# Full-stack example: app id, chunker, OpenAI LLM with custom prompt and
# system prompt, Chroma vector store, and OpenAI embedder.
app:
  config:
    id: 'full-stack-app'

chunker:
  chunk_size: 100
  chunk_overlap: 20
  length_function: 'len'

llm:
  provider: openai
  config:
    model: 'gpt-3.5-turbo'
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
    stream: false
    # $context and $query are substituted at query time.
    prompt: |
      Use the following pieces of context to answer the query at the end.
      If you don't know the answer, just say that you don't know, don't try to make up an answer.

      $context

      Query: $query

      Helpful Answer:
    system_prompt: |
      Act as William Shakespeare. Answer the following questions in the style of William Shakespeare.

vectordb:
  provider: chroma
  config:
    collection_name: 'my-collection-name'
    dir: db
    allow_reset: true

embedder:
  provider: openai
  config:
    model: 'text-embedding-ada-002'

View File

@@ -0,0 +1,13 @@
# Google (Gemini) LLM + embedder settings.
llm:
  provider: google
  config:
    model: gemini-pro
    max_tokens: 1000
    temperature: 0.9
    top_p: 1.0
    stream: false
embedder:
  provider: google
  config:
    model: models/embedding-001

View File

@@ -0,0 +1,8 @@
# OpenAI GPT-4 LLM settings.
llm:
  provider: openai
  config:
    model: 'gpt-4'
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
    stream: false

View File

@@ -0,0 +1,11 @@
# Local GPT4All LLM; embedder uses gpt4all defaults (no config given).
llm:
  provider: gpt4all
  config:
    model: 'orca-mini-3b-gguf2-q4_0.gguf'
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
    stream: false
embedder:
  provider: gpt4all

View File

@@ -0,0 +1,8 @@
# Hugging Face hosted model settings.
llm:
  provider: huggingface
  config:
    model: 'google/flan-t5-xxl'
    temperature: 0.5
    max_tokens: 1000
    top_p: 0.5
    stream: false

View File

@@ -0,0 +1,7 @@
# Jina LLM settings (no model key in this example; provider default is used).
llm:
  provider: jina
  config:
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
    stream: false

View File

@@ -0,0 +1,8 @@
# Llama 2 via Replicate; the model string pins an exact version hash.
llm:
  provider: llama2
  config:
    model: 'a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5'
    temperature: 0.5
    max_tokens: 1000
    top_p: 0.5
    stream: false

View File

@@ -0,0 +1,14 @@
# Local Ollama LLM + embedder; both point at the default Ollama daemon URL.
llm:
  provider: ollama
  config:
    model: 'llama2'
    temperature: 0.5
    top_p: 1
    stream: true
    base_url: http://localhost:11434
embedder:
  provider: ollama
  config:
    model: 'mxbai-embed-large:latest'
    base_url: http://localhost:11434

View File

@@ -0,0 +1,33 @@
# OpenSearch-backed app: OpenAI LLM/embedder with an OpenSearch vector store.
# NOTE(review): http_auth ships the default admin/admin credentials and
# use_ssl/verify_certs are disabled — development settings only.
app:
  config:
    id: 'my-app'
    log_level: 'WARNING'
    collect_metrics: true
    collection_name: 'my-app'
llm:
  provider: openai
  config:
    model: 'gpt-3.5-turbo'
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
    stream: false
vectordb:
  provider: opensearch
  config:
    opensearch_url: 'https://localhost:9200'
    http_auth:
      - admin
      - admin
    vector_dimension: 1536
    collection_name: 'my-app'
    use_ssl: false
    verify_certs: false
embedder:
  provider: openai
  config:
    model: 'text-embedding-ada-002'
    deployment_name: 'my-app'

View File

@@ -0,0 +1,25 @@
# Fully open-source stack: GPT4All LLM/embedder with Chroma, metrics off.
app:
  config:
    id: 'open-source-app'
    collect_metrics: false
llm:
  provider: gpt4all
  config:
    model: 'orca-mini-3b-gguf2-q4_0.gguf'
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
    stream: false
vectordb:
  provider: chroma
  config:
    collection_name: 'open-source-app'
    dir: db
    allow_reset: true
embedder:
  provider: gpt4all
  config:
    deployment_name: 'test-deployment'

View File

@@ -0,0 +1,6 @@
# Pinecone vector store settings; vector_dimension must match the embedder.
vectordb:
  provider: pinecone
  config:
    metric: cosine
    vector_dimension: 1536
    collection_name: my-pinecone-index

View File

@@ -0,0 +1,26 @@
# Pipeline example: GPT4All LLM + embedding model with a Chroma store.
# Note the embedding stanza here is keyed `embedding_model`, not `embedder`.
pipeline:
  config:
    name: Example pipeline
    id: pipeline-1  # Make sure that id is different every time you create a new pipeline
vectordb:
  provider: chroma
  config:
    collection_name: pipeline-1
    dir: db
    allow_reset: true
llm:
  provider: gpt4all
  config:
    model: 'orca-mini-3b-gguf2-q4_0.gguf'
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
    stream: false
embedding_model:
  provider: gpt4all
  config:
    model: 'all-MiniLM-L6-v2'
    deployment_name: null

View File

@@ -0,0 +1,6 @@
# Together AI LLM settings.
llm:
  provider: together
  config:
    model: mistralai/Mixtral-8x7B-Instruct-v0.1
    temperature: 0.5
    max_tokens: 1000

View File

@@ -0,0 +1,6 @@
# Google Vertex AI LLM settings.
llm:
  provider: vertexai
  config:
    model: 'chat-bison'
    temperature: 0.5
    top_p: 0.5

View File

@@ -0,0 +1,14 @@
# vLLM-served Llama 2 with a Hugging Face embedder.
llm:
  provider: vllm
  config:
    model: 'meta-llama/Llama-2-70b-hf'
    temperature: 0.5
    top_p: 1
    top_k: 10
    stream: true
    trust_remote_code: true
embedder:
  provider: huggingface
  config:
    model: 'BAAI/bge-small-en-v1.5'

View File

@@ -0,0 +1,4 @@
# Weaviate vector store settings.
vectordb:
  provider: weaviate
  config:
    collection_name: my_weaviate_index