[docs]: Revamp embedchain docs (#799)

This commit is contained in:
Deshraj Yadav
2023-10-13 15:38:15 -07:00
committed by GitHub
parent a86d7f52e9
commit 4a8c50f886
68 changed files with 1175 additions and 673 deletions

8
configs/anthropic.yaml Normal file
View File

@@ -0,0 +1,8 @@
# configs/anthropic.yaml — LLM-only config for the Anthropic provider.
llm:
  provider: anthropic
  model: 'claude-instant-1'
  config:
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
    stream: false

26
configs/chroma.yaml Normal file
View File

@@ -0,0 +1,26 @@
# configs/chroma.yaml — app + OpenAI LLM + Chroma vector DB + OpenAI embedder.
app:
  config:
    id: 'my-app'
    collection_name: 'my-app'

llm:
  provider: openai
  model: 'gpt-3.5-turbo'
  config:
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
    stream: false

vectordb:
  provider: chroma
  config:
    collection_name: 'my-app'
    dir: db
    allow_reset: true

embedder:
  provider: openai
  config:
    model: 'text-embedding-ada-002'
    # null: no Azure deployment; plain OpenAI endpoint is used.
    deployment_name: null

7
configs/cohere.yaml Normal file
View File

@@ -0,0 +1,7 @@
# configs/cohere.yaml — LLM-only config for the Cohere provider.
llm:
  provider: cohere
  model: large
  config:
    temperature: 0.5
    max_tokens: 1000
    top_p: 1

35
configs/full-stack.yaml Normal file
View File

@@ -0,0 +1,35 @@
# configs/full-stack.yaml — full app config with custom prompt template
# and system prompt. $context and $query are substituted at query time.
app:
  config:
    id: 'full-stack-app'

llm:
  provider: openai
  model: 'gpt-3.5-turbo'
  config:
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
    stream: false
    template: |
      Use the following pieces of context to answer the query at the end.
      If you don't know the answer, just say that you don't know, don't try to make up an answer.

      $context

      Query: $query

      Helpful Answer:
    system_prompt: |
      Act as William Shakespeare. Answer the following questions in the style of William Shakespeare.

vectordb:
  provider: chroma
  config:
    collection_name: 'full-stack-app'
    dir: db
    allow_reset: true

embedder:
  provider: openai
  config:
    model: 'text-embedding-ada-002'

13
configs/gpt4all.yaml Normal file
View File

@@ -0,0 +1,13 @@
# configs/gpt4all.yaml — fully local LLM + embedder via GPT4All.
llm:
  provider: gpt4all
  model: 'orca-mini-3b.ggmlv3.q4_0.bin'
  config:
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
    stream: false

embedder:
  provider: gpt4all
  config:
    model: 'all-MiniLM-L6-v2'

8
configs/huggingface.yaml Normal file
View File

@@ -0,0 +1,8 @@
# configs/huggingface.yaml — LLM-only config for the Hugging Face provider.
llm:
  provider: huggingface
  model: 'google/flan-t5-xxl'
  config:
    temperature: 0.5
    max_tokens: 1000
    top_p: 0.5
    stream: false

7
configs/jina.yaml Normal file
View File

@@ -0,0 +1,7 @@
# configs/jina.yaml — LLM-only config for the Jina provider (no model key;
# the provider default model is used).
llm:
  provider: jina
  config:
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
    stream: false

8
configs/llama2.yaml Normal file
View File

@@ -0,0 +1,8 @@
# configs/llama2.yaml — Llama 2 via Replicate (model pinned to an exact
# version hash; quoted so YAML keeps it a plain string).
llm:
  provider: llama2
  model: 'a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5'
  config:
    temperature: 0.5
    max_tokens: 1000
    top_p: 0.5
    stream: false

33
configs/opensearch.yaml Normal file
View File

@@ -0,0 +1,33 @@
# configs/opensearch.yaml — app + OpenAI LLM + OpenSearch vector DB.
app:
  config:
    id: 'my-app'
    log_level: 'WARN'
    collect_metrics: true
    collection_name: 'my-app'

llm:
  provider: openai
  model: 'gpt-3.5-turbo'
  config:
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
    stream: false

vectordb:
  provider: opensearch
  config:
    opensearch_url: 'https://localhost:9200'
    # (username, password) pair; default dev credentials — replace in production.
    http_auth:
      - admin
      - admin
    # 1536 matches the text-embedding-ada-002 embedder below.
    vector_dimension: 1536
    collection_name: 'my-app'
    use_ssl: false
    verify_certs: false

embedder:
  provider: openai
  config:
    model: 'text-embedding-ada-002'
    deployment_name: null

27
configs/opensource.yaml Normal file
View File

@@ -0,0 +1,27 @@
# configs/opensource.yaml — fully open-source stack: GPT4All LLM/embedder
# with Chroma vector DB; telemetry disabled.
app:
  config:
    id: 'open-source-app'
    collection_name: 'open-source-app'
    collect_metrics: false

llm:
  provider: gpt4all
  model: 'orca-mini-3b.ggmlv3.q4_0.bin'
  config:
    temperature: 0.5
    max_tokens: 1000
    top_p: 1
    stream: false

vectordb:
  provider: chroma
  config:
    collection_name: 'open-source-app'
    dir: db
    allow_reset: true

embedder:
  provider: gpt4all
  config:
    model: 'all-MiniLM-L6-v2'
    deployment_name: null

6
configs/vertexai.yaml Normal file
View File

@@ -0,0 +1,6 @@
# configs/vertexai.yaml — LLM-only config for Google Vertex AI.
llm:
  provider: vertexai
  model: 'chat-bison'
  config:
    temperature: 0.5
    top_p: 0.5