############################## OperateGPT Embeddings Model #######################################
# embedding models: all-MiniLM-L6-v2, text2vec-large-chinese, bge-large-zh
EMBEDDING_MODEL=all-MiniLM-L6-v2
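# For example, to switch to a Chinese embedding model (assuming its weights can be fetched locally), set:
# EMBEDDING_MODEL=text2vec-large-chinese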
# Chroma is used by default. Weaviate and Milvus will be supported later.
VECTOR_STORE_TYPE=Chroma
############################## OperateGPT LLM Model #######################################
# If your environment cannot access OpenAI directly, start a ChatGPT proxy service.
# Recommended: run https://github.com/Yidadaa/ChatGPT-Next-Web via Docker, then set OPEN_AI_PROXY_SERVER_URL=http://{YOUR-SERVER-IP}:3000/api/openai/v1/chat/completions
OPEN_AI_PROXY_SERVER_URL=https://api.openai.com/v1/chat/completions
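# Example with a self-hosted ChatGPT-Next-Web proxy (hypothetical IP; replace with your own server):
# OPEN_AI_PROXY_SERVER_URL=http://192.168.1.100:3000/api/openai/v1/chat/completions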
# your OpenAI key
OPEN_AI_KEY=sk-xxx
############################## [Optional] OperateGPT Text2Image Model #######################################
# your Stable Diffusion proxy service address (recommended: https://github.com/xuyuan23/stablediffusion-proxy)
SD_MODEL=stable-diffusion-xl-base-1.0
SD_PROXY_URL=http://127.0.0.1:7860
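# If the Stable Diffusion proxy runs on a remote machine (hypothetical host; adjust to your deployment):
# SD_PROXY_URL=http://{YOUR-SERVER-IP}:7860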
############################## [Optional] OperateGPT Text2Video Model #######################################
# your text2video model; the default is zeroscope_v2_576w (for more details, see https://github.com/xuyuan23/Text2Video)
T2V_MODEL=zeroscope_v2_576w
#T2V_MODEL=text-to-video-ms-1.7b
# your text2video proxy service address [recommended]
T2V_PROXY_URL=http://127.0.0.1:7861
############################## [Optional] OperateGPT WEB SERVER #######################################
# Database Config
DB_USER=root
DB_PASSWD=123456
DB_HOST=127.0.0.1
DB_PORT=3306
DB_NAME=operategpt
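# Together, the values above correspond to a MySQL-style connection such as (illustrative only):
# mysql://root:123456@127.0.0.1:3306/operategpt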
# LLM manager server port
LLM_MANAGE_SERVER_PORT=8007
# [Optional] your local LLM server; start it with: python operategpt/llms/llmserver.py
LLM_SERVER_PORT=8008
# LLM_NAME options: proxyllm, chatglm2-6b, vicuna-13b, baichuan2-13b... see operategpt/llms/model_config.py
LLM_NAME=proxyllm
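# For example, to serve ChatGLM2 locally instead of the OpenAI proxy (assuming the model is listed in operategpt/llms/model_config.py):
# LLM_NAME=chatglm2-6b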
############################## [Optional] Server Mode #######################################
# Set SERVER_MODE=local to generate the operation chapter locally (default path: /data/operation_data/xxx.md).
# Set SERVER_MODE=online to generate the operation chapter on your server; DOWNLOAD_FOLDER and GENERATED_OPERATE_DATA_DEFAULT_DIR must then be specified.
SERVER_MODE=local
# The images, videos, and markdown files will be stored here; you can download them directly via HTTP request.
DOWNLOAD_FOLDER=http://xxx/experience
# Where the operation chapters are generated; if deployed on a cloud server, /var/www/html/experience is recommended
GENERATED_OPERATE_DATA_DEFAULT_DIR=/var/www/html/experience
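# Example online-mode setup (hypothetical host; adjust for your server):
# SERVER_MODE=online
# DOWNLOAD_FOLDER=http://{YOUR-SERVER-IP}/experience
# GENERATED_OPERATE_DATA_DEFAULT_DIR=/var/www/html/experience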