From e1e7a10c36af1128ab4992f6d4562d8e399a52ad Mon Sep 17 00:00:00 2001
From: yi-ge
Date: Wed, 8 Mar 2023 14:33:44 +0800
Subject: [PATCH 1/7] chore: rename environment variables files
---
.env => .env.example | 0
.gitignore | 4 ++++
service/{.env => .env.example} | 0
3 files changed, 4 insertions(+)
rename .env => .env.example (100%)
rename service/{.env => .env.example} (100%)
diff --git a/.env b/.env.example
similarity index 100%
rename from .env
rename to .env.example
diff --git a/.gitignore b/.gitignore
index 5119246ca9..72f5944dc2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,3 +27,7 @@ coverage
*.njsproj
*.sln
*.sw?
+
+# Environment variables files
+/service/.env
+/.env
diff --git a/service/.env b/service/.env.example
similarity index 100%
rename from service/.env
rename to service/.env.example
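For context, the service already depends on `dotenv` (see the `service/package.json` hunk in patch 4), so after copying `service/.env.example` to `service/.env` the variables are typically loaded like the sketch below. This is illustrative only and not part of this patch; the variable names are the ones the README (patch 2) documents.

```ts
// Illustrative only: loading the renamed service/.env after copying
// service/.env.example to service/.env. Not part of this patch.
import dotenv from 'dotenv'

dotenv.config() // reads .env from the service working directory

// OPENAI_API_KEY and OPENAI_ACCESS_TOKEN are the fields the README (patch 2) refers to.
const apiKey = process.env.OPENAI_API_KEY
const accessToken = process.env.OPENAI_ACCESS_TOKEN

if (!apiKey && !accessToken)
  throw new Error('Set OPENAI_API_KEY or OPENAI_ACCESS_TOKEN in service/.env')
```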
From 8b50e206f865862c0488c8ec53e79c4c1d1905fe Mon Sep 17 00:00:00 2001
From: yi-ge
Date: Wed, 8 Mar 2023 14:45:54 +0800
Subject: [PATCH 2/7] docs: update README.md about .env file
---
README.en.md | 4 ++--
README.md | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/README.en.md b/README.en.md
index 6fa93e45dc..233c394ef3 100644
--- a/README.en.md
+++ b/README.en.md
@@ -55,7 +55,7 @@ Comparison:
[Details](https://github.com/Chanzhaoyu/chatgpt-web/issues/138)
Switching Methods:
-1. Go to the `service/.env` file.
+1. Go to the `service/.env.example` file and copy the contents to the `service/.env` file.
2. For `OpenAI API Key`, fill in the `OPENAI_API_KEY` field [(Get apiKey)](https://platform.openai.com/overview).
3. For `Web API`, fill in the `OPENAI_ACCESS_TOKEN` field [(Get accessToken)](https://chat.openai.com/api/auth/session).
4. When both are present, `OpenAI API Key` takes precedence.
@@ -266,7 +266,7 @@ PS: You can also run `pnpm start` directly on the server without packaging.
#### Frontend webpage
-1. Modify `VITE_APP_API_BASE_URL` in `.env` at the root directory to your actual backend interface address.
+1. Create a `.env` file in the root directory based on the `.env.example` file, then change `VITE_APP_API_BASE_URL` in `.env` to your actual backend interface address.
2. Run the following command in the root directory and then copy the files in the `dist` folder to the root directory of your website service.
[Reference information](https://cn.vitejs.dev/guide/static-deploy.html#building-the-app)
diff --git a/README.md b/README.md
index 4f421fb154..1adad96291 100644
--- a/README.md
+++ b/README.md
@@ -54,7 +54,7 @@
[See details](https://github.com/Chanzhaoyu/chatgpt-web/issues/138)
Switching method:
-1. Go to the `service/.env` file
+1. Go to the `service/.env.example` file and copy its contents into the `service/.env` file
2. For `OpenAI API Key`, fill in the `OPENAI_API_KEY` field [(Get apiKey)](https://platform.openai.com/overview)
3. For `Web API`, fill in the `OPENAI_ACCESS_TOKEN` field [(Get accessToken)](https://chat.openai.com/api/auth/session)
4. When both are present, `OpenAI API Key` takes precedence
@@ -261,7 +261,7 @@ PS: You can also run `pnpm start` directly on the server without packaging
#### Frontend webpage
-1. Modify `VITE_APP_API_BASE_URL` in `.env` at the root directory to your actual backend interface address
+1. Create a `.env` file in the root directory based on the `.env.example` file, then change `VITE_APP_API_BASE_URL` to your actual backend interface address
2. Run the following command in the root directory, then copy the files in the `dist` folder to the root directory of your website service
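As background for the README steps above, Vite only exposes variables prefixed with `VITE_` to client code, through `import.meta.env`. A minimal sketch of reading the two values mentioned here; it is illustrative and not part of the patch, and the fallback values simply mirror the defaults shown in `.env.example`.

```ts
// Illustrative sketch: reading the Vite variables described in the README.
// import.meta.env values are strings (or undefined), so provide fallbacks.
const apiBaseUrl = import.meta.env.VITE_APP_API_BASE_URL ?? 'http://localhost:3002/'
const globApiUrl = import.meta.env.VITE_GLOB_API_URL ?? '/api'

console.log(`backend base URL: ${apiBaseUrl}, API path: ${globApiUrl}`)
```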
From dcf221a8db04505ac7d9cf5d9dd68b84ee2979b6 Mon Sep 17 00:00:00 2001
From: yi-ge
Date: Fri, 10 Mar 2023 00:39:40 +0800
Subject: [PATCH 3/7] feat: support long reply
---
.env.example | 2 +
service/src/chatgpt/index.ts | 4 +-
src/views/chat/index.vue | 161 ++++++++++++++++++++---------------
3 files changed, 97 insertions(+), 70 deletions(-)
diff --git a/.env.example b/.env.example
index 1977a1e624..b311ce4ab6 100644
--- a/.env.example
+++ b/.env.example
@@ -2,3 +2,5 @@
VITE_GLOB_API_URL=/api
VITE_APP_API_BASE_URL=http://localhost:3002/
+
+VITE_GLOB_OPEN_LONG_REPLY=true
diff --git a/service/src/chatgpt/index.ts b/service/src/chatgpt/index.ts
index b3cb1e0df9..3c2c9dbf46 100644
--- a/service/src/chatgpt/index.ts
+++ b/service/src/chatgpt/index.ts
@@ -84,8 +84,8 @@ async function chatReplyProcess(
lastContext?: { conversationId?: string; parentMessageId?: string },
process?: (chat: ChatMessage) => void,
) {
- if (!message)
- return sendResponse({ type: 'Fail', message: 'Message is empty' })
+ // if (!message)
+ // return sendResponse({ type: 'Fail', message: 'Message is empty' })
try {
let options: SendMessageOptions = { timeoutMs }
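The empty-message guard is relaxed here because the long-reply flow (added to the frontend below) re-issues a request whose prompt is an empty string and whose `parentMessageId` points at the truncated answer. A rough sketch of such a continuation call with the `chatgpt` package follows; the option names come from the `SendMessageOptions` type already used in this file, but treat the snippet as an assumption rather than the project's exact code.

```ts
// Sketch of a continuation request (assumption, not the project's exact code).
import { ChatGPTAPI, type SendMessageOptions } from 'chatgpt'

const api = new ChatGPTAPI({ apiKey: process.env.OPENAI_API_KEY ?? '' })

// An empty prompt plus parentMessageId asks the model to keep going from the
// previous, length-truncated answer.
async function continueReply(parentMessageId: string) {
  const options: SendMessageOptions = { parentMessageId, timeoutMs: 60 * 1000 }
  return api.sendMessage('', options)
}
```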
diff --git a/src/views/chat/index.vue b/src/views/chat/index.vue
index 98b6230f8a..6ed0b9e794 100644
--- a/src/views/chat/index.vue
+++ b/src/views/chat/index.vue
@@ -15,6 +15,8 @@ import { t } from '@/locales'
let controller = new AbortController()
+const openLongReply = import.meta.env.VITE_GLOB_OPEN_LONG_REPLY === 'true'
+
const route = useRoute()
const dialog = useDialog()
const ms = useMessage()
@@ -40,7 +42,7 @@ function handleSubmit() {
}
async function onConversation() {
- const message = prompt.value
+ let message = prompt.value
if (loading.value)
return
@@ -87,40 +89,52 @@ async function onConversation() {
scrollToBottom()
try {
- await fetchChatAPIProcess({
- prompt: message,
- options,
- signal: controller.signal,
- onDownloadProgress: ({ event }) => {
- const xhr = event.target
- const { responseText } = xhr
- // Always process the final line
- const lastIndex = responseText.lastIndexOf('\n')
- let chunk = responseText
- if (lastIndex !== -1)
- chunk = responseText.substring(lastIndex)
- try {
- const data = JSON.parse(chunk)
- updateChat(
- +uuid,
- dataSources.value.length - 1,
- {
- dateTime: new Date().toLocaleString(),
- text: data.text ?? '',
- inversion: false,
- error: false,
- loading: false,
- conversationOptions: { conversationId: data.conversationId, parentMessageId: data.id },
- requestOptions: { prompt: message, options: { ...options } },
- },
- )
- scrollToBottomIfAtBottom()
- }
- catch (error) {
+ let lastText = ''
+ const fetchChatAPIOnce = async () => {
+ await fetchChatAPIProcess({
+ prompt: message,
+ options,
+ signal: controller.signal,
+ onDownloadProgress: ({ event }) => {
+ const xhr = event.target
+ const { responseText } = xhr
+ // Always process the final line
+ const lastIndex = responseText.lastIndexOf('\n')
+ let chunk = responseText
+ if (lastIndex !== -1)
+ chunk = responseText.substring(lastIndex)
+ try {
+ const data = JSON.parse(chunk)
+ updateChat(
+ +uuid,
+ dataSources.value.length - 1,
+ {
+ dateTime: new Date().toLocaleString(),
+ text: lastText + data.text ?? '',
+ inversion: false,
+ error: false,
+ loading: false,
+ conversationOptions: { conversationId: data.conversationId, parentMessageId: data.id },
+ requestOptions: { prompt: message, options: { ...options } },
+ },
+ )
+ if (openLongReply && data.detail.choices[0].finish_reason === 'length') {
+ options.parentMessageId = data.id
+ lastText = data.text
+ message = ''
+ return fetchChatAPIOnce()
+ }
+
+ scrollToBottomIfAtBottom()
+ }
+ catch (error) {
//
- }
- },
- })
+ }
+ },
+ })
+ }
+
+ await fetchChatAPIOnce()
}
catch (error: any) {
const errorMessage = error?.message ?? t('common.wrong')
@@ -180,7 +194,7 @@ async function onRegenerate(index: number) {
const { requestOptions } = dataSources.value[index]
- const message = requestOptions?.prompt ?? ''
+ let message = requestOptions?.prompt ?? ''
let options: Chat.ConversationRequest = {}
@@ -204,39 +218,50 @@ async function onRegenerate(index: number) {
)
try {
- await fetchChatAPIProcess({
- prompt: message,
- options,
- signal: controller.signal,
- onDownloadProgress: ({ event }) => {
- const xhr = event.target
- const { responseText } = xhr
- // Always process the final line
- const lastIndex = responseText.lastIndexOf('\n')
- let chunk = responseText
- if (lastIndex !== -1)
- chunk = responseText.substring(lastIndex)
- try {
- const data = JSON.parse(chunk)
- updateChat(
- +uuid,
- index,
- {
- dateTime: new Date().toLocaleString(),
- text: data.text ?? '',
- inversion: false,
- error: false,
- loading: false,
- conversationOptions: { conversationId: data.conversationId, parentMessageId: data.id },
- requestOptions: { prompt: message, ...options },
- },
- )
- }
- catch (error) {
- //
- }
- },
- })
+ let lastText = ''
+ const fetchChatAPIOnce = async () => {
+ await fetchChatAPIProcess({
+ prompt: message,
+ options,
+ signal: controller.signal,
+ onDownloadProgress: ({ event }) => {
+ const xhr = event.target
+ const { responseText } = xhr
+ // Always process the final line
+ const lastIndex = responseText.lastIndexOf('\n')
+ let chunk = responseText
+ if (lastIndex !== -1)
+ chunk = responseText.substring(lastIndex)
+ try {
+ const data = JSON.parse(chunk)
+ updateChat(
+ +uuid,
+ index,
+ {
+ dateTime: new Date().toLocaleString(),
+ text: lastText + data.text ?? '',
+ inversion: false,
+ error: false,
+ loading: false,
+ conversationOptions: { conversationId: data.conversationId, parentMessageId: data.id },
+ requestOptions: { prompt: message, ...options },
+ },
+ )
+
+ if (openLongReply && data.detail.choices[0].finish_reason === 'length') {
+ options.parentMessageId = data.id
+ lastText = data.text
+ message = ''
+ return fetchChatAPIOnce()
+ }
+ }
+ catch (error) {
+ //
+ }
+ },
+ })
+ }
+ await fetchChatAPIOnce()
}
catch (error: any) {
if (error.message === 'canceled') {
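In plain terms, the new `fetchChatAPIOnce` wrapper keeps re-calling the API while the response was cut off (`finish_reason === 'length'`), accumulating the partial texts in `lastText`. The condensed sketch below restates that control flow with a hypothetical `requestChat` helper instead of the project's `fetchChatAPIProcess` and without the streaming parsing; one small difference is that the `?? ''` fallback is applied to `data.text` before concatenation.

```ts
// Condensed sketch of the long-reply loop. `requestChat` is a hypothetical
// helper standing in for the project's streaming fetch; each pass appends the
// new partial text and, if the reply was cut off, retries with an empty prompt
// and the last message id as parentMessageId.
interface ChatChunk {
  id: string
  text?: string
  detail?: { choices: { finish_reason?: string }[] }
}

async function fetchLongReply(
  requestChat: (prompt: string, parentMessageId?: string) => Promise<ChatChunk>,
  prompt: string,
  openLongReply = true,
): Promise<string> {
  let lastText = ''
  let message = prompt
  let parentMessageId: string | undefined

  while (true) {
    const data = await requestChat(message, parentMessageId)
    // Apply the fallback to data.text alone, then append to the accumulated text.
    lastText += data.text ?? ''

    const finishReason = data.detail?.choices?.[0]?.finish_reason
    if (!(openLongReply && finishReason === 'length'))
      return lastText

    // Continue from the truncated answer: empty follow-up prompt, parent id
    // pointing at the message that was cut off.
    parentMessageId = data.id
    message = ''
  }
}
```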
From 8614deefb7f27dab25d0ce3b88fdc6d6e2af0f39 Mon Sep 17 00:00:00 2001
From: yi-ge
Date: Fri, 10 Mar 2023 09:36:09 +0800
Subject: [PATCH 4/7] chore: upgrade chatgpt package and set long reply to
false default
---
service/package.json | 2 +-
service/pnpm-lock.yaml | 8 ++++----
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/service/package.json b/service/package.json
index 9f051f48d1..aa75b7c6a9 100644
--- a/service/package.json
+++ b/service/package.json
@@ -24,7 +24,7 @@
"common:cleanup": "rimraf node_modules && rimraf pnpm-lock.yaml"
},
"dependencies": {
- "chatgpt": "^5.0.8",
+ "chatgpt": "^5.0.9",
"dotenv": "^16.0.3",
"esno": "^0.16.3",
"express": "^4.18.2",
diff --git a/service/pnpm-lock.yaml b/service/pnpm-lock.yaml
index 5f3eb483e9..a27fef4e58 100644
--- a/service/pnpm-lock.yaml
+++ b/service/pnpm-lock.yaml
@@ -4,7 +4,7 @@ specifiers:
'@antfu/eslint-config': ^0.35.3
'@types/express': ^4.17.17
'@types/node': ^18.14.6
- chatgpt: ^5.0.8
+ chatgpt: ^5.0.9
dotenv: ^16.0.3
eslint: ^8.35.0
esno: ^0.16.3
@@ -17,7 +17,7 @@ specifiers:
typescript: ^4.9.5
dependencies:
- chatgpt: 5.0.8
+ chatgpt: 5.0.9
dotenv: 16.0.3
esno: 0.16.3
express: 4.18.2
@@ -902,8 +902,8 @@ packages:
resolution: {integrity: sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==}
dev: true
- /chatgpt/5.0.8:
- resolution: {integrity: sha512-Bjh7Y15QIsZ+SkQvbbZGymv1PGxkZ7X1vwqAwvyqaMMhbipU4kxht/GL62VCxhoUCXPwxTfScbFeNFtNldgqaw==}
+ /chatgpt/5.0.9:
+ resolution: {integrity: sha512-H0MMegLKcYyYh3LeFO4ubIdJSiSAl4rRjTeXf3KjHfGXDM7QZ1EkiTH9RuIoaNzOm8rJTn4QEhrwBbOIpbalxw==}
engines: {node: '>=14'}
hasBin: true
dependencies:
From 9133f99247e2ad8fc32939f08566ce6d87ac890d Mon Sep 17 00:00:00 2001
From: yi-ge
Date: Fri, 10 Mar 2023 09:37:22 +0800
Subject: [PATCH 5/7] chore: set long reply to false default
---
.env.example | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/.env.example b/.env.example
index b311ce4ab6..53fd4d07ce 100644
--- a/.env.example
+++ b/.env.example
@@ -3,4 +3,5 @@ VITE_GLOB_API_URL=/api
VITE_APP_API_BASE_URL=http://localhost:3002/
-VITE_GLOB_OPEN_LONG_REPLY=true
+# Whether long replies are supported, which may result in higher API fees
+VITE_GLOB_OPEN_LONG_REPLY=false
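Because the frontend (patch 3) compares the raw string against `'true'`, any other value, including this new `false` default or an unset variable, leaves long replies disabled. The check, restated from the patch:

```ts
// Only the exact string 'true' enables long replies; 'false', '', or an unset
// variable all leave the feature off, matching the new default.
const openLongReply = import.meta.env.VITE_GLOB_OPEN_LONG_REPLY === 'true'
```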
From 77e0e9a10bb73597957a4f36c6589bec42a596dd Mon Sep 17 00:00:00 2001
From: yi-ge
Date: Sat, 11 Mar 2023 18:39:22 +0800
Subject: [PATCH 6/7] fix: change maxRows to 8
---
src/views/chat/index.vue | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/views/chat/index.vue b/src/views/chat/index.vue
index f18cd4b8cc..de18299230 100644
--- a/src/views/chat/index.vue
+++ b/src/views/chat/index.vue
@@ -493,7 +493,7 @@ onUnmounted(() => {
From 4c4bd8c851a545af234ebc54cd44d9308638fa88 Mon Sep 17 00:00:00 2001
From: ChenZhaoYu <790348264@qq.com>
Date: Sun, 12 Mar 2023 19:27:44 +0800
Subject: [PATCH 7/7] feat: mobile max row
---
src/views/chat/index.vue | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/views/chat/index.vue b/src/views/chat/index.vue
index 338d941a8a..9d1e1c691a 100644
--- a/src/views/chat/index.vue
+++ b/src/views/chat/index.vue
@@ -527,7 +527,7 @@ onUnmounted(() => {
v-model:value="prompt"
type="textarea"
:placeholder="placeholder"
- :autosize="{ minRows: 1, maxRows: 8 }"
+ :autosize="{ minRows: 1, maxRows: isMobile ? 4 : 8 }"
@input="handleInput"
@focus="handleFocus"
@blur="handleBlur"
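This patch assumes an `isMobile` flag is already available in the component. A minimal sketch of deriving such a flag from a viewport media query follows; the composable name and breakpoint are hypothetical and only illustrate what the ternary in the `autosize` binding switches on.

```ts
// Hypothetical sketch of deriving an isMobile ref from the viewport width;
// the project supplies its own flag, this only illustrates the idea.
import { onUnmounted, ref } from 'vue'

function useIsMobile(breakpoint = 640) {
  const query = window.matchMedia(`(max-width: ${breakpoint}px)`)
  const isMobile = ref(query.matches)
  const update = (e: MediaQueryListEvent) => (isMobile.value = e.matches)
  query.addEventListener('change', update)
  onUnmounted(() => query.removeEventListener('change', update))
  return isMobile
}

// Usage in the chat view: cap the textarea at 4 rows on mobile, 8 otherwise,
// i.e. :autosize="{ minRows: 1, maxRows: isMobile ? 4 : 8 }"
const isMobile = useIsMobile()
```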