Compare commits

24 commits:

- d89d1e288d
- cd89d11d0b
- cf0053a060
- 019da4399e
- 044961bb01
- 2374c81edb
- 699760713e
- d75413cc49
- 8175f199d2
- f8c2f396c1
- 8217647df8
- 288c9eeeca
- 4d09ff7c8a
- 5fa059017c
- 323f10844b
- ee035390db
- be743bf799
- a59f84f2bf
- ed0cf2997d
- 7f00c74097
- f007417fa4
- 27c5e2a3ac
- e90dc0c12b
- 837fd8c9ff
README.md (51 changed lines)

@@ -1,14 +1,29 @@
<p align="center">
  <img alt="demo" src="./demos/demo.gif?v=1">
</p>
<div align="center">
  <h1>ChatGPT UI</h1>
</div>

[English](./README.md) | [中文](./docs/zh/README.md)

# ChatGPT UI

A ChatGPT web client that supports multiple users, multiple database connections for persistent data storage, supports i18n. Provides Docker images and quick deployment scripts.

https://user-images.githubusercontent.com/46235412/227156264-ca17ab17-999b-414f-ab06-3f75b5235bfe.mp4


## 📢Updates

<details open>
<summary><strong>2023-03-27</strong></summary>
🚀 Support gpt-4 model. You can select the model in the "Model Parameters" of the front-end.
The GPT-4 model requires whitelist access from OpenAI.
</details>

<details open>
<summary><strong>2023-03-23</strong></summary>
Added web search capability to generate more relevant and up-to-date answers from ChatGPT!
This feature is off by default, you can turn it on in `Chat->Settings` in the admin panel, there is a record `open_web_search` in Settings, set its value to True.

</details>

<details open>
<summary><strong>2023-03-15</strong></summary>

@@ -17,17 +32,7 @@ Add "open_registration" setting option in the admin panel to control whether use

</details>

<details open>
<summary><strong>2023-03-10</strong></summary>

Add 2 environment variables to control the typewriter effect:

- `NUXT_PUBLIC_TYPEWRITER=true` to enable/disable the typewriter effect
- `NUXT_PUBLIC_TYPEWRITER_DELAY=50` to set the delay time for each character in milliseconds.

</details>

<details open>
<details>
<summary><strong>2023-03-04</strong></summary>

**Update to the latest official chat model** `gpt-3.5-turbo`
@@ -90,9 +95,6 @@ services:
    image: wongsaang/chatgpt-ui-client:latest
    environment:
      - SERVER_DOMAIN=http://backend-web-server
      - NUXT_PUBLIC_APP_NAME='ChatGPT UI' # App name
      - NUXT_PUBLIC_TYPEWRITER=true # Enable typewriter effect, default is false
      - NUXT_PUBLIC_TYPEWRITER_DELAY=100 # Typewriter effect delay time, default is 50ms
    depends_on:
      - backend-web-server
    ports:
@@ -115,6 +117,7 @@ services:
      # - EMAIL_HOST_USER=
      # - EMAIL_HOST_PASSWORD=
      # - EMAIL_USE_TLS=True
      # - EMAIL_FROM=no-reply@example.com #Default sender email address
    ports:
      - '8000:8000'
    networks:
@@ -156,6 +159,16 @@ Before you can start chatting, you need to add an OpenAI API key. In the Setting

Now you can access the web client at `http(s)://your.domain` or `http://123.123.123.123` to start chatting.

## Donation

> If it is helpful to you, it is also helping me.

If you want to support me, Buy me a coffee ❤️ [https://www.buymeacoffee.com/WongSaang](https://www.buymeacoffee.com/WongSaang)

<p align="center">
  <img height="150" src="https://github.com/WongSaang/chatgpt-ui/blob/main/demos/bmc_qr.png?raw=true"/>
</p>

## Development

### Setup
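The 2023-03-10 note in the README hunk above describes two public config values that drive a typewriter effect (an enable flag and a per-character delay in milliseconds). As a rough illustration only, and not the project's actual rendering code, a typewriter driven by those two settings could look like this:

```js
// Illustrative sketch: a minimal typewriter effect driven by the two settings
// the README describes (enable flag + per-character delay in milliseconds).
// This is NOT the project's actual implementation.
async function typewrite(text, { enabled = true, delayMs = 50 } = {}, onUpdate) {
  if (!enabled) {
    onUpdate(text) // effect disabled: show the full text at once
    return
  }
  let shown = ''
  for (const ch of text) {
    shown += ch
    onUpdate(shown) // re-render the partial text
    await new Promise(resolve => setTimeout(resolve, delayMs))
  }
}

// Example: print progressively to the console.
typewrite('Hello from ChatGPT UI', { enabled: true, delayMs: 50 }, s => console.log(s))
```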
@@ -1,11 +1,15 @@
<script setup>

const dialog = ref(false)
const currentModel = useCurrentModel()
const availableModels = [
  DEFAULT_MODEL.name
  'gpt-3.5-turbo',
  'gpt-4'
]
const currentModelDefault = ref(MODELS[currentModel.value.name])

watch(currentModel, (newVal, oldVal) => {
  currentModelDefault.value = MODELS[newVal.name]
  saveCurrentModel(newVal)
}, { deep: true })

@@ -83,7 +87,7 @@ watch(currentModel, (newVal, oldVal) => {
  single-line
  density="compact"
  type="number"
  max="2048"
  :max="currentModelDefault.total_tokens"
  step="1"
  style="width: 100px"
  class="flex-grow-0"
@@ -93,7 +97,7 @@ watch(currentModel, (newVal, oldVal) => {
<v-col cols="12">
  <v-slider
    v-model="currentModel.max_tokens"
    :max="2048"
    :max="currentModelDefault.total_tokens"
    :step="1"
    hide-details
  >
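The hunk above (apparently the model-parameters component) swaps the single hard-coded model and the fixed `2048` cap for a lookup into the `MODELS` map added later in this diff, so the max-tokens input and slider are bounded by the selected model's context size. A minimal plain-JavaScript sketch of that behaviour, using values copied from the `MODELS` hunk near the end of this compare:

```js
// Sketch only: mirrors the MODELS map added in the enums hunk further down.
const MODELS = {
  'gpt-3.5-turbo': { name: 'gpt-3.5-turbo', total_tokens: 4096, max_tokens: 1000 },
  'gpt-4':         { name: 'gpt-4',         total_tokens: 8192, max_tokens: 2000 },
}

// The max_tokens cap is no longer a fixed 2048; it follows the selected model.
function maxTokensCap(modelName) {
  const model = MODELS[modelName]
  return model ? model.total_tokens : MODELS['gpt-3.5-turbo'].total_tokens
}

console.log(maxTokensCap('gpt-3.5-turbo')) // 4096
console.log(maxTokensCap('gpt-4'))         // 8192
```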
@@ -2,6 +2,7 @@
const menu = ref(false)
const prompts = ref([])
const editingPrompt = ref(null)
const newTitlePrompt = ref(null)
const newPrompt = ref('')
const submittingNewPrompt = ref(false)
const promptInputErrorMessage = ref('')
@@ -24,11 +25,13 @@ const addPrompt = async () => {
  const { data, error } = await useAuthFetch('/api/chat/prompts/', {
    method: 'POST',
    body: JSON.stringify({
      title: newTitlePrompt.value,
      prompt: newPrompt.value
    })
  })
  if (!error.value) {
    prompts.value.push(data.value)
    newTitlePrompt.value = null
    newPrompt.value = ''
  }
  submittingNewPrompt.value = false
@@ -43,6 +46,7 @@ const updatePrompt = async (index) => {
  const { data, error } = await useAuthFetch(`/api/chat/prompts/${editingPrompt.value.id}/`, {
    method: 'PUT',
    body: JSON.stringify({
      title: editingPrompt.value.title,
      prompt: editingPrompt.value.prompt
    })
  })
@@ -96,10 +100,12 @@ onMounted( () => {
<template v-slot:activator="{ props }">
  <v-btn
    v-bind="props"
    icon="speaker_notes"
    title="Common prompts"
    class="mr-3"
  ></v-btn>
    icon
  >
    <v-icon
      icon="speaker_notes"
    ></v-icon>
  </v-btn>
</template>

<v-container>
@@ -108,7 +114,7 @@ onMounted( () => {
  max-width="500"
>
  <v-card-title>
    <span class="headline">Frequently prompts</span>
    <span class="headline">{{ $t('frequentlyPrompts') }}</span>
  </v-card-title>

  <v-divider></v-divider>
@@ -125,35 +131,47 @@ onMounted( () => {
>
  <v-list-item
    active-color="primary"
    rounded="xl"
    v-if="editingPrompt && editingPrompt.id === prompt.id"
  >
    <v-textarea
      rows="2"
      v-model="editingPrompt.prompt"
      :loading="editingPrompt.updating"
      variant="underlined"
      hide-details
      density="compact"
    >
      <template v-slot:append>
        <div class="d-flex flex-column">
          <v-btn
            icon="done"
            variant="text"
            :loading="editingPrompt.updating"
            @click="updatePrompt(idx)"
          >
          </v-btn>
          <v-btn
            icon="close"
            variant="text"
            @click="cancelEditPrompt()"
          >
          </v-btn>
        </div>
      </template>
    </v-textarea>
    <div class="d-flex flex-row" :style="{ marginTop: '5px' }">
      <div class="flex-grow-1">
        <v-text-field
          v-model="editingPrompt.title"
          :loading="editingPrompt.updating"
          :label="$t('titlePrompt')"
          variant="underlined"
          density="compact"
          hide-details
        >
        </v-text-field>
        <v-textarea
          rows="2"
          v-model="editingPrompt.prompt"
          :loading="editingPrompt.updating"
          variant="underlined"
          density="compact"
          hide-details
        >
        </v-textarea>
      </div>
      <div>
        <div class="d-flex flex-column">
          <v-btn
            icon="done"
            variant="text"
            :loading="editingPrompt.updating"
            @click="updatePrompt(idx)"
          >
          </v-btn>
          <v-btn
            icon="close"
            variant="text"
            @click="cancelEditPrompt()"
          >
          </v-btn>
        </div>
      </div>
    </div>
  </v-list-item>
  <v-list-item
    v-if="!editingPrompt || editingPrompt.id !== prompt.id"
@@ -161,7 +179,7 @@ onMounted( () => {
    active-color="primary"
    @click="selectPrompt(prompt)"
  >
    <v-list-item-title>{{ prompt.prompt }}</v-list-item-title>
    <v-list-item-title>{{ prompt.title ? prompt.title : prompt.prompt }}</v-list-item-title>
    <template v-slot:append>
      <v-btn
        icon="edit"
@@ -182,6 +200,25 @@ onMounted( () => {
  </v-list-item>
</template>

<v-list-item
  active-color="primary"
>
  <div
    class="pt-3"
  >
    <v-text-field
      rows="1"
      v-model="newTitlePrompt"
      :label="$t('titlePrompt')"
      variant="outlined"
      density="compact"
      hide-details
      clearable
    >
    </v-text-field>
  </div>
</v-list-item>

<v-list-item
  active-color="primary"
>
@@ -191,7 +228,7 @@ onMounted( () => {
  <v-textarea
    rows="2"
    v-model="newPrompt"
    label="Add a new prompt"
    :label="$t('addNewPrompt')"
    variant="outlined"
    density="compact"
    :error-messages="promptInputErrorMessage"
@@ -209,7 +246,7 @@ onMounted( () => {
  @click="addPrompt()"
>
  <v-icon icon="add"></v-icon>
  Add prompt
  {{ $t('addPrompt') }}
</v-btn>
</v-list-item>
</v-list>
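The prompt-management hunk above adds an optional title field: it is sent along with the prompt text to `/api/chat/prompts/` and, when present, shown in the list instead of the raw prompt. A sketch of the create request it builds, using plain `fetch` instead of the project's `useAuthFetch` wrapper (the Authorization header here is an assumption about how that wrapper authenticates):

```js
// Sketch of the prompt-creation request (plain fetch instead of useAuthFetch;
// the Authorization header is an assumption, not taken from the diff).
async function createPrompt(title, prompt, token) {
  const res = await fetch('/api/chat/prompts/', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${token}`,
    },
    // "title" is the new optional field; the list shows it when present and
    // falls back to the prompt text itself otherwise.
    body: JSON.stringify({ title, prompt }),
  })
  return res.json()
}
```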
@@ -1,5 +1,5 @@

export const useModels = () => useState('models', () => getStoredModels())
// export const useModels = () => useState('models', () => getStoredModels())

export const useCurrentModel = () => useState('currentModel', () => getCurrentModel())

@@ -8,3 +8,5 @@ export const useApiKey = () => useState('apiKey', () => getStoredApiKey())
export const useConversion = () => useState('conversion', () => getDefaultConversionData())

export const useConversions = () => useState('conversions', () => [])

export const useSettings = () => useState('settings', () => {})
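The new `useSettings` composable above is a piece of shared Nuxt state: `loadSettings` (added in a later hunk) fills it from `/api/chat/settings/`, and the layout and chat page watch it to toggle features such as the API-key dialog and web search. A minimal sketch of consuming it from inside a component of this Nuxt app (values arrive as strings such as 'True', as the later hunks show):

```js
// Minimal sketch, meant for inside a component/composable of this Nuxt app
// (useSettings and computed are auto-imported there).
const settings = useSettings()

const webSearchEnabled = computed(() => {
  const s = settings.value
  return !!s && s.open_web_search === 'True'
})
```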
demos/bmc_qr.png (new binary file, 64 KiB, not shown)

demos/demo.mp4 (new binary file, not shown)
docker-compose.dev.yml (new file, 16 lines)

@@ -0,0 +1,16 @@
version: '3'
services:
  client:
    platform: linux/x86_64
    build: .
    environment:
      SERVER_DOMAIN: http://web-server
    ports:
      - '${CLIENT_PORT:-8080}:80'
    networks:
      - chatgpt_network
    restart: always

networks:
  chatgpt_network:
    external: True
@@ -1,12 +1,10 @@
version: '3'
services:
  client:
    platform: linux/x86_64
    image: wongsaang/chatgpt-ui-client:latest
    environment:
      - SERVER_DOMAIN=http://backend-web-server
      - NUXT_PUBLIC_APP_NAME='ChatGPT UI'
      - NUXT_PUBLIC_TYPEWRITER=true
      - NUXT_PUBLIC_TYPEWRITER_DELAY=100
    depends_on:
      - backend-web-server
    ports:
@@ -15,6 +13,7 @@ services:
      - chatgpt_ui_network
    restart: always
  backend-wsgi-server:
    platform: linux/x86_64
    image: wongsaang/chatgpt-ui-wsgi-server:latest
    environment:
      - APP_DOMAIN=${APP_DOMAIN:-localhost:9000}
@@ -29,12 +28,14 @@ services:
      # - EMAIL_HOST_USER=
      # - EMAIL_HOST_PASSWORD=
      # - EMAIL_USE_TLS=True
      # - EMAIL_FROM=no-reply@example.com #Default sender email address
    ports:
      - '${WSGI_PORT:-8000}:8000'
    networks:
      - chatgpt_ui_network
    restart: always
  backend-web-server:
    platform: linux/x86_64
    image: wongsaang/chatgpt-ui-web-server:latest
    environment:
      - BACKEND_URL=http://backend-wsgi-server:8000
@@ -1,14 +1,27 @@
<p align="center">
  <img alt="demo" src="../../demos/demo.gif?v=1">
</p>
<div align="center">
  <h1>ChatGPT UI</h1>
</div>

[English](../../README.md) | [中文](./docs/zh/README.md)

# ChatGPT UI

ChatGPT Web 客户端,支持多用户,支持 Mysql、PostgreSQL 等多种数据库连接进行数据持久化存储,支持多语言。提供 Docker 镜像和快速部署脚本。

https://user-images.githubusercontent.com/46235412/227156264-ca17ab17-999b-414f-ab06-3f75b5235bfe.mp4


## 📢 更新

<details open>
<summary><strong>2023-03-27</strong></summary>
🚀 支持 gpt-4 模型。你可以在前端的“模型参数”中选择模型,gpt-4 模型需要通过 openai 的白名单才能使用。
</details>

<details open>
<summary><strong>2023-03-23</strong></summary>
增加网页搜索能力,使得 ChatGPT 生成的回答更与时俱进!
该功能默认处于关闭状态,你可以在管理后台的 `Chat->Settings` 中开启它,在 Settings 中有一个 `open_web_search` 的记录,把它的值设置为 True。
</details>

<details open>
<summary><strong>2023-03-15</strong></summary>

@@ -16,17 +29,7 @@ ChatGPT Web 客户端,支持多用户,支持 Mysql、PostgreSQL 等多种数

</details>

<details open>
<summary><strong>2023-03-10</strong></summary>

增加 2 个环境变量来控制打字机效果, 详见下方 docker-compose 配置的环境变量说明

- `NUXT_PUBLIC_TYPEWRITER` 是否开启打字机效果
- `NUXT_PUBLIC_TYPEWRITER_DELAY` 每个字的延迟时间,单位:毫秒

</details>

<details open>
<details>
<summary><strong>2023-03-04</strong></summary>

**使用最新的官方聊天模型** `gpt-3.5-turbo`
@@ -88,9 +91,6 @@ services:
    image: wongsaang/chatgpt-ui-client:latest
    environment:
      - SERVER_DOMAIN=http://backend-web-server
      - NUXT_PUBLIC_APP_NAME='ChatGPT UI' # App 名称,默认为 ChatGPT UI
      - NUXT_PUBLIC_TYPEWRITER=true # 是否启用打字机效果,默认关闭
      - NUXT_PUBLIC_TYPEWRITER_DELAY=100 # 打字机效果的延迟时间,默认 50毫秒
    depends_on:
      - backend-web-server
    ports:
@@ -113,6 +113,7 @@ services:
      # - EMAIL_HOST_USER=
      # - EMAIL_HOST_PASSWORD=
      # - EMAIL_USE_TLS=True
      # - EMAIL_FROM=no-reply@example.com #默认发件邮箱地址
    ports:
      - '8000:8000'
    networks:
@@ -154,6 +155,16 @@ networks:
现在可以访问客户端地址 `http(s)://your.domain` / `http://123.123.123.123` 开始聊天。


## 续杯咖啡

> 如果对您有帮助,也是在帮助我自己.

如果你想支持我,给我续杯咖啡吧 ❤️ [https://www.buymeacoffee.com/WongSaang](https://www.buymeacoffee.com/WongSaang)

<p align="center">
  <img height="150" src="https://github.com/WongSaang/chatgpt-ui/blob/main/demos/bmc_qr.png?raw=true"/>
</p>

## Development

### Setup
@@ -10,6 +10,10 @@
  "saveAndClose": "Save & Close",
  "pleaseSelectAtLeastOneModelDot": "Please select at least one model.",
  "writeAMessage": "Write a message",
  "frequentlyPrompts": "Frequently prompts",
  "addPrompt": "Add prompt",
  "titlePrompt": "Title",
  "addNewPrompt": "Add a new prompt",
  "pressEnterToSendYourMessageOrShiftEnterToAddANewLine": "Press Enter to send your message or Shift+Enter to add a new line",
  "lightMode": "Light Mode",
  "darkMode": "Dark Mode",
@@ -34,6 +38,8 @@
  "copied": "Copied",
  "delete": "Delete",
  "signOut": "Sign out",
  "webSearch": "Web Search",
  "webSearchDefaultPrompt": "Web search results:\n\n[web_results]\nCurrent date: [current_date]\n\nInstructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.\nQuery: [query]",
  "welcomeScreen": {
    "introduction1": "is an unofficial client for ChatGPT, but uses the official OpenAI API.",
    "introduction2": "You will need an OpenAI API Key before you can use this client.",
lang/ru-RU.json (new file, 65 lines)

@@ -0,0 +1,65 @@
{
  "welcomeTo": "Добро пожаловать в",
  "language": "Язык",
  "setApiKey": "Установить ключ API",
  "setOpenAIApiKey": "Установить ключ API OpenAI",
  "openAIApiKey": "Ключ API OpenAI",
  "getAKey": "Получить ключ",
  "openAIModels": "Модели OpenAI",
  "aboutTheModels": "О моделях",
  "saveAndClose": "Сохранить & Закрыть",
  "pleaseSelectAtLeastOneModelDot": "Выберите хотя бы одну модель.",
  "writeAMessage": "Напишите сообщение",
  "frequentlyPrompts": "Список подсказок",
  "addPrompt": "Добавить подсказку",
  "titlePrompt": "Заголовок",
  "addNewPrompt": "Добавитьте новую подсказку",
  "pressEnterToSendYourMessageOrShiftEnterToAddANewLine": "Нажмите Enter, чтобы отправить сообщение, или Shift+Enter, чтобы добавить новую строку.",
  "lightMode": "Светлая",
  "darkMode": "Темная",
  "followSystem": "Системная",
  "themeMode": "Тема",
  "feedback": "Обратная связь",
  "newConversation": "Новый чат",
  "clearConversations": "Очистить чаты",
  "modelParameters": "Параметры модели",
  "model": "Модель",
  "temperature": "Temperature",
  "topP": "Top P",
  "frequencyPenalty": "Frequency Penalty",
  "presencePenalty": "Presence Penalty",
  "maxTokens": "Max Tokens",
  "roles": {
    "me": "Я",
    "ai": "AI"
  },
  "edit": "Редактировать",
  "copy": "Копировать",
  "copied": "Скопировано",
  "delete": "Удалить",
  "signOut": "Выход",
  "webSearch": "Поиск в интернете",
  "webSearchDefaultPrompt": "Результаты веб-поиска:\n\n[web_results]\nТекущая дата: [current_date]\n\nИнструкции: Используя предоставленные результаты веб-поиска, напишите развернутый ответ на заданный запрос. Обязательно цитируйте результаты, используя обозначение [[number](URL)] после ссылки. Если предоставленные результаты поиска относятся к нескольким темам с одинаковым названием, напишите отдельные ответы для каждой темы.\nЗапрос: [query]",
  "welcomeScreen": {
    "introduction1": "является неофициальным клиентом для ChatGPT, но использует официальный API OpenAI.",
    "introduction2": "Вам понадобится ключ API OpenAI, прежде чем вы сможете использовать этот клиент.",
    "examples": {
      "title": "Примеры",
      "item1": "\"Объясни, что такое квантовые вычисления простыми словами\"",
      "item2": "\"Предложи несколько креативных идей для дня рождения 10-летнего ребенка?\"",
      "item3": "\"Как сделать HTTP-запрос в Javascript?\""
    },
    "capabilities": {
      "title": "Возможности",
      "item1": "Помнит, что пользователь сказал ранее в разговоре",
      "item2": "Позволяет пользователю вносить последующие исправления",
      "item3": "Научен отклонять неуместные запросы"
    },
    "limitations": {
      "title": "Ограничения",
      "item1": "Иногда может генерировать неверную информацию",
      "item2": "Иногда может создавать вредные инструкции или предвзятый контент",
      "item3": "Ограниченное знание мира и событий после 2021 года"
    }
  }
}
@@ -10,6 +10,10 @@
  "saveAndClose": "保存并关闭",
  "pleaseSelectAtLeastOneModelDot": "请至少选择一个模型",
  "writeAMessage": "输入信息",
  "frequentlyPrompts": "Frequently prompts",
  "addPrompt": "Add prompt",
  "titlePrompt": "Title",
  "addNewPrompt": "Add a new prompt",
  "pressEnterToSendYourMessageOrShiftEnterToAddANewLine": "按回车键发送您的信息,或按Shift+Enter键添加新行",
  "lightMode": "明亮模式",
  "darkMode": "暗色模式",
@@ -34,6 +38,8 @@
  "copied": "已复制",
  "delete": "删除",
  "signOut": "退出登录",
  "webSearch": "网页搜索",
  "webSearchDefaultPrompt": "网络搜索结果:\n\n[web_results]\n当前日期:[current_date]\n\n说明:使用提供的网络搜索结果,对给定的查询写出全面的回复。确保在引用参考文献后使用 [[number](URL)] 符号进行引用结果. 如果提供的搜索结果涉及到多个具有相同名称的主题,请针对每个主题编写单独的答案。\n查询:[query]",
  "welcomeScreen": {
    "introduction1": "是一个非官方的ChatGPT客户端,但使用OpenAI的官方API",
    "introduction2": "在使用本客户端之前,您需要一个OpenAI API密钥。",
@@ -84,6 +84,7 @@ const loadConversations = async () => {

const {mdAndUp} = useDisplay()


const drawerPermanent = computed(() => {
  return mdAndUp.value
})
@@ -97,8 +98,18 @@ const signOut = async () => {
  }
}

onNuxtReady(async () => {
const settings = useSettings()
const showApiKeySetting = ref(false)
watchEffect(() => {
  if (settings.value) {
    const settingsValue = toRaw(settings.value)
    showApiKeySetting.value = settingsValue.open_api_key_setting && settingsValue.open_api_key_setting === 'True'
  }
})

onMounted(async () => {
  loadConversations()
  loadSettings()
})

</script>
@@ -237,6 +248,10 @@ onNuxtReady(async () => {
      </v-card>
    </v-dialog>

    <ApiKeyDialog
      v-if="showApiKeySetting"
    />

    <ModelParameters/>

    <v-menu
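Both the layout hunk above and the chat-page hunk further down gate features on admin settings, and those settings come back as strings, which is why the code compares against the literal 'True'. A tiny helper expressing the same check:

```js
// Settings values arrive as strings ('True' / 'False'), so the check is a
// string comparison rather than a boolean test.
const isEnabled = (value) => value === 'True'

console.log(isEnabled('True'))    // true
console.log(isEnabled('False'))   // false
console.log(isEnabled(undefined)) // false (setting not loaded yet)
```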
@@ -1,5 +1,5 @@
// https://nuxt.com/docs/api/configuration/nuxt-config
const appName = 'ChatGPT UI'
const appName = process.env.NUXT_PUBLIC_APP_NAME ?? 'ChatGPT UI'

export default defineNuxtConfig({
  dev: false,
@@ -14,6 +14,7 @@ export default defineNuxtConfig({
      appName: appName,
      typewriter: false,
      typewriterDelay: 50,
      customApiKey: false
    }
  },
  build: {
@@ -53,6 +54,12 @@ export default defineNuxtConfig({
        iso: 'zh-CN',
        name: '简体中文',
        file: 'zh-CN.json',
      },
      {
        code: 'ru',
        iso: 'ru-RU',
        name: 'Русский',
        file: 'ru-RU.json',
      }
    ],
    lazy: true,
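The config hunk above adds a `customApiKey` flag to the public runtime config, registers the Russian locale, and reads the app name from `NUXT_PUBLIC_APP_NAME`. In Nuxt 3 every `runtimeConfig.public` key can also be overridden at runtime through a matching `NUXT_PUBLIC_*` environment variable, which is how the `NUXT_PUBLIC_TYPEWRITER*` variables from the docker-compose hunks reach the client. A sketch of reading these values inside a component of this app:

```js
// Sketch: inside any component/composable of this Nuxt 3 app. Each public key
// can be overridden at runtime with a NUXT_PUBLIC_* environment variable,
// e.g. NUXT_PUBLIC_TYPEWRITER=true or NUXT_PUBLIC_TYPEWRITER_DELAY=100.
const runtimeConfig = useRuntimeConfig()

const typewriterEnabled = runtimeConfig.public.typewriter      // default: false
const typewriterDelayMs = runtimeConfig.public.typewriterDelay // default: 50
const appTitle = runtimeConfig.public.appName                  // default: 'ChatGPT UI'
```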
@@ -5,8 +5,6 @@ definePageMeta({
  middleware: ["auth"]
})
import {EventStreamContentType, fetchEventSource} from '@microsoft/fetch-event-source'
import { nextTick } from 'vue'
import MessageActions from "~/components/MessageActions.vue";

const { $i18n, $auth } = useNuxtApp()
const runtimeConfig = useRuntimeConfig()
@@ -53,11 +51,19 @@ const abortFetch = () => {
const fetchReply = async (message) => {
  ctrl = new AbortController()

  let webSearchParams = {}
  if (enableWebSearch.value) {
    webSearchParams['web_search'] = {
      ua: navigator.userAgent,
      default_prompt: $i18n.t('webSearchDefaultPrompt')
    }
  }

  const data = Object.assign({}, currentModel.value, {
    openaiApiKey: openaiApiKey.value,
    openaiApiKey: enableCustomApiKey.value ? openaiApiKey.value : null,
    message: message,
    conversationId: currentConversation.value.id
  })
  }, webSearchParams)

  try {
    await fetchEventSource('/api/conversation/', {
@@ -84,12 +90,13 @@ const fetchReply = async (message) => {
      throw err;
    },
    async onmessage(message) {
      // console.log(message)
      const event = message.event
      const data = JSON.parse(message.data)

      if (event === 'error') {
        throw new Error(data.error);
        abortFetch()
        showSnackbar(data.error)
        return;
      }

      if (event === 'userMessageId') {
@@ -157,6 +164,20 @@ const deleteMessage = (index) => {
  currentConversation.value.messages.splice(index, 1)
}

const showWebSearchToggle = ref(false)
const enableWebSearch = ref(false)
const enableCustomApiKey = ref(false)

const settings = useSettings()

watchEffect(() => {
  if (settings.value) {
    const settingsValue = toRaw(settings.value)
    showWebSearchToggle.value = settingsValue.open_web_search && settingsValue.open_web_search === 'True'
    enableCustomApiKey.value = settingsValue.open_api_key_setting && settingsValue.open_api_key_setting === 'True'
  }
})

</script>

<template>
@@ -197,21 +218,36 @@ const deleteMessage = (index) => {
      <div ref="grab" class="w-100" style="height: 200px;"></div>
    </div>
    <Welcome v-else />
    <v-footer app class="d-flex flex-column">
      <div class="px-md-16 w-100 d-flex align-center">
        <Prompt v-show="!fetchingResponse" :use-prompt="usePrompt" />
        <v-btn
          v-show="fetchingResponse"
          icon="close"
          title="stop"
          class="mr-3"
          @click="stop"
        ></v-btn>
        <MsgEditor ref="editor" :send-message="send" :disabled="fetchingResponse" :loading="fetchingResponse" />
      </div>
    <v-footer app>
      <div class="px-md-16 w-100 d-flex flex-column">
        <div class="d-flex align-center">
          <v-btn
            v-show="fetchingResponse"
            icon="close"
            title="stop"
            class="mr-3"
            @click="stop"
          ></v-btn>
          <MsgEditor ref="editor" :send-message="send" :disabled="fetchingResponse" :loading="fetchingResponse" />
        </div>
        <v-toolbar
          density="comfortable"
          color="transparent"
        >
          <Prompt v-show="!fetchingResponse" :use-prompt="usePrompt" />
          <v-switch
            v-if="showWebSearchToggle"
            v-model="enableWebSearch"
            hide-details
            color="primary"
            :label="$t('webSearch')"
          ></v-switch>
          <v-spacer></v-spacer>
        </v-toolbar>

        <div class="px-4 py-2 text-disabled text-caption font-weight-light text-center w-100">
          © {{ new Date().getFullYear() }} {{ runtimeConfig.public.appName }}
          <!-- <div class="py-2 text-disabled text-caption font-weight-light text-center">-->
          <!-- © {{ new Date().getFullYear() }} {{ runtimeConfig.public.appName }}-->
          <!-- </div>-->
        </div>
    </v-footer>
    <v-snackbar
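In the chat-page hunk above, the body sent to `/api/conversation/` is the current model's parameters merged with the message, the conversation id, the (optional) user API key, and, when the web-search switch is on, a `web_search` object carrying the browser user agent and the localized default prompt. A sketch of what that merged payload looks like (field values here are illustrative):

```js
// Sketch of the merged request body produced by Object.assign above when the
// web-search switch is enabled (values are illustrative, not from the diff).
const payload = {
  // ...fields from currentModel.value
  name: 'gpt-3.5-turbo',
  temperature: 0.7,
  max_tokens: 1000,
  // per-request fields
  openaiApiKey: null,   // only set when open_api_key_setting is 'True'
  message: 'Hello!',
  conversationId: 42,   // illustrative id
  // only present when enableWebSearch is true
  web_search: {
    ua: navigator.userAgent,
    default_prompt: 'Web search results:\n\n[web_results]\n...'
  }
}
```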
@@ -5,11 +5,25 @@ export const STORAGE_KEY = {
  OPENAI_API_KEY: 'openai_api_key',
}

export const DEFAULT_MODEL = {
  name: 'gpt-3.5-turbo',
  frequency_penalty: 0.0,
  presence_penalty: 0.0,
  max_tokens: 1000,
  temperature: 0.7,
  top_p: 1.0
export const MODELS = {
  'gpt-3.5-turbo': {
    name: 'gpt-3.5-turbo',
    frequency_penalty: 0.0,
    presence_penalty: 0.0,
    total_tokens: 4096,
    max_tokens: 1000,
    temperature: 0.7,
    top_p: 1.0
  },
  'gpt-4': {
    name: 'gpt-4',
    frequency_penalty: 0.0,
    presence_penalty: 0.0,
    total_tokens: 8192,
    max_tokens: 2000,
    temperature: 0.7,
    top_p: 1.0
  }
}

export const DEFAULT_MODEL_NAME = 'gpt-3.5-turbo'
@@ -51,3 +51,22 @@ export const genTitle = async (conversationId) => {
  }
  return null
}

const transformData = (list) => {
  const result = {};
  for (let i = 0; i < list.length; i++) {
    const item = list[i];
    result[item.name] = item.value;
  }
  return result;
}

export const loadSettings = async () => {
  const settings = useSettings()
  const { data, error } = await useAuthFetch('/api/chat/settings/', {
    method: 'GET'
  })
  if (!error.value) {
    settings.value = transformData(data.value)
  }
}
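`transformData` above flattens the list of settings records returned by the API into a name/value map before it is stored via `useSettings`. A small worked example (the setting names come from hunks elsewhere in this compare; any response fields beyond `name`/`value` are an assumption):

```js
// Worked example of transformData: the API returns a list of settings records,
// the store keeps them as a flat object (shape beyond name/value is assumed).
const apiResponse = [
  { name: 'open_web_search', value: 'True' },
  { name: 'open_api_key_setting', value: 'False' },
  { name: 'open_registration', value: 'True' },
]

const transformData = (list) => {
  const result = {}
  for (const item of list) {
    result[item.name] = item.value
  }
  return result
}

console.log(transformData(apiResponse))
// { open_web_search: 'True', open_api_key_setting: 'False', open_registration: 'True' }
```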
@@ -1,3 +1,4 @@
import {MODELS} from "~/utils/enums";

const get = (key) => {
  let val = localStorage.getItem(key)
@@ -17,13 +18,13 @@ export const setModels = (val) => {
  models.value = val
}

export const getStoredModels = () => {
  let models = get(STORAGE_KEY.MODELS)
  if (!models) {
    models = [DEFAULT_MODEL]
  }
  return models
}
// export const getStoredModels = () => {
//   let models = get(STORAGE_KEY.MODELS)
//   if (!models) {
//     models = [DEFAULT_MODEL]
//   }
//   return models
// }

export const saveCurrentModel = (val) => {
  set(STORAGE_KEY.CURRENT_MODEL, val)
@@ -32,7 +33,7 @@ export const saveCurrentModel = (val) => {
export const getCurrentModel = () => {
  let model = get(STORAGE_KEY.CURRENT_MODEL)
  if (!model) {
    model = DEFAULT_MODEL
    model = MODELS[DEFAULT_MODEL_NAME]
  }
  return model
}