Compare commits


3 Commits

12 changed files with 71 additions and 29 deletions

View File

@@ -6,11 +6,18 @@
A ChatGPT web client that supports multiple users, multiple database connections for persistent data storage, supports i18n. Provides Docker images and quick deployment scripts.
The server of this project: [https://github.com/WongSaang/chatgpt-ui-server](https://github.com/WongSaang/chatgpt-ui-server)
https://user-images.githubusercontent.com/46235412/227156264-ca17ab17-999b-414f-ab06-3f75b5235bfe.mp4
## 📢Updates
<details open>
<summary><strong>2023-04-06</strong></summary>
The client is now deployed with server-side rendering (SSR), and environment variables are now supported; see the docker-compose configuration below for the available variables. This improves first-screen loading speed and reduces white-screen time.
</details>
<details open>
<summary><strong>2023-03-27</strong></summary>
🚀 Support gpt-4 model. You can select the model in the "Model Parameters" of the front-end.
@@ -95,6 +102,9 @@ services:
image: wongsaang/chatgpt-ui-client:latest
environment:
- SERVER_DOMAIN=http://backend-web-server
# - NUXT_PUBLIC_APP_NAME='ChatGPT UI' # The name of the application
# - NUXT_PUBLIC_TYPEWRITER=true # Whether to enable the typewriter effect, default false
# - NUXT_PUBLIC_TYPEWRITER_DELAY=50 # The delay time of the typewriter effect, default 50ms
depends_on:
- backend-web-server
ports:
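The three commented-out variables in this hunk (and the 2023-04-06 note above) rely on Nuxt 3 public runtime config: an environment variable named `NUXT_PUBLIC_*` overrides the matching key under `runtimeConfig.public` when the container starts. A minimal sketch of how the client presumably reads them, assuming `appName`, `typewriter` and `typewriterDelay` are declared under `runtimeConfig.public` in nuxt.config (the exact keys are not shown in this diff):

```js
// Sketch only — key names are assumed from the NUXT_PUBLIC_* variables above.
// Runs inside a component <script setup> or a composable.
const config = useRuntimeConfig()

const appName = config.public.appName                                // NUXT_PUBLIC_APP_NAME
const typewriter = String(config.public.typewriter) === 'true'       // NUXT_PUBLIC_TYPEWRITER
const typewriterDelay = Number(config.public.typewriterDelay) || 50  // NUXT_PUBLIC_TYPEWRITER_DELAY
```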

View File

@@ -1,3 +1,12 @@
<script setup>
onNuxtReady(() => {
fetchSystemSettings()
// restore the stored API key into shared state (runs client-side only)
const apiKey = useApiKey()
apiKey.value = getStoredApiKey()
})
</script>
<template>
<NuxtLayout>
<NuxtLoadingIndicator />
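Moving this initialization into `onNuxtReady` matters for the new SSR deployment: the callback runs only in the browser after hydration, where client-only storage is available. `getStoredApiKey` is not shown in this diff; a hypothetical, localStorage-backed sketch of what it could look like:

```js
// Hypothetical helper — the real implementation is not part of this diff.
// Guarding on process.client keeps it harmless if it ever runs during SSR.
export const getStoredApiKey = () => {
  if (process.client) {
    return localStorage.getItem('openaiApiKey') || null
  }
  return null
}

// getCurrentModel presumably follows the same pattern, returning a saved
// model-parameter object (or a default one) from client-side storage.
```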

View File

@@ -166,18 +166,20 @@ const deleteMessage = (index) => {
props.conversation.messages.splice(index, 1)
}
-const showWebSearchToggle = ref(false)
-const enableWebSearch = ref(false)
-const enableCustomApiKey = ref(false)
const settings = useSettings()
+const enableWebSearch = ref(false)
-watchEffect(() => {
-  if (settings.value) {
-    const settingsValue = toRaw(settings.value)
-    showWebSearchToggle.value = settingsValue.open_web_search && settingsValue.open_web_search === 'True'
-    enableCustomApiKey.value = settingsValue.open_api_key_setting && settingsValue.open_api_key_setting === 'True'
-  }
-})
+const showWebSearchToggle = computed(() => {
+  return settings.value && settings.value.open_web_search && settings.value.open_web_search === 'True'
+})
+const enableCustomApiKey = computed(() => {
+  return settings.value && settings.value.open_api_key_setting && settings.value.open_api_key_setting === 'True'
+})
+onNuxtReady(() => {
+  currentModel.value = getCurrentModel()
+})
</script>

View File

@@ -8,10 +8,13 @@ const availableModels = [
]
const currentModelDefault = ref(MODELS[currentModel.value.name])
-watch(currentModel, (newVal, oldVal) => {
-  currentModelDefault.value = MODELS[newVal.name]
-  saveCurrentModel(newVal)
-}, { deep: true })
+onNuxtReady(() => {
+  currentModel.value = getCurrentModel()
+  watch(currentModel, (newVal, oldVal) => {
+    currentModelDefault.value = MODELS[newVal.name]
+    saveCurrentModel(newVal)
+  }, { deep: true })
+})
</script>
@@ -53,7 +56,7 @@ watch(currentModel, (newVal, oldVal) => {
<div class="d-flex justify-space-between align-center"> <div class="d-flex justify-space-between align-center">
<v-list-subheader>{{ $t('temperature') }}</v-list-subheader> <v-list-subheader>{{ $t('temperature') }}</v-list-subheader>
<v-text-field <v-text-field
v-model="currentModel.temperature" v-model.number="currentModel.temperature"
hide-details hide-details
single-line single-line
density="compact" density="compact"
@@ -82,7 +85,7 @@ watch(currentModel, (newVal, oldVal) => {
<div class="d-flex justify-space-between align-center"> <div class="d-flex justify-space-between align-center">
<v-list-subheader>{{ $t('maxTokens') }}</v-list-subheader> <v-list-subheader>{{ $t('maxTokens') }}</v-list-subheader>
<v-text-field <v-text-field
v-model="currentModel.max_tokens" v-model.number="currentModel.max_tokens"
hide-details hide-details
single-line single-line
density="compact" density="compact"
@@ -93,6 +96,9 @@ watch(currentModel, (newVal, oldVal) => {
class="flex-grow-0" class="flex-grow-0"
></v-text-field> ></v-text-field>
</div> </div>
<div class="text-caption">
{{ $t('maxTokenTips1') }} <b>{{ currentModelDefault.total_tokens }}</b> {{ $t('maxTokenTips2') }}
</div>
</v-col>
<v-col cols="12">
<v-slider
@@ -111,7 +117,7 @@ watch(currentModel, (newVal, oldVal) => {
<div class="d-flex justify-space-between align-center"> <div class="d-flex justify-space-between align-center">
<v-list-subheader>{{ $t('topP') }}</v-list-subheader> <v-list-subheader>{{ $t('topP') }}</v-list-subheader>
<v-text-field <v-text-field
v-model="currentModel.top_p" v-model.number="currentModel.top_p"
hide-details hide-details
single-line single-line
density="compact" density="compact"
@@ -138,7 +144,7 @@ watch(currentModel, (newVal, oldVal) => {
<div class="d-flex justify-space-between align-center"> <div class="d-flex justify-space-between align-center">
<v-list-subheader>{{ $t('frequencyPenalty') }}</v-list-subheader> <v-list-subheader>{{ $t('frequencyPenalty') }}</v-list-subheader>
<v-text-field <v-text-field
v-model="currentModel.frequency_penalty" v-model.number="currentModel.frequency_penalty"
hide-details hide-details
single-line single-line
density="compact" density="compact"
@@ -164,7 +170,7 @@ watch(currentModel, (newVal, oldVal) => {
<div class="d-flex justify-space-between align-center"> <div class="d-flex justify-space-between align-center">
<v-list-subheader>{{ $t('presencePenalty') }}</v-list-subheader> <v-list-subheader>{{ $t('presencePenalty') }}</v-list-subheader>
<v-text-field <v-text-field
v-model="currentModel.presence_penalty" v-model.number="currentModel.presence_penalty"
hide-details hide-details
single-line single-line
density="compact" density="compact"

View File

@@ -89,12 +89,8 @@ const loadConversations = async () => {
}
const settings = useSettings()
-const showApiKeySetting = ref(false)
-watchEffect(() => {
-  if (settings.value) {
-    const settingsValue = toRaw(settings.value)
-    showApiKeySetting.value = settingsValue.open_api_key_setting && settingsValue.open_api_key_setting === 'True'
-  }
-})
+const showApiKeySetting = computed(() => {
+  return settings.value && settings.value.open_api_key_setting && settings.value.open_api_key_setting === 'True'
+})
const signOut = async () => {

View File

@@ -7,7 +7,7 @@ export const useApiKey = () => useState('apiKey', () => getStoredApiKey())
export const useConversations = () => useState('conversations', () => [])
-export const useSettings = () => useState('settings', () => getSystemSettings())
+export const useSettings = () => useState('settings', () => {})
export const useUser = () => useState('user', () => null)
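The settings state no longer calls `getSystemSettings()` at creation time. That call is async (see the utils change at the end of this compare), so the old initializer seeded the state with a pending Promise; the new one starts the state empty and leaves population to an explicit client-side fetch. Note that `() => {}` is an empty arrow-function body, so the initial value is actually `undefined`, which is why the components guard with `settings.value && ...`. A sketch of the resulting flow, using only names that appear in this diff:

```js
// 1. Shared state is created empty on both server and client.
const settings = useSettings()

// 2. app.vue triggers the fetch once the client has hydrated.
onNuxtReady(() => {
  fetchSystemSettings()
})

// 3. Components derive their flags reactively and update when the fetch lands.
const showApiKeySetting = computed(() =>
  settings.value && settings.value.open_api_key_setting === 'True'
)
```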

View File

@@ -5,6 +5,9 @@ services:
image: wongsaang/chatgpt-ui-client:latest
environment:
- SERVER_DOMAIN=http://backend-web-server
# - NUXT_PUBLIC_APP_NAME='ChatGPT UI' # The name of the application
# - NUXT_PUBLIC_TYPEWRITER=true # Whether to enable the typewriter effect, default false
# - NUXT_PUBLIC_TYPEWRITER_DELAY=50 # The delay time of the typewriter effect, default 50ms
depends_on:
- backend-web-server
ports:

View File

@@ -6,11 +6,18 @@
ChatGPT Web 客户端,支持多用户,支持 Mysql、PostgreSQL 等多种数据库连接进行数据持久化存储,支持多语言。提供 Docker 镜像和快速部署脚本。
本项目的服务端:[https://github.com/WongSaang/chatgpt-ui-server](https://github.com/WongSaang/chatgpt-ui-server)
https://user-images.githubusercontent.com/46235412/227156264-ca17ab17-999b-414f-ab06-3f75b5235bfe.mp4
## 📢 更新
<details open>
<summary><strong>2023-04-06</strong></summary>
客户端改成服务端渲染SSR的方式部署现在可以使用环境变量了可用环境变量请看下方 docker-compose 配置。提升了首屏加载速度,减少白屏时间。
</details>
<details open>
<summary><strong>2023-03-27</strong></summary>
🚀 支持 gpt-4 模型。你可以在前端的“模型参数”中选择模型gpt-4 模型需要通过 openai 的白名单才能使用。
@@ -91,6 +98,9 @@ services:
image: wongsaang/chatgpt-ui-client:latest
environment:
- SERVER_DOMAIN=http://backend-web-server
# - NUXT_PUBLIC_APP_NAME='ChatGPT UI' # APP 名称
# - NUXT_PUBLIC_TYPEWRITER=true # 是否开启 打字机 效果
# - NUXT_PUBLIC_TYPEWRITER_DELAY=50 # 打字机效果的延迟时间,单位毫秒,默认50
depends_on:
- backend-web-server
ports:

View File

@@ -50,6 +50,8 @@
"webSearch": "Web Search", "webSearch": "Web Search",
"webSearchDefaultPrompt": "Web search results:\n\n[web_results]\nCurrent date: [current_date]\n\nInstructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.\nQuery: [query]", "webSearchDefaultPrompt": "Web search results:\n\n[web_results]\nCurrent date: [current_date]\n\nInstructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.\nQuery: [query]",
"genTitlePrompt": "Generate a short title for the following content, no more than 10 words. \n\nContent: ", "genTitlePrompt": "Generate a short title for the following content, no more than 10 words. \n\nContent: ",
"maxTokenTips1": "The maximum context length of the current model is",
"maxTokenTips2": "token, which includes the length of the prompt and the length of the generated text. The `Max Tokens` here refers to the length of the generated text. Therefore, you should leave some space for your prompt and not set it too large or to the maximum.",
"welcomeScreen": { "welcomeScreen": {
"introduction1": "is an unofficial client for ChatGPT, but uses the official OpenAI API.", "introduction1": "is an unofficial client for ChatGPT, but uses the official OpenAI API.",
"introduction2": "You will need an OpenAI API Key before you can use this client.", "introduction2": "You will need an OpenAI API Key before you can use this client.",

View File

@@ -50,6 +50,8 @@
"webSearch": "Поиск в интернете", "webSearch": "Поиск в интернете",
"webSearchDefaultPrompt": "Результаты веб-поиска:\n\n[web_results]\nТекущая дата: [current_date]\n\nИнструкции: Используя предоставленные результаты веб-поиска, напишите развернутый ответ на заданный запрос. Обязательно цитируйте результаты, используя обозначение [[number](URL)] после ссылки. Если предоставленные результаты поиска относятся к нескольким темам с одинаковым названием, напишите отдельные ответы для каждой темы.\nЗапрос: [query]", "webSearchDefaultPrompt": "Результаты веб-поиска:\n\n[web_results]\nТекущая дата: [current_date]\n\nИнструкции: Используя предоставленные результаты веб-поиска, напишите развернутый ответ на заданный запрос. Обязательно цитируйте результаты, используя обозначение [[number](URL)] после ссылки. Если предоставленные результаты поиска относятся к нескольким темам с одинаковым названием, напишите отдельные ответы для каждой темы.\nЗапрос: [query]",
"genTitlePrompt": "Придумайте короткий заголовок для следующего содержания, не более 10 слов. \n\nСодержание: ", "genTitlePrompt": "Придумайте короткий заголовок для следующего содержания, не более 10 слов. \n\nСодержание: ",
"maxTokenTips1": "The maximum context length of the current model is",
"maxTokenTips2": "token, which includes the length of the prompt and the length of the generated text. The `Max Tokens` here refers to the length of the generated text. Therefore, you should leave some space for your prompt and not set it too large or to the maximum.",
"welcomeScreen": { "welcomeScreen": {
"introduction1": "является неофициальным клиентом для ChatGPT, но использует официальный API OpenAI.", "introduction1": "является неофициальным клиентом для ChatGPT, но использует официальный API OpenAI.",
"introduction2": "Вам понадобится ключ API OpenAI, прежде чем вы сможете использовать этот клиент.", "introduction2": "Вам понадобится ключ API OpenAI, прежде чем вы сможете использовать этот клиент.",

View File

@@ -50,6 +50,8 @@
"webSearch": "网页搜索", "webSearch": "网页搜索",
"webSearchDefaultPrompt": "网络搜索结果:\n\n[web_results]\n当前日期[current_date]\n\n说明使用提供的网络搜索结果对给定的查询写出全面的回复。确保在引用参考文献后使用 [[number](URL)] 符号进行引用结果. 如果提供的搜索结果涉及到多个具有相同名称的主题,请针对每个主题编写单独的答案。\n查询[query]", "webSearchDefaultPrompt": "网络搜索结果:\n\n[web_results]\n当前日期[current_date]\n\n说明使用提供的网络搜索结果对给定的查询写出全面的回复。确保在引用参考文献后使用 [[number](URL)] 符号进行引用结果. 如果提供的搜索结果涉及到多个具有相同名称的主题,请针对每个主题编写单独的答案。\n查询[query]",
"genTitlePrompt": "为以下内容生成一个不超过10个字的简短标题。 \n\n内容: ", "genTitlePrompt": "为以下内容生成一个不超过10个字的简短标题。 \n\n内容: ",
"maxTokenTips1": "当前模型的最大上下文长度为",
"maxTokenTips2": "个 token它包括了指令的长度和生成的文本长度。此处的最大 token 数量是指生成的文本长度。所以您应该为您的指令预留一些空间,不宜设置过大或拉满。",
"welcomeScreen": { "welcomeScreen": {
"introduction1": "是一个非官方的ChatGPT客户端但使用OpenAI的官方API", "introduction1": "是一个非官方的ChatGPT客户端但使用OpenAI的官方API",
"introduction2": "在使用本客户端之前您需要一个OpenAI API密钥。", "introduction2": "在使用本客户端之前您需要一个OpenAI API密钥。",

View File

@@ -53,14 +53,14 @@ const transformData = (list) => {
return result;
}
-export const getSystemSettings = async () => {
+export const fetchSystemSettings = async () => {
const { data, error } = await useAuthFetch('/api/chat/settings/', {
method: 'GET',
})
if (!error.value) {
-  return transformData(data.value)
+  const settings = useSettings()
+  settings.value = transformData(data.value)
}
-return {}
}
export const fetchUser = async () => {
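The rename from `getSystemSettings` to `fetchSystemSettings` also changes the contract: the function no longer returns the parsed settings (or a `{}` fallback), it writes them into the shared `useSettings` state. A caller that previously consumed the return value would now do something like this sketch:

```js
// Inside an async function or a <script setup> block.
// Before: const settings = await getSystemSettings()
// After: trigger the fetch, then read the shared state.
await fetchSystemSettings()
const settings = useSettings().value
```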