Compare commits

...

11 Commits

Author SHA1 Message Date
Wong Saang
d89d1e288d Merge pull request #97 from erritis/main
Add title for prompt
2023-03-30 09:31:37 +08:00
Sergey Shekhovtsov
cd89d11d0b Add docker compose file for development
Added docker compose file for development

For convenience when developing
2023-03-28 22:36:53 +03:00
Erritis
cf0053a060 Add title for prompt
Added title for prompt

The title will simplify the search in the list of prompts
2023-03-28 22:35:12 +03:00
Wong Saang
019da4399e Merge pull request #93 from erritis/russian
Add Russian language support
2023-03-28 12:49:37 +08:00
Wong Saang
044961bb01 Merge pull request #90 from erritis/main
Add prompt fields to the translation section
2023-03-28 12:34:33 +08:00
Erritis
2374c81edb Add Russian language support 2023-03-28 04:02:56 +03:00
Erritis
699760713e Add prompt fields to the translation section
Added prompt fields to the translation section

Not all interface elements had the ability to add translation
2023-03-28 03:03:23 +03:00
Rafi
d75413cc49 update readme 2023-03-27 22:26:47 +08:00
Rafi
8175f199d2 Support GPT-4 2023-03-27 22:17:19 +08:00
Wong Saang
f8c2f396c1 Merge pull request #70 from Paramon/main
#50 add platform to docker-compose
2023-03-25 21:03:50 +08:00
Andrii Paramonov
8217647df8 #50 add platform 2023-03-24 16:44:00 +02:00
14 changed files with 222 additions and 85 deletions

View File

@@ -10,6 +10,13 @@ https://user-images.githubusercontent.com/46235412/227156264-ca17ab17-999b-414f-
## 📢Updates
<details open>
<summary><strong>2023-03-27</strong></summary>
🚀 Support gpt-4 model. You can select the model in the "Model Parameters" of the front-end.
The GPT-4 model requires whitelist access from OpenAI.
</details>
<details open>
<summary><strong>2023-03-23</strong></summary>
Added web search capability to generate more relevant and up-to-date answers from ChatGPT!
@@ -25,17 +32,7 @@ Add "open_registration" setting option in the admin panel to control whether use
</details>
<details open>
<summary><strong>2023-03-10</strong></summary>
Add 2 environment variables to control the typewriter effect:
- `NUXT_PUBLIC_TYPEWRITER=true` to enable/disable the typewriter effect
- `NUXT_PUBLIC_TYPEWRITER_DELAY=50` to set the delay time for each character in milliseconds.
</details>
<details open>
<details>
<summary><strong>2023-03-04</strong></summary>
**Update to the latest official chat model** `gpt-3.5-turbo`
@@ -98,9 +95,6 @@ services:
image: wongsaang/chatgpt-ui-client:latest
environment:
- SERVER_DOMAIN=http://backend-web-server
- NUXT_PUBLIC_APP_NAME='ChatGPT UI' # App name
- NUXT_PUBLIC_TYPEWRITER=true # Enable typewriter effect, default is false
- NUXT_PUBLIC_TYPEWRITER_DELAY=100 # Typewriter effect delay time, default is 50ms
depends_on:
- backend-web-server
ports:

View File

@@ -1,11 +1,15 @@
<script setup>
const dialog = ref(false)
const currentModel = useCurrentModel()
const availableModels = [
DEFAULT_MODEL.name
'gpt-3.5-turbo',
'gpt-4'
]
const currentModelDefault = ref(MODELS[currentModel.value.name])
watch(currentModel, (newVal, oldVal) => {
currentModelDefault.value = MODELS[newVal.name]
saveCurrentModel(newVal)
}, { deep: true })
@@ -83,7 +87,7 @@ watch(currentModel, (newVal, oldVal) => {
single-line
density="compact"
type="number"
max="2048"
:max="currentModelDefault.total_tokens"
step="1"
style="width: 100px"
class="flex-grow-0"
@@ -93,7 +97,7 @@ watch(currentModel, (newVal, oldVal) => {
<v-col cols="12">
<v-slider
v-model="currentModel.max_tokens"
:max="2048"
:max="currentModelDefault.total_tokens"
:step="1"
hide-details
>

View File

@@ -2,6 +2,7 @@
const menu = ref(false)
const prompts = ref([])
const editingPrompt = ref(null)
const newTitlePrompt = ref(null)
const newPrompt = ref('')
const submittingNewPrompt = ref(false)
const promptInputErrorMessage = ref('')
@@ -24,11 +25,13 @@ const addPrompt = async () => {
const { data, error } = await useAuthFetch('/api/chat/prompts/', {
method: 'POST',
body: JSON.stringify({
title: newTitlePrompt.value,
prompt: newPrompt.value
})
})
if (!error.value) {
prompts.value.push(data.value)
newTitlePrompt.value = null
newPrompt.value = ''
}
submittingNewPrompt.value = false
@@ -43,6 +46,7 @@ const updatePrompt = async (index) => {
const { data, error } = await useAuthFetch(`/api/chat/prompts/${editingPrompt.value.id}/`, {
method: 'PUT',
body: JSON.stringify({
title: editingPrompt.value.title,
prompt: editingPrompt.value.prompt
})
})
@@ -110,7 +114,7 @@ onMounted( () => {
max-width="500"
>
<v-card-title>
<span class="headline">Frequently prompts</span>
<span class="headline">{{ $t('frequentlyPrompts') }}</span>
</v-card-title>
<v-divider></v-divider>
@@ -127,18 +131,30 @@ onMounted( () => {
>
<v-list-item
active-color="primary"
rounded="xl"
v-if="editingPrompt && editingPrompt.id === prompt.id"
>
<div class="d-flex flex-row" :style="{ marginTop: '5px' }">
<div class="flex-grow-1">
<v-text-field
v-model="editingPrompt.title"
:loading="editingPrompt.updating"
:label="$t('titlePrompt')"
variant="underlined"
density="compact"
hide-details
>
</v-text-field>
<v-textarea
rows="2"
v-model="editingPrompt.prompt"
:loading="editingPrompt.updating"
variant="underlined"
hide-details
density="compact"
hide-details
>
<template v-slot:append>
</v-textarea>
</div>
<div>
<div class="d-flex flex-column">
<v-btn
icon="done"
@@ -154,8 +170,8 @@ onMounted( () => {
>
</v-btn>
</div>
</template>
</v-textarea>
</div>
</div>
</v-list-item>
<v-list-item
v-if="!editingPrompt || editingPrompt.id !== prompt.id"
@@ -163,7 +179,7 @@ onMounted( () => {
active-color="primary"
@click="selectPrompt(prompt)"
>
<v-list-item-title>{{ prompt.prompt }}</v-list-item-title>
<v-list-item-title>{{ prompt.title ? prompt.title : prompt.prompt }}</v-list-item-title>
<template v-slot:append>
<v-btn
icon="edit"
@@ -184,6 +200,25 @@ onMounted( () => {
</v-list-item>
</template>
<v-list-item
active-color="primary"
>
<div
class="pt-3"
>
<v-text-field
rows="1"
v-model="newTitlePrompt"
:label="$t('titlePrompt')"
variant="outlined"
density="compact"
hide-details
clearable
>
</v-text-field>
</div>
</v-list-item>
<v-list-item
active-color="primary"
>
@@ -193,7 +228,7 @@ onMounted( () => {
<v-textarea
rows="2"
v-model="newPrompt"
label="Add a new prompt"
:label="$t('addNewPrompt')"
variant="outlined"
density="compact"
:error-messages="promptInputErrorMessage"
@@ -211,7 +246,7 @@ onMounted( () => {
@click="addPrompt()"
>
<v-icon icon="add"></v-icon>
Add prompt
{{ $t('addPrompt') }}
</v-btn>
</v-list-item>
</v-list>

View File

@@ -1,5 +1,5 @@
export const useModels = () => useState('models', () => getStoredModels())
// export const useModels = () => useState('models', () => getStoredModels())
export const useCurrentModel = () => useState('currentModel', () => getCurrentModel())

16
docker-compose.dev.yml Normal file
View File

@@ -0,0 +1,16 @@
version: '3'
services:
client:
platform: linux/x86_64
build: .
environment:
SERVER_DOMAIN: http://web-server
ports:
- '${CLIENT_PORT:-8080}:80'
networks:
- chatgpt_network
restart: always
networks:
chatgpt_network:
external: True

View File

@@ -1,12 +1,10 @@
version: '3'
services:
client:
platform: linux/x86_64
image: wongsaang/chatgpt-ui-client:latest
environment:
- SERVER_DOMAIN=http://backend-web-server
- NUXT_PUBLIC_APP_NAME='ChatGPT UI'
- NUXT_PUBLIC_TYPEWRITER=true
- NUXT_PUBLIC_TYPEWRITER_DELAY=100
depends_on:
- backend-web-server
ports:
@@ -15,6 +13,7 @@ services:
- chatgpt_ui_network
restart: always
backend-wsgi-server:
platform: linux/x86_64
image: wongsaang/chatgpt-ui-wsgi-server:latest
environment:
- APP_DOMAIN=${APP_DOMAIN:-localhost:9000}
@@ -36,6 +35,7 @@ services:
- chatgpt_ui_network
restart: always
backend-web-server:
platform: linux/x86_64
image: wongsaang/chatgpt-ui-web-server:latest
environment:
- BACKEND_URL=http://backend-wsgi-server:8000

View File

@@ -10,6 +10,12 @@ https://user-images.githubusercontent.com/46235412/227156264-ca17ab17-999b-414f-
## 📢 更新
<details open>
<summary><strong>2023-03-27</strong></summary>
🚀 支持 gpt-4 模型。你可以在前端的“模型参数”中选择模型。gpt-4 模型需要通过 openai 的白名单才能使用。
</details>
<details open>
<summary><strong>2023-03-23</strong></summary>
增加网页搜索能力,使得 ChatGPT 生成的回答更与时俱进!
@@ -23,17 +29,7 @@ https://user-images.githubusercontent.com/46235412/227156264-ca17ab17-999b-414f-
</details>
<details open>
<summary><strong>2023-03-10</strong></summary>
增加 2 个环境变量来控制打字机效果, 详见下方 docker-compose 配置的环境变量说明
- `NUXT_PUBLIC_TYPEWRITER` 是否开启打字机效果
- `NUXT_PUBLIC_TYPEWRITER_DELAY` 每个字的延迟时间,单位:毫秒
</details>
<details open>
<details>
<summary><strong>2023-03-04</strong></summary>
**使用最新的官方聊天模型** `gpt-3.5-turbo`
@@ -95,9 +91,6 @@ services:
image: wongsaang/chatgpt-ui-client:latest
environment:
- SERVER_DOMAIN=http://backend-web-server
- NUXT_PUBLIC_APP_NAME='ChatGPT UI' # App 名称,默认为 ChatGPT UI
- NUXT_PUBLIC_TYPEWRITER=true # 是否启用打字机效果,默认关闭
- NUXT_PUBLIC_TYPEWRITER_DELAY=100 # 打字机效果的延迟时间,默认 50毫秒
depends_on:
- backend-web-server
ports:

View File

@@ -10,6 +10,10 @@
"saveAndClose": "Save & Close",
"pleaseSelectAtLeastOneModelDot": "Please select at least one model.",
"writeAMessage": "Write a message",
"frequentlyPrompts": "Frequently prompts",
"addPrompt": "Add prompt",
"titlePrompt": "Title",
"addNewPrompt": "Add a new prompt",
"pressEnterToSendYourMessageOrShiftEnterToAddANewLine": "Press Enter to send your message or Shift+Enter to add a new line",
"lightMode": "Light Mode",
"darkMode": "Dark Mode",

65
lang/ru-RU.json Normal file
View File

@@ -0,0 +1,65 @@
{
"welcomeTo": "Добро пожаловать в",
"language": "Язык",
"setApiKey": "Установить ключ API",
"setOpenAIApiKey": "Установить ключ API OpenAI",
"openAIApiKey": "Ключ API OpenAI",
"getAKey": "Получить ключ",
"openAIModels": "Модели OpenAI",
"aboutTheModels": "О моделях",
"saveAndClose": "Сохранить & Закрыть",
"pleaseSelectAtLeastOneModelDot": "Выберите хотя бы одну модель.",
"writeAMessage": "Напишите сообщение",
"frequentlyPrompts": "Список подсказок",
"addPrompt": "Добавить подсказку",
"titlePrompt": "Заголовок",
"addNewPrompt": "Добавьте новую подсказку",
"pressEnterToSendYourMessageOrShiftEnterToAddANewLine": "Нажмите Enter, чтобы отправить сообщение, или Shift+Enter, чтобы добавить новую строку.",
"lightMode": "Светлая",
"darkMode": "Темная",
"followSystem": "Системная",
"themeMode": "Тема",
"feedback": "Обратная связь",
"newConversation": "Новый чат",
"clearConversations": "Очистить чаты",
"modelParameters": "Параметры модели",
"model": "Модель",
"temperature": "Temperature",
"topP": "Top P",
"frequencyPenalty": "Frequency Penalty",
"presencePenalty": "Presence Penalty",
"maxTokens": "Max Tokens",
"roles": {
"me": "Я",
"ai": "AI"
},
"edit": "Редактировать",
"copy": "Копировать",
"copied": "Скопировано",
"delete": "Удалить",
"signOut": "Выход",
"webSearch": "Поиск в интернете",
"webSearchDefaultPrompt": "Результаты веб-поиска:\n\n[web_results]\nТекущая дата: [current_date]\n\nИнструкции: Используя предоставленные результаты веб-поиска, напишите развернутый ответ на заданный запрос. Обязательно цитируйте результаты, используя обозначение [[number](URL)] после ссылки. Если предоставленные результаты поиска относятся к нескольким темам с одинаковым названием, напишите отдельные ответы для каждой темы.\nЗапрос: [query]",
"welcomeScreen": {
"introduction1": "является неофициальным клиентом для ChatGPT, но использует официальный API OpenAI.",
"introduction2": "Вам понадобится ключ API OpenAI, прежде чем вы сможете использовать этот клиент.",
"examples": {
"title": "Примеры",
"item1": "\"Объясни, что такое квантовые вычисления простыми словами\"",
"item2": "\"Предложи несколько креативных идей для дня рождения 10-летнего ребенка?\"",
"item3": "\"Как сделать HTTP-запрос в Javascript?\""
},
"capabilities": {
"title": "Возможности",
"item1": "Помнит, что пользователь сказал ранее в разговоре",
"item2": "Позволяет пользователю вносить последующие исправления",
"item3": "Научен отклонять неуместные запросы"
},
"limitations": {
"title": "Ограничения",
"item1": "Иногда может генерировать неверную информацию",
"item2": "Иногда может создавать вредные инструкции или предвзятый контент",
"item3": "Ограниченное знание мира и событий после 2021 года"
}
}
}

View File

@@ -10,6 +10,10 @@
"saveAndClose": "保存并关闭",
"pleaseSelectAtLeastOneModelDot": "请至少选择一个模型",
"writeAMessage": "输入信息",
"frequentlyPrompts": "Frequently prompts",
"addPrompt": "Add prompt",
"titlePrompt": "Title",
"addNewPrompt": "Add a new prompt",
"pressEnterToSendYourMessageOrShiftEnterToAddANewLine": "按回车键发送您的信息或按Shift+Enter键添加新行",
"lightMode": "明亮模式",
"darkMode": "暗色模式",

View File

@@ -54,6 +54,12 @@ export default defineNuxtConfig({
iso: 'zh-CN',
name: '简体中文',
file: 'zh-CN.json',
},
{
code: 'ru',
iso: 'ru-RU',
name: 'Русский',
file: 'ru-RU.json',
}
],
lazy: true,

View File

@@ -90,12 +90,13 @@ const fetchReply = async (message) => {
throw err;
},
async onmessage(message) {
// console.log(message)
const event = message.event
const data = JSON.parse(message.data)
if (event === 'error') {
throw new Error(data.error);
abortFetch()
showSnackbar(data.error)
return;
}
if (event === 'userMessageId') {

View File

@@ -5,11 +5,25 @@ export const STORAGE_KEY = {
OPENAI_API_KEY: 'openai_api_key',
}
export const DEFAULT_MODEL = {
export const MODELS = {
'gpt-3.5-turbo': {
name: 'gpt-3.5-turbo',
frequency_penalty: 0.0,
presence_penalty: 0.0,
total_tokens: 4096,
max_tokens: 1000,
temperature: 0.7,
top_p: 1.0
},
'gpt-4': {
name: 'gpt-4',
frequency_penalty: 0.0,
presence_penalty: 0.0,
total_tokens: 8192,
max_tokens: 2000,
temperature: 0.7,
top_p: 1.0
}
}
export const DEFAULT_MODEL_NAME = 'gpt-3.5-turbo'

View File

@@ -1,3 +1,4 @@
import {MODELS} from "~/utils/enums";
const get = (key) => {
let val = localStorage.getItem(key)
@@ -17,13 +18,13 @@ export const setModels = (val) => {
models.value = val
}
export const getStoredModels = () => {
let models = get(STORAGE_KEY.MODELS)
if (!models) {
models = [DEFAULT_MODEL]
}
return models
}
// export const getStoredModels = () => {
// let models = get(STORAGE_KEY.MODELS)
// if (!models) {
// models = [DEFAULT_MODEL]
// }
// return models
// }
export const saveCurrentModel = (val) => {
set(STORAGE_KEY.CURRENT_MODEL, val)
@@ -32,7 +33,7 @@ export const saveCurrentModel = (val) => {
export const getCurrentModel = () => {
let model = get(STORAGE_KEY.CURRENT_MODEL)
if (!model) {
model = DEFAULT_MODEL
model = MODELS[DEFAULT_MODEL_NAME]
}
return model
}