Compare commits

..

11 Commits

Author SHA1 Message Date
Rafi
f67ed7621c Multiple improvements for conversation 2023-03-30 21:45:23 +08:00
Rafi
97649e4bee pass 2023-03-30 18:12:55 +08:00
Rafi
1082da050b rm @vite-pwa/nuxt 2023-03-30 10:37:55 +08:00
Wong Saang
d89d1e288d Merge pull request #97 from erritis/main
Add title for prompt
2023-03-30 09:31:37 +08:00
Sergey Shekhovtsov
cd89d11d0b Add docker compose file for development
Added docker compose file for development

For convenience when developing
2023-03-28 22:36:53 +03:00
Erritis
cf0053a060 Add title for prompt
Added title for prompt

The title will simplify the search in the list of prompts
2023-03-28 22:35:12 +03:00
Wong Saang
019da4399e Merge pull request #93 from erritis/russian
Add Russian language support
2023-03-28 12:49:37 +08:00
Wong Saang
044961bb01 Merge pull request #90 from erritis/main
Add prompt fields to the translation section
2023-03-28 12:34:33 +08:00
Erritis
2374c81edb Add Russian language support 2023-03-28 04:02:56 +03:00
Erritis
699760713e Add prompt fields to the translation section
Added prompt fields to the translation section

Not all interface elements had the ability to add translation
2023-03-28 03:03:23 +03:00
Rafi
d75413cc49 update readme 2023-03-27 22:26:47 +08:00
16 changed files with 549 additions and 2339 deletions

View File

@@ -10,6 +10,13 @@ https://user-images.githubusercontent.com/46235412/227156264-ca17ab17-999b-414f-
## 📢Updates
<details open>
<summary><strong>2023-03-27</strong></summary>
🚀 Support gpt-4 model. You can select the model in the "Model Parameters" of the front-end.
The GPT-4 model requires whitelist access from OpenAI.
</details>
<details open>
<summary><strong>2023-03-23</strong></summary>
Added web search capability to generate more relevant and up-to-date answers from ChatGPT!
@@ -25,17 +32,7 @@ Add "open_registration" setting option in the admin panel to control whether use
</details>
<details open>
<summary><strong>2023-03-10</strong></summary>
Add 2 environment variables to control the typewriter effect:
- `NUXT_PUBLIC_TYPEWRITER=true` to enable/disable the typewriter effect
- `NUXT_PUBLIC_TYPEWRITER_DELAY=50` to set the delay time for each character in milliseconds.
</details>
<details open>
<details>
<summary><strong>2023-03-04</strong></summary>
**Update to the latest official chat model** `gpt-3.5-turbo`
@@ -98,9 +95,6 @@ services:
image: wongsaang/chatgpt-ui-client:latest
environment:
- SERVER_DOMAIN=http://backend-web-server
- NUXT_PUBLIC_APP_NAME='ChatGPT UI' # App name
- NUXT_PUBLIC_TYPEWRITER=true # Enable typewriter effect, default is false
- NUXT_PUBLIC_TYPEWRITER_DELAY=100 # Typewriter effect delay time, default is 50ms
depends_on:
- backend-web-server
ports:

289
components/Conversation.vue Normal file
View File

@@ -0,0 +1,289 @@
<script setup>
import {EventStreamContentType, fetchEventSource} from '@microsoft/fetch-event-source'
import {addConversation} from "../utils/helper";
const { $i18n, $auth } = useNuxtApp()
const runtimeConfig = useRuntimeConfig()
const currentModel = useCurrentModel()
const openaiApiKey = useApiKey()
const fetchingResponse = ref(false)
const messageQueue = []
let isProcessingQueue = false
const props = defineProps({
conversation: {
type: Object,
required: true
}
})
const processMessageQueue = () => {
if (isProcessingQueue || messageQueue.length === 0) {
return
}
if (!props.conversation.messages[props.conversation.messages.length - 1].is_bot) {
props.conversation.messages.push({id: null, is_bot: true, message: ''})
}
isProcessingQueue = true
const nextMessage = messageQueue.shift()
if (runtimeConfig.public.typewriter) {
let wordIndex = 0;
const intervalId = setInterval(() => {
props.conversation.messages[props.conversation.messages.length - 1].message += nextMessage[wordIndex]
wordIndex++
if (wordIndex === nextMessage.length) {
clearInterval(intervalId)
isProcessingQueue = false
processMessageQueue()
}
}, runtimeConfig.public.typewriterDelay)
} else {
props.conversation.messages[props.conversation.messages.length - 1].message += nextMessage
isProcessingQueue = false
processMessageQueue()
}
}
let ctrl
const abortFetch = () => {
if (ctrl) {
ctrl.abort()
}
fetchingResponse.value = false
}
const fetchReply = async (message) => {
ctrl = new AbortController()
let webSearchParams = {}
if (enableWebSearch.value) {
webSearchParams['web_search'] = {
ua: navigator.userAgent,
default_prompt: $i18n.t('webSearchDefaultPrompt')
}
}
const data = Object.assign({}, currentModel.value, {
openaiApiKey: enableCustomApiKey.value ? openaiApiKey.value : null,
message: message,
conversationId: props.conversation.id
}, webSearchParams)
try {
await fetchEventSource('/api/conversation/', {
signal: ctrl.signal,
method: 'POST',
headers: {
'accept': 'application/json',
'Content-Type': 'application/json',
},
body: JSON.stringify(data),
onopen(response) {
if (response.ok && response.headers.get('content-type') === EventStreamContentType) {
return;
}
throw new Error(`Failed to send message. HTTP ${response.status} - ${response.statusText}`);
},
onclose() {
if (ctrl.signal.aborted === true) {
return;
}
throw new Error(`Failed to send message. Server closed the connection unexpectedly.`);
},
onerror(err) {
throw err;
},
async onmessage(message) {
const event = message.event
const data = JSON.parse(message.data)
if (event === 'error') {
abortFetch()
showSnackbar(data.error)
return;
}
if (event === 'userMessageId') {
props.conversation.messages[props.conversation.messages.length - 1].id = data.userMessageId
return;
}
if (event === 'done') {
if (props.conversation.id === null) {
props.conversation.id = data.conversationId
genTitle(props.conversation.id)
}
props.conversation.messages[props.conversation.messages.length - 1].id = data.messageId
abortFetch()
return;
}
messageQueue.push(data.content)
processMessageQueue()
scrollChatWindow()
},
})
} catch (err) {
console.log(err)
abortFetch()
showSnackbar(err.message)
}
}
const grab = ref(null)
const scrollChatWindow = () => {
if (grab.value === null) {
return;
}
grab.value.scrollIntoView({behavior: 'smooth'})
}
const checkOrAddConversation = () => {
if (props.conversation.messages.length === 0) {
props.conversation.messages.push({id: null, is_bot: true, message: ''})
}
}
const send = (message) => {
fetchingResponse.value = true
if (props.conversation.messages.length === 0) {
addConversation(props.conversation)
}
props.conversation.messages.push({message: message})
fetchReply(message)
scrollChatWindow()
}
const stop = () => {
abortFetch()
}
const snackbar = ref(false)
const snackbarText = ref('')
const showSnackbar = (text) => {
snackbarText.value = text
snackbar.value = true
}
const editor = ref(null)
const usePrompt = (prompt) => {
editor.value.usePrompt(prompt)
}
const deleteMessage = (index) => {
props.conversation.messages.splice(index, 1)
}
const showWebSearchToggle = ref(false)
const enableWebSearch = ref(false)
const enableCustomApiKey = ref(false)
const settings = useSettings()
watchEffect(() => {
if (settings.value) {
const settingsValue = toRaw(settings.value)
showWebSearchToggle.value = settingsValue.open_web_search && settingsValue.open_web_search === 'True'
enableCustomApiKey.value = settingsValue.open_api_key_setting && settingsValue.open_api_key_setting === 'True'
}
})
</script>
<template>
<div
v-if="conversation.loadingMessages"
class="text-center"
>
<v-progress-circular
indeterminate
color="primary"
></v-progress-circular>
</div>
<div v-else>
<div
v-if="conversation.messages.length > 0"
ref="chatWindow"
>
<v-container>
<v-row>
<v-col
v-for="(message, index) in conversation.messages" :key="index"
cols="12"
>
<div
class="d-flex align-center"
:class="message.is_bot ? 'justify-start' : 'justify-end'"
>
<MessageActions
v-if="!message.is_bot"
:message="message"
:message-index="index"
:use-prompt="usePrompt"
:delete-message="deleteMessage"
/>
<MsgContent :message="message" />
<MessageActions
v-if="message.is_bot"
:message="message"
:message-index="index"
:use-prompt="usePrompt"
:delete-message="deleteMessage"
/>
</div>
</v-col>
</v-row>
</v-container>
<div ref="grab" class="w-100" style="height: 200px;"></div>
</div>
<Welcome v-if="conversation.id === null && conversation.messages.length === 0" />
</div>
<v-footer app>
<div class="px-md-16 w-100 d-flex flex-column">
<div class="d-flex align-center">
<v-btn
v-show="fetchingResponse"
icon="close"
title="stop"
class="mr-3"
@click="stop"
></v-btn>
<MsgEditor ref="editor" :send-message="send" :disabled="fetchingResponse" :loading="fetchingResponse" />
</div>
<v-toolbar
density="comfortable"
color="transparent"
>
<Prompt v-show="!fetchingResponse" :use-prompt="usePrompt" />
<v-switch
v-if="showWebSearchToggle"
v-model="enableWebSearch"
hide-details
color="primary"
:label="$t('webSearch')"
></v-switch>
<v-spacer></v-spacer>
</v-toolbar>
</div>
</v-footer>
<v-snackbar
v-model="snackbar"
multi-line
location="top"
>
{{ snackbarText }}
<template v-slot:actions>
<v-btn
color="red"
variant="text"
@click="snackbar = false"
>
Close
</v-btn>
</template>
</v-snackbar>
</template>

View File

@@ -2,6 +2,7 @@
const menu = ref(false)
const prompts = ref([])
const editingPrompt = ref(null)
const newTitlePrompt = ref(null)
const newPrompt = ref('')
const submittingNewPrompt = ref(false)
const promptInputErrorMessage = ref('')
@@ -24,11 +25,13 @@ const addPrompt = async () => {
const { data, error } = await useAuthFetch('/api/chat/prompts/', {
method: 'POST',
body: JSON.stringify({
title: newTitlePrompt.value,
prompt: newPrompt.value
})
})
if (!error.value) {
prompts.value.push(data.value)
newTitlePrompt.value = null
newPrompt.value = ''
}
submittingNewPrompt.value = false
@@ -43,6 +46,7 @@ const updatePrompt = async (index) => {
const { data, error } = await useAuthFetch(`/api/chat/prompts/${editingPrompt.value.id}/`, {
method: 'PUT',
body: JSON.stringify({
title: editingPrompt.value.title,
prompt: editingPrompt.value.prompt
})
})
@@ -110,7 +114,7 @@ onMounted( () => {
max-width="500"
>
<v-card-title>
<span class="headline">Frequently prompts</span>
<span class="headline">{{ $t('frequentlyPrompts') }}</span>
</v-card-title>
<v-divider></v-divider>
@@ -127,35 +131,47 @@ onMounted( () => {
>
<v-list-item
active-color="primary"
rounded="xl"
v-if="editingPrompt && editingPrompt.id === prompt.id"
>
<v-textarea
rows="2"
v-model="editingPrompt.prompt"
:loading="editingPrompt.updating"
variant="underlined"
hide-details
density="compact"
>
<template v-slot:append>
<div class="d-flex flex-column">
<v-btn
icon="done"
variant="text"
:loading="editingPrompt.updating"
@click="updatePrompt(idx)"
>
</v-btn>
<v-btn
icon="close"
variant="text"
@click="cancelEditPrompt()"
>
</v-btn>
</div>
</template>
</v-textarea>
<div class="d-flex flex-row" :style="{ marginTop: '5px' }">
<div class="flex-grow-1">
<v-text-field
v-model="editingPrompt.title"
:loading="editingPrompt.updating"
:label="$t('titlePrompt')"
variant="underlined"
density="compact"
hide-details
>
</v-text-field>
<v-textarea
rows="2"
v-model="editingPrompt.prompt"
:loading="editingPrompt.updating"
variant="underlined"
density="compact"
hide-details
>
</v-textarea>
</div>
<div>
<div class="d-flex flex-column">
<v-btn
icon="done"
variant="text"
:loading="editingPrompt.updating"
@click="updatePrompt(idx)"
>
</v-btn>
<v-btn
icon="close"
variant="text"
@click="cancelEditPrompt()"
>
</v-btn>
</div>
</div>
</div>
</v-list-item>
<v-list-item
v-if="!editingPrompt || editingPrompt.id !== prompt.id"
@@ -163,7 +179,7 @@ onMounted( () => {
active-color="primary"
@click="selectPrompt(prompt)"
>
<v-list-item-title>{{ prompt.prompt }}</v-list-item-title>
<v-list-item-title>{{ prompt.title ? prompt.title : prompt.prompt }}</v-list-item-title>
<template v-slot:append>
<v-btn
icon="edit"
@@ -184,6 +200,25 @@ onMounted( () => {
</v-list-item>
</template>
<v-list-item
active-color="primary"
>
<div
class="pt-3"
>
<v-text-field
rows="1"
v-model="newTitlePrompt"
:label="$t('titlePrompt')"
variant="outlined"
density="compact"
hide-details
clearable
>
</v-text-field>
</div>
</v-list-item>
<v-list-item
active-color="primary"
>
@@ -193,7 +228,7 @@ onMounted( () => {
<v-textarea
rows="2"
v-model="newPrompt"
label="Add a new prompt"
:label="$t('addNewPrompt')"
variant="outlined"
density="compact"
:error-messages="promptInputErrorMessage"
@@ -211,7 +246,7 @@ onMounted( () => {
@click="addPrompt()"
>
<v-icon icon="add"></v-icon>
Add prompt
{{ $t('addPrompt') }}
</v-btn>
</v-list-item>
</v-list>

View File

@@ -5,8 +5,8 @@ export const useCurrentModel = () => useState('currentModel', () => getCurrentMo
export const useApiKey = () => useState('apiKey', () => getStoredApiKey())
export const useConversion = () => useState('conversion', () => getDefaultConversionData())
export const useConversation = () => useState('conversation', () => getDefaultConversationData())
export const useConversions = () => useState('conversions', () => [])
export const useConversations = () => useState('conversations', () => [])
export const useSettings = () => useState('settings', () => {})

16
docker-compose.dev.yml Normal file
View File

@@ -0,0 +1,16 @@
version: '3'
services:
client:
platform: linux/x86_64
build: .
environment:
SERVER_DOMAIN: http://web-server
ports:
- '${CLIENT_PORT:-8080}:80'
networks:
- chatgpt_network
restart: always
networks:
chatgpt_network:
external: True

View File

@@ -5,9 +5,6 @@ services:
image: wongsaang/chatgpt-ui-client:latest
environment:
- SERVER_DOMAIN=http://backend-web-server
- NUXT_PUBLIC_APP_NAME='ChatGPT UI'
- NUXT_PUBLIC_TYPEWRITER=true
- NUXT_PUBLIC_TYPEWRITER_DELAY=100
depends_on:
- backend-web-server
ports:

View File

@@ -10,6 +10,12 @@ https://user-images.githubusercontent.com/46235412/227156264-ca17ab17-999b-414f-
## 📢 更新
<details open>
<summary><strong>2023-03-27</strong></summary>
🚀 支持 gpt-4 模型。你可以在前端的“模型参数”中选择模型gpt-4 模型需要通过 openai 的白名单才能使用。
</details>
<details open>
<summary><strong>2023-03-23</strong></summary>
增加网页搜索能力,使得 ChatGPT 生成的回答更与时俱进!
@@ -23,17 +29,7 @@ https://user-images.githubusercontent.com/46235412/227156264-ca17ab17-999b-414f-
</details>
<details open>
<summary><strong>2023-03-10</strong></summary>
增加 2 个环境变量来控制打字机效果, 详见下方 docker-compose 配置的环境变量说明
- `NUXT_PUBLIC_TYPEWRITER` 是否开启打字机效果
- `NUXT_PUBLIC_TYPEWRITER_DELAY` 每个字的延迟时间,单位:毫秒
</details>
<details open>
<details>
<summary><strong>2023-03-04</strong></summary>
**使用最新的官方聊天模型** `gpt-3.5-turbo`
@@ -95,9 +91,6 @@ services:
image: wongsaang/chatgpt-ui-client:latest
environment:
- SERVER_DOMAIN=http://backend-web-server
- NUXT_PUBLIC_APP_NAME='ChatGPT UI' # App 名称,默认为 ChatGPT UI
- NUXT_PUBLIC_TYPEWRITER=true # 是否启用打字机效果,默认关闭
- NUXT_PUBLIC_TYPEWRITER_DELAY=100 # 打字机效果的延迟时间,默认 50毫秒
depends_on:
- backend-web-server
ports:

View File

@@ -10,6 +10,10 @@
"saveAndClose": "Save & Close",
"pleaseSelectAtLeastOneModelDot": "Please select at least one model.",
"writeAMessage": "Write a message",
"frequentlyPrompts": "Frequently prompts",
"addPrompt": "Add prompt",
"titlePrompt": "Title",
"addNewPrompt": "Add a new prompt",
"pressEnterToSendYourMessageOrShiftEnterToAddANewLine": "Press Enter to send your message or Shift+Enter to add a new line",
"lightMode": "Light Mode",
"darkMode": "Dark Mode",
@@ -17,6 +21,7 @@
"themeMode": "Theme Mode",
"feedback": "Feedback",
"newConversation": "New conversation",
"defaultConversationTitle": "Unnamed",
"clearConversations": "Clear conversations",
"modelParameters": "Model Parameters",
"model": "Model",
@@ -36,6 +41,7 @@
"signOut": "Sign out",
"webSearch": "Web Search",
"webSearchDefaultPrompt": "Web search results:\n\n[web_results]\nCurrent date: [current_date]\n\nInstructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.\nQuery: [query]",
"genTitlePrompt": "Generate a short title for the following content, no more than 10 words. \n\nContent: ",
"welcomeScreen": {
"introduction1": "is an unofficial client for ChatGPT, but uses the official OpenAI API.",
"introduction2": "You will need an OpenAI API Key before you can use this client.",

67
lang/ru-RU.json Normal file
View File

@@ -0,0 +1,67 @@
{
"welcomeTo": "Добро пожаловать в",
"language": "Язык",
"setApiKey": "Установить ключ API",
"setOpenAIApiKey": "Установить ключ API OpenAI",
"openAIApiKey": "Ключ API OpenAI",
"getAKey": "Получить ключ",
"openAIModels": "Модели OpenAI",
"aboutTheModels": "О моделях",
"saveAndClose": "Сохранить & Закрыть",
"pleaseSelectAtLeastOneModelDot": "Выберите хотя бы одну модель.",
"writeAMessage": "Напишите сообщение",
"frequentlyPrompts": "Список подсказок",
"addPrompt": "Добавить подсказку",
"titlePrompt": "Заголовок",
"addNewPrompt": "Добавьте новую подсказку",
"pressEnterToSendYourMessageOrShiftEnterToAddANewLine": "Нажмите Enter, чтобы отправить сообщение, или Shift+Enter, чтобы добавить новую строку.",
"lightMode": "Светлая",
"darkMode": "Темная",
"followSystem": "Системная",
"themeMode": "Тема",
"feedback": "Обратная связь",
"newConversation": "Новый чат",
"defaultConversationTitle": "Безымянный",
"clearConversations": "Очистить чаты",
"modelParameters": "Параметры модели",
"model": "Модель",
"temperature": "Temperature",
"topP": "Top P",
"frequencyPenalty": "Frequency Penalty",
"presencePenalty": "Presence Penalty",
"maxTokens": "Max Tokens",
"roles": {
"me": "Я",
"ai": "AI"
},
"edit": "Редактировать",
"copy": "Копировать",
"copied": "Скопировано",
"delete": "Удалить",
"signOut": "Выход",
"webSearch": "Поиск в интернете",
"webSearchDefaultPrompt": "Результаты веб-поиска:\n\n[web_results]\nТекущая дата: [current_date]\n\nИнструкции: Используя предоставленные результаты веб-поиска, напишите развернутый ответ на заданный запрос. Обязательно цитируйте результаты, используя обозначение [[number](URL)] после ссылки. Если предоставленные результаты поиска относятся к нескольким темам с одинаковым названием, напишите отдельные ответы для каждой темы.\nЗапрос: [query]",
"genTitlePrompt": "Придумайте короткий заголовок для следующего содержания, не более 10 слов. \n\nСодержание: ",
"welcomeScreen": {
"introduction1": "является неофициальным клиентом для ChatGPT, но использует официальный API OpenAI.",
"introduction2": "Вам понадобится ключ API OpenAI, прежде чем вы сможете использовать этот клиент.",
"examples": {
"title": "Примеры",
"item1": "\"Объясни, что такое квантовые вычисления простыми словами\"",
"item2": "\"Предложи несколько креативных идей для дня рождения 10-летнего ребенка?\"",
"item3": "\"Как сделать HTTP-запрос в Javascript?\""
},
"capabilities": {
"title": "Возможности",
"item1": "Помнит, что пользователь сказал ранее в разговоре",
"item2": "Позволяет пользователю вносить последующие исправления",
"item3": "Научен отклонять неуместные запросы"
},
"limitations": {
"title": "Ограничения",
"item1": "Иногда может генерировать неверную информацию",
"item2": "Иногда может создавать вредные инструкции или предвзятый контент",
"item3": "Ограниченное знание мира и событий после 2021 года"
}
}
}

View File

@@ -10,6 +10,10 @@
"saveAndClose": "保存并关闭",
"pleaseSelectAtLeastOneModelDot": "请至少选择一个模型",
"writeAMessage": "输入信息",
"frequentlyPrompts": "Frequently prompts",
"addPrompt": "Add prompt",
"titlePrompt": "Title",
"addNewPrompt": "Add a new prompt",
"pressEnterToSendYourMessageOrShiftEnterToAddANewLine": "按回车键发送您的信息或按Shift+Enter键添加新行",
"lightMode": "明亮模式",
"darkMode": "暗色模式",
@@ -17,6 +21,7 @@
"themeMode": "主题模式",
"feedback": "反馈",
"newConversation": "新的对话",
"defaultConversationTitle": "未命名",
"clearConversations": "清除对话",
"modelParameters": "模型参数",
"model": "模型",
@@ -36,6 +41,7 @@
"signOut": "退出登录",
"webSearch": "网页搜索",
"webSearchDefaultPrompt": "网络搜索结果:\n\n[web_results]\n当前日期[current_date]\n\n说明使用提供的网络搜索结果对给定的查询写出全面的回复。确保在引用参考文献后使用 [[number](URL)] 符号进行引用结果. 如果提供的搜索结果涉及到多个具有相同名称的主题,请针对每个主题编写单独的答案。\n查询[query]",
"genTitlePrompt": "为以下内容生成一个不超过10个字的简短标题。 \n\n内容: ",
"welcomeScreen": {
"introduction1": "是一个非官方的ChatGPT客户端但使用OpenAI的官方API",
"introduction2": "在使用本客户端之前您需要一个OpenAI API密钥。",

View File

@@ -22,8 +22,8 @@ const setLang = (lang) => {
setLocale(lang)
}
const conversations = useConversions()
const currentConversation = useConversion()
const conversations = useConversations()
const currentConversation = useConversation()
const editingConversation = ref(null)
const deletingConversationIndex = ref(null)
@@ -54,7 +54,8 @@ const deleteConversation = async (index) => {
deletingConversationIndex.value = null
if (!error.value) {
if (conversations.value[index].id === currentConversation.value.id) {
createNewConversion()
console.log('delete current conversation')
createNewConversation()
}
conversations.value.splice(index, 1)
}
@@ -78,7 +79,7 @@ const loadingConversations = ref(false)
const loadConversations = async () => {
loadingConversations.value = true
conversations.value = await getConversions()
conversations.value = await getConversations()
loadingConversations.value = false
}
@@ -130,8 +131,8 @@ onMounted(async () => {
block
variant="outlined"
prepend-icon="add"
@click="createNewConversion()"
class="text-none"
@click="createNewConversation"
>
{{ $t('newConversation') }}
</v-btn>
@@ -172,19 +173,19 @@ onMounted(async () => {
<v-list-item
rounded="xl"
active-color="primary"
@click="openConversationMessages(conversation)"
:to="conversation.id ? `/${conversation.id}` : undefined"
v-bind="props"
>
<v-list-item-title>{{ conversation.topic }}</v-list-item-title>
<template v-slot:append>
<div
v-show="isHovering"
v-show="isHovering && conversation.id"
>
<v-btn
icon="edit"
size="small"
variant="text"
@click.stop="editConversation(cIdx)"
@click.prevent="editConversation(cIdx)"
>
</v-btn>
<v-btn
@@ -192,7 +193,7 @@ onMounted(async () => {
size="small"
variant="text"
:loading="deletingConversationIndex === cIdx"
@click.stop="deleteConversation(cIdx)"
@click.prevent="deleteConversation(cIdx)"
>
</v-btn>
</div>
@@ -310,7 +311,7 @@ onMounted(async () => {
<v-btn
:title="$t('newConversation')"
icon="add"
@click="createNewConversion()"
@click="createNewConversation()"
></v-btn>
</v-app-bar>

View File

@@ -54,6 +54,12 @@ export default defineNuxtConfig({
iso: 'zh-CN',
name: '简体中文',
file: 'zh-CN.json',
},
{
code: 'ru',
iso: 'ru-RU',
name: 'Русский',
file: 'ru-RU.json',
}
],
lazy: true,

View File

@@ -11,7 +11,6 @@
"@kevinmarrec/nuxt-pwa": "^0.17.0",
"@nuxtjs/color-mode": "^3.2.0",
"@nuxtjs/i18n": "^8.0.0-beta.9",
"@vite-pwa/nuxt": "^0.0.7",
"material-design-icons-iconfont": "^6.7.0",
"nuxt": "^3.2.0"
},

View File

@@ -1,270 +1,35 @@
<script setup>
import Prompt from "~/components/Prompt.vue";
definePageMeta({
middleware: ["auth"]
middleware: ["auth"],
path: '/:id?',
keepalive: true
})
import {EventStreamContentType, fetchEventSource} from '@microsoft/fetch-event-source'
const { $i18n, $auth } = useNuxtApp()
const runtimeConfig = useRuntimeConfig()
const currentModel = useCurrentModel()
const openaiApiKey = useApiKey()
const fetchingResponse = ref(false)
const messageQueue = []
let isProcessingQueue = false
const processMessageQueue = () => {
if (isProcessingQueue || messageQueue.length === 0) {
return
}
if (!currentConversation.value.messages[currentConversation.value.messages.length - 1].is_bot) {
currentConversation.value.messages.push({id: null, is_bot: true, message: ''})
}
isProcessingQueue = true
const nextMessage = messageQueue.shift()
if (runtimeConfig.public.typewriter) {
let wordIndex = 0;
const intervalId = setInterval(() => {
currentConversation.value.messages[currentConversation.value.messages.length - 1].message += nextMessage[wordIndex]
wordIndex++
if (wordIndex === nextMessage.length) {
clearInterval(intervalId)
isProcessingQueue = false
processMessageQueue()
}
}, runtimeConfig.public.typewriterDelay)
} else {
currentConversation.value.messages[currentConversation.value.messages.length - 1].message += nextMessage
isProcessingQueue = false
processMessageQueue()
}
}
let ctrl
const abortFetch = () => {
if (ctrl) {
ctrl.abort()
}
fetchingResponse.value = false
}
const fetchReply = async (message) => {
ctrl = new AbortController()
let webSearchParams = {}
if (enableWebSearch.value) {
webSearchParams['web_search'] = {
ua: navigator.userAgent,
default_prompt: $i18n.t('webSearchDefaultPrompt')
}
}
const data = Object.assign({}, currentModel.value, {
openaiApiKey: enableCustomApiKey.value ? openaiApiKey.value : null,
message: message,
conversationId: currentConversation.value.id
}, webSearchParams)
try {
await fetchEventSource('/api/conversation/', {
signal: ctrl.signal,
method: 'POST',
headers: {
'accept': 'application/json',
'Content-Type': 'application/json',
},
body: JSON.stringify(data),
onopen(response) {
if (response.ok && response.headers.get('content-type') === EventStreamContentType) {
return;
}
throw new Error(`Failed to send message. HTTP ${response.status} - ${response.statusText}`);
},
onclose() {
if (ctrl.signal.aborted === true) {
return;
}
throw new Error(`Failed to send message. Server closed the connection unexpectedly.`);
},
onerror(err) {
throw err;
},
async onmessage(message) {
const event = message.event
const data = JSON.parse(message.data)
if (event === 'error') {
abortFetch()
showSnackbar(data.error)
return;
}
if (event === 'userMessageId') {
currentConversation.value.messages[currentConversation.value.messages.length - 1].id = data.userMessageId
return;
}
if (event === 'done') {
if (currentConversation.value.id === null) {
currentConversation.value.id = data.conversationId
genTitle(currentConversation.value.id)
}
currentConversation.value.messages[currentConversation.value.messages.length - 1].id = data.messageId
abortFetch()
return;
}
messageQueue.push(data.content)
processMessageQueue()
scrollChatWindow()
},
})
} catch (err) {
console.log(err)
abortFetch()
showSnackbar(err.message)
}
}
const currentConversation = useConversion()
const grab = ref(null)
const scrollChatWindow = () => {
if (grab.value === null) {
return;
}
grab.value.scrollIntoView({behavior: 'smooth'})
}
const send = (message) => {
fetchingResponse.value = true
currentConversation.value.messages.push({message: message})
fetchReply(message)
scrollChatWindow()
}
const stop = () => {
abortFetch()
}
const snackbar = ref(false)
const snackbarText = ref('')
const showSnackbar = (text) => {
snackbarText.value = text
snackbar.value = true
}
const editor = ref(null)
const usePrompt = (prompt) => {
editor.value.usePrompt(prompt)
}
const deleteMessage = (index) => {
currentConversation.value.messages.splice(index, 1)
}
const showWebSearchToggle = ref(false)
const enableWebSearch = ref(false)
const enableCustomApiKey = ref(false)
const settings = useSettings()
const route = useRoute()
const currentConversation = useConversation()
const conversation = ref(getDefaultConversationData())
watchEffect(() => {
if (settings.value) {
const settingsValue = toRaw(settings.value)
showWebSearchToggle.value = settingsValue.open_web_search && settingsValue.open_web_search === 'True'
enableCustomApiKey.value = settingsValue.open_api_key_setting && settingsValue.open_api_key_setting === 'True'
if (!route.params.id) {
conversation.value = getDefaultConversationData()
}
})
const loadMessage = async () => {
conversation.value.loadingMessages = true
const { data, error } = await useAuthFetch('/api/chat/messages/?conversationId=' + route.params.id)
if (!error.value) {
conversation.value.messages = data.value
}
conversation.value.loadingMessages = false
}
onMounted(async () => {
if (route.params.id) {
conversation.value.id = parseInt(route.params.id)
await loadMessage()
}
currentConversation.value = Object.assign({}, conversation.value)
})
</script>
<template>
<div
v-if="currentConversation.messages.length > 0"
ref="chatWindow"
>
<v-container>
<v-row>
<v-col
v-for="(message, index) in currentConversation.messages" :key="index"
cols="12"
>
<div
class="d-flex align-center"
:class="message.is_bot ? 'justify-start' : 'justify-end'"
>
<MessageActions
v-if="!message.is_bot"
:message="message"
:message-index="index"
:use-prompt="usePrompt"
:delete-message="deleteMessage"
/>
<MsgContent :message="message" />
<MessageActions
v-if="message.is_bot"
:message="message"
:message-index="index"
:use-prompt="usePrompt"
:delete-message="deleteMessage"
/>
</div>
</v-col>
</v-row>
</v-container>
<div ref="grab" class="w-100" style="height: 200px;"></div>
</div>
<Welcome v-else />
<v-footer app>
<div class="px-md-16 w-100 d-flex flex-column">
<div class="d-flex align-center">
<v-btn
v-show="fetchingResponse"
icon="close"
title="stop"
class="mr-3"
@click="stop"
></v-btn>
<MsgEditor ref="editor" :send-message="send" :disabled="fetchingResponse" :loading="fetchingResponse" />
</div>
<v-toolbar
density="comfortable"
color="transparent"
>
<Prompt v-show="!fetchingResponse" :use-prompt="usePrompt" />
<v-switch
v-if="showWebSearchToggle"
v-model="enableWebSearch"
hide-details
color="primary"
:label="$t('webSearch')"
></v-switch>
<v-spacer></v-spacer>
</v-toolbar>
<!-- <div class="py-2 text-disabled text-caption font-weight-light text-center">-->
<!-- © {{ new Date().getFullYear() }} {{ runtimeConfig.public.appName }}-->
<!-- </div>-->
</div>
</v-footer>
<v-snackbar
v-model="snackbar"
multi-line
location="top"
>
{{ snackbarText }}
<template v-slot:actions>
<v-btn
color="red"
variant="text"
@click="snackbar = false"
>
Close
</v-btn>
</template>
</v-snackbar>
<Conversation :conversation="conversation" />
</template>

View File

@@ -1,14 +1,15 @@
export const getDefaultConversionData = () => {
export const getDefaultConversationData = () => {
const { $i18n } = useNuxtApp()
return {
id: null,
topic: null,
topic: $i18n.t('defaultConversationTitle'),
messages: [],
loadingMessages: false,
}
}
export const getConversions = async () => {
export const getConversations = async () => {
const { data, error } = await useAuthFetch('/api/chat/conversations/')
if (!error.value) {
return data.value
@@ -16,37 +17,35 @@ export const getConversions = async () => {
return []
}
export const createNewConversion = () => {
const conversation = useConversion()
conversation.value = getDefaultConversionData()
export const createNewConversation = () => {
const conversation = useConversation()
conversation.value = getDefaultConversationData()
navigateTo('/')
}
export const openConversationMessages = async (currentConversation) => {
const conversation = useConversion()
conversation.value = Object.assign(conversation.value, currentConversation)
conversation.value.loadingMessages = true
const { data, error } = await useAuthFetch('/api/chat/messages/?conversationId=' + currentConversation.id)
if (!error.value) {
conversation.value.messages = data.value
}
conversation.value.loadingMessages = true
export const addConversation = (conversation) => {
const conversations = useConversations()
conversations.value = [conversation, ...conversations.value]
}
export const genTitle = async (conversationId) => {
const { $i18n } = useNuxtApp()
const { data, error } = await useAuthFetch('/api/gen_title/', {
method: 'POST',
body: {
conversationId: conversationId
conversationId: conversationId,
prompt: $i18n.t('genTitlePrompt')
}
})
if (!error.value) {
const conversation = {
id: conversationId,
topic: data.value.title,
const conversations = useConversations()
let index = conversations.value.findIndex(item => item.id === conversationId)
if (index === -1) {
index = 0
}
const conversations = useConversions()
// prepend to conversations
conversations.value = [conversation, ...conversations.value]
conversations.value[index].topic = data.value.title
return data.value.title
}
return null

2003
yarn.lock

File diff suppressed because it is too large Load Diff