chatgpt-web/service/src/chatgpt/index.ts
Redon ffd4da91cf chore: version 2.10.0
* feat: permission verification
* chore: v2.10.0
* feat: 500 server error page
* feat: only scroll to the bottom once the response has finished
* chore: update CHANGELOG
* chore: output a default error message when none exists
2023-03-07 22:12:15 +08:00

import * as dotenv from 'dotenv'
import 'isomorphic-fetch'
import type { ChatGPTAPIOptions, ChatMessage, SendMessageOptions } from 'chatgpt'
import { ChatGPTAPI, ChatGPTUnofficialProxyAPI } from 'chatgpt'
import { SocksProxyAgent } from 'socks-proxy-agent'
import fetch from 'node-fetch'
import { sendResponse } from '../utils'
import type { ApiModel, ChatContext, ChatGPTUnofficialProxyAPIOptions, ModelConfig } from '../types'
const ErrorCodeMessage: Record<string, string> = {
  400: '[OpenAI] 模型的最大上下文长度是4096个令牌请减少信息的长度。| This model\'s maximum context length is 4096 tokens.',
  401: '[OpenAI] 提供错误的API密钥 | Incorrect API key provided',
  403: '[OpenAI] 服务器拒绝访问,请稍后再试 | Server refused to access, please try again later',
  429: '[OpenAI] 服务器限流,请稍后再试 | Server was limited, please try again later',
  502: '[OpenAI] 错误的网关 | Bad Gateway',
  503: '[OpenAI] 服务器繁忙,请稍后再试 | Server is busy, please try again later',
  504: '[OpenAI] 网关超时 | Gateway Time-out',
  500: '[OpenAI] 服务器繁忙,请稍后再试 | Internal Server Error',
}
dotenv.config()
const timeoutMs: number = !isNaN(+process.env.TIMEOUT_MS) ? +process.env.TIMEOUT_MS : 30 * 1000
let apiModel: ApiModel
if (!process.env.OPENAI_API_KEY && !process.env.OPENAI_ACCESS_TOKEN)
  throw new Error('Missing OPENAI_API_KEY or OPENAI_ACCESS_TOKEN environment variable')
let api: ChatGPTAPI | ChatGPTUnofficialProxyAPI
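// Initialise the API client once at startup: prefer the official ChatGPTAPI
// when OPENAI_API_KEY is set, otherwise fall back to ChatGPTUnofficialProxyAPI
// driven by OPENAI_ACCESS_TOKEN.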
(async () => {
  // More Info: https://github.com/transitive-bullshit/chatgpt-api
  if (process.env.OPENAI_API_KEY) {
    const options: ChatGPTAPIOptions = {
      apiKey: process.env.OPENAI_API_KEY,
      completionParams: {
        model: 'gpt-3.5-turbo',
      },
      debug: false,
    }

    if (process.env.OPENAI_API_BASE_URL && process.env.OPENAI_API_BASE_URL.trim().length > 0)
      options.apiBaseUrl = process.env.OPENAI_API_BASE_URL

    if (process.env.SOCKS_PROXY_HOST && process.env.SOCKS_PROXY_PORT) {
      const agent = new SocksProxyAgent({
        hostname: process.env.SOCKS_PROXY_HOST,
        port: process.env.SOCKS_PROXY_PORT,
      })
      options.fetch = (url, options) => {
        return fetch(url, { agent, ...options })
      }
    }

    api = new ChatGPTAPI({ ...options })
    apiModel = 'ChatGPTAPI'
  }
  else {
    const options: ChatGPTUnofficialProxyAPIOptions = {
      accessToken: process.env.OPENAI_ACCESS_TOKEN,
      debug: false,
    }

    if (process.env.SOCKS_PROXY_HOST && process.env.SOCKS_PROXY_PORT) {
      const agent = new SocksProxyAgent({
        hostname: process.env.SOCKS_PROXY_HOST,
        port: process.env.SOCKS_PROXY_PORT,
      })
      options.fetch = (url, options) => {
        return fetch(url, { agent, ...options })
      }
    }

    if (process.env.API_REVERSE_PROXY)
      options.apiReverseProxyUrl = process.env.API_REVERSE_PROXY

    api = new ChatGPTUnofficialProxyAPI({ ...options })
    apiModel = 'ChatGPTUnofficialProxyAPI'
  }
})()
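// Send a single message to the configured API. Conversation context is threaded
// through `lastContext`, and each streamed partial reply is forwarded to the
// optional `process` callback before the final result is wrapped by sendResponse.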
async function chatReplyProcess(
  message: string,
  lastContext?: { conversationId?: string; parentMessageId?: string },
  process?: (chat: ChatMessage) => void,
) {
  if (!message)
    return sendResponse({ type: 'Fail', message: 'Message is empty' })

  try {
    let options: SendMessageOptions = { timeoutMs }

    if (lastContext) {
      if (apiModel === 'ChatGPTAPI')
        options = { parentMessageId: lastContext.parentMessageId }
      else
        options = { ...lastContext }
    }

    const response = await api.sendMessage(message, {
      ...options,
      onProgress: (partialResponse) => {
        process?.(partialResponse)
      },
    })

    return sendResponse({ type: 'Success', data: response })
  }
  catch (error: any) {
    const code = error.statusCode
    global.console.log(error)
    if (Reflect.has(ErrorCodeMessage, code))
      return sendResponse({ type: 'Fail', message: ErrorCodeMessage[code] })
    return sendResponse({ type: 'Fail', message: error.message ?? 'Please check the back-end console' })
  }
}
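// Expose the current runtime configuration (API model, reverse proxy, timeout
// and SOCKS proxy) so the front end can display it.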
async function chatConfig() {
  return sendResponse({
    type: 'Success',
    data: {
      apiModel,
      reverseProxy: process.env.API_REVERSE_PROXY,
      timeoutMs,
      socksProxy: (process.env.SOCKS_PROXY_HOST && process.env.SOCKS_PROXY_PORT) ? (`${process.env.SOCKS_PROXY_HOST}:${process.env.SOCKS_PROXY_PORT}`) : '-',
    } as ModelConfig,
  })
}
export type { ChatContext, ChatMessage }
export { chatReplyProcess, chatConfig }
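
For reference, here is a minimal sketch of how a caller might consume this module from an Express route. The Express server, the /chat-process and /config paths, port 3002 and the newline-delimited JSON streaming format are illustrative assumptions; none of them are defined in this file.

// Usage sketch (assumptions: Express server, routes /chat-process and /config,
// port 3002, newline-delimited JSON streaming - not taken from this file).
import express from 'express'
import type { ChatMessage } from './chatgpt'
import { chatConfig, chatReplyProcess } from './chatgpt'

const app = express()
app.use(express.json())

app.post('/chat-process', async (req, res) => {
  res.setHeader('Content-type', 'application/octet-stream')
  try {
    const { prompt, options = {} } = req.body as {
      prompt: string
      options?: { conversationId?: string; parentMessageId?: string }
    }
    let firstChunk = true
    // Forward each partial ChatMessage to the client as one JSON object per line.
    await chatReplyProcess(prompt, options, (chat: ChatMessage) => {
      res.write(firstChunk ? JSON.stringify(chat) : `\n${JSON.stringify(chat)}`)
      firstChunk = false
    })
  }
  catch (error) {
    res.write(JSON.stringify(error))
  }
  finally {
    res.end()
  }
})

app.post('/config', async (_req, res) => {
  res.send(await chatConfig())
})

app.listen(3002, () => console.log('Server is running on port 3002'))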