Compare commits
36 Commits
- 3af6192633
- 5b75b51059
- 49b89d5aad
- 87386c5061
- e2368fc284
- ff2410ebea
- 3315e5940f
- 38f7a73288
- 7151dac97d
- 086bfb3ce9
- cd58d80be9
- 00b3aa9bb8
- 8822742664
- 2ea1d1d02a
- 1d4ffa603d
- b461c75222
- e2fd28897c
- bf908be12e
- dc4ffb96b3
- 39cbed1853
- 8d82c1b930
- 9d3c30785e
- f5068c3a58
- df71f28edf
- 6bb73cf30b
- 37d888228c
- 4291a95e27
- 5df6c92050
- d7dfa7b216
- f09b7c8d95
- be04f67050
- 1b9d4b5e65
- c37db35a33
- 062645499b
- ccdb3baa0d
- 6de5909c48
.gitignore (vendored, new file, 2 lines)

```diff
@@ -0,0 +1,2 @@
+go.sum
+chatgpt-service
```
Dockerfile

```diff
@@ -2,11 +2,11 @@ FROM alpine:3.15.3
 LABEL maintainer="cookeem"
 LABEL email="cookeem@qq.com"
-LABEL version="v1.0.0"
+LABEL version="v1.0.3"
 
 RUN adduser -h /chatgpt-service -u 1000 -D dory
 COPY chatgpt-service /chatgpt-service/
 WORKDIR /chatgpt-service
 USER dory
 
-# docker build -t doryengine/chatgpt-service:v1.0.0-alpine .
+# docker build -t doryengine/chatgpt-service:v1.0.3-alpine .
```
LICENSE (new file, 21 lines)

```diff
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022 SeeFlowerX
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
```
README.md (72 lines changed)

````diff
@@ -1,35 +1,54 @@
-# 实时ChatGPT服务
+# Real-time ChatGPT service, support GPT3/GPT4, support conversation and generate pictures from sentences
 
-## chatGPT-service和chatGPT-stream
+- [English README](README.md)
+- [中文 README](README_CN.md)
 
-- chatGPT-service: [https://github.com/cookeem/chatgpt-service](https://github.com/cookeem/chatgpt-service)
-- chatGPT-service是一个后端服务,用于实时接收chatGPT的消息,并通过websocket的方式实时反馈给chatGPT-stream
-- chatGPT-stream: [https://github.com/cookeem/chatgpt-stream](https://github.com/cookeem/chatgpt-stream)
-- chatGPT-stream是一个前端服务,以websocket的方式实时接收chatGPT-service返回的消息
+## About chatgpt-service and chatgpt-stream
 
-## 效果图
+- chatgpt-service: [https://github.com/cookeem/chatgpt-service](https://github.com/cookeem/chatgpt-service)
+- chatgpt-service is a backend service, used to receive chatGPT messages in real time, and feed back to chatGPT-stream in real time through websocket
+- chatgpt-stream: [https://github.com/cookeem/chatgpt-stream](https://github.com/cookeem/chatgpt-stream)
+- chatgpt-stream is a front-end service that receives messages returned by chatGPT-service in real time through websocket
 
 ## gitee
 
 - [https://gitee.com/cookeem/chatgpt-service](https://gitee.com/cookeem/chatgpt-service)
 - [https://gitee.com/cookeem/chatgpt-stream](https://gitee.com/cookeem/chatgpt-stream)
 
+## Demo
+
+- Real-time conversation mode
+
 ![](chatgpt-demo.gif)
 
+- Generate picture patterns from sentences
+
-## 快速开始
+![](chatgpt-image.jpeg)
+
+## Quick start
 
 ```bash
-# 拉取代码
-git clone https://github.com/chatgpt-service.git
+# Pull source code
+git clone https://github.com/cookeem/chatgpt-service.git
 cd chatgpt-service
 
-# chatGPT的注册页面: https://beta.openai.com/signup
-# chatGPT的注册教程: https://www.cnblogs.com/damugua/p/16969508.html
-# chatGPT的APIkey管理界面: https://beta.openai.com/account/api-keys
+# ChatGPT's registration page: https://beta.openai.com/signup
+# ChatGPT registration tutorial: https://www.cnblogs.com/damugua/p/16969508.html
+# ChatGPT API key management page: https://beta.openai.com/account/api-keys
 
-# 修改config.yaml配置文件,修改appKey,改为你的openai.com的appKey
+# Modify the config.yaml configuration file, modify the apiKey, and change it to your openai.com API key
 vi config.yaml
-# openai的appKey,改为你的apiKey
-appKey: "xxxxxx"
+# your openai.com API key
+apiKey: "xxxxxx"
 
+# create pictures directory
+mkdir -p assets
+chown -R 1000:1000 assets
+
-# 使用docker启动服务
+# Start the service with docker-compose
 docker-compose up -d
 
+# Check service status
 docker-compose ps
 Name                Command               State           Ports
 -----------------------------------------------------------------------------------------------
@@ -37,28 +56,31 @@ chatgpt-service /chatgpt-service/chatgpt-s ... Up 0.0.0.0:59142->9000/t
 chatgpt-stream    /docker-entrypoint.sh ngin ...   Up      0.0.0.0:3000->80/tcp,:::3000->80/tcp
 
 
-# 访问页面,请保证你的服务器可以访问chatGPT的api接口
+# To access the page, please ensure that your server can access the chatGPT API
 # http://localhost:3000
 ```
 
-## 如何编译
+- Enter the question directly, it will call the ChatGPT interface to return the answer
+- Enter the picture description after `/image`, it will call the DALL-E2 interface to automatically generate pictures through the picture description
+
+## How to build
 
 ```bash
-# 拉取构建依赖
+# Pull build dependencies
go mod tidy
-# 项目编译
+# Compile the project
 go build
 
-# 执行程序
+# Run the service
 ./chatgpt-service
 
-# 相关接口
+# API url
 # ws://localhost:9000/api/ws/chat
 
-# 安装wscat
+# Install wscat
 npm install -g wscat
 
-# 使用wscat测试websocket,然后输入你要查询的问题
+# Use wscat to test websocket, then enter the question you want to query
 wscat --connect ws://localhost:9000/api/ws/chat
 
 ```
````
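Besides wscat, the endpoint can be exercised from Go with the same gorilla/websocket library the service itself depends on. Below is a minimal client sketch, assuming the server behavior shown in chat/service.go further down: the question goes out as a plain text frame, and the answer comes back as a stream of JSON messages. The JSON field names are an assumption, since the Message struct definition (and its tags) is not part of this diff.

```go
package main

import (
    "fmt"
    "log"

    "github.com/gorilla/websocket"
)

// Assumed wire shape of the server's Message struct; kind/msg/msgId/createTime
// are hypothetical JSON tags, only the Go field names appear in this diff.
type serverMsg struct {
    Kind       string `json:"kind"` // "receive", "chat", "image", "error", "retry"
    Msg        string `json:"msg"`
    MsgId      string `json:"msgId"`
    CreateTime string `json:"createTime"`
}

func main() {
    conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:9000/api/ws/chat", nil)
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    // Send the question as a plain text frame, exactly like wscat does.
    if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
        log.Fatal(err)
    }

    // Read streamed JSON chunks until the server closes the connection.
    for {
        var m serverMsg
        if err := conn.ReadJSON(&m); err != nil {
            return
        }
        fmt.Printf("[%s] %s\n", m.Kind, m.Msg)
    }
}
```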
README_CN.md (new file, 86 lines)

````diff
@@ -0,0 +1,86 @@
+# 实时ChatGPT服务,支持GPT3/GPT4,支持对话和通过句子生成图片
+
+- [English README](README.md)
+- [中文 README](README_CN.md)
+
+## chatGPT-service和chatGPT-stream
+
+- chatGPT-service: [https://github.com/cookeem/chatgpt-service](https://github.com/cookeem/chatgpt-service)
+- chatGPT-service是一个后端服务,用于实时接收chatGPT的消息,并通过websocket的方式实时反馈给chatGPT-stream
+- chatGPT-stream: [https://github.com/cookeem/chatgpt-stream](https://github.com/cookeem/chatgpt-stream)
+- chatGPT-stream是一个前端服务,以websocket的方式实时接收chatGPT-service返回的消息
+
+## gitee传送门
+
+- [https://gitee.com/cookeem/chatgpt-service](https://gitee.com/cookeem/chatgpt-service)
+- [https://gitee.com/cookeem/chatgpt-stream](https://gitee.com/cookeem/chatgpt-stream)
+
+## 效果图
+
+- 实时对话模式
+
+![](chatgpt-demo.gif)
+
+- 通过句子生成图片模式
+
+![](chatgpt-image.jpeg)
+
+## 快速开始
+
+```bash
+# 拉取代码
+git clone https://github.com/cookeem/chatgpt-service.git
+cd chatgpt-service
+
+# chatGPT的注册页面: https://beta.openai.com/signup
+# chatGPT的注册教程: https://www.cnblogs.com/damugua/p/16969508.html
+# chatGPT的APIkey管理界面: https://beta.openai.com/account/api-keys
+
+# 修改config.yaml配置文件,修改apiKey,改为你的openai.com的apiKey
+vi config.yaml
+# openai的apiKey,改为你的apiKey
+apiKey: "xxxxxx"
+
+# 创建生成的图片目录
+mkdir -p assets
+chown -R 1000:1000 assets
+
+# 使用docker-compose启动服务
+docker-compose up -d
+
+# 查看服务状态
+docker-compose ps
+Name                Command               State           Ports
+-----------------------------------------------------------------------------------------------
+chatgpt-service   /chatgpt-service/chatgpt-s ...   Up      0.0.0.0:59142->9000/tcp
+chatgpt-stream    /docker-entrypoint.sh ngin ...   Up      0.0.0.0:3000->80/tcp,:::3000->80/tcp
+
+
+# 访问页面,请保证你的服务器可以访问chatGPT的api接口
+# http://localhost:3000
+```
+
+- 直接输入问题,则调用ChatGPT接口返回答案
+- `/image `后边输入想要的图片描述,则调用DALL-E2接口,通过图片描述自动生成图片
+
+## 如何编译
+
+```bash
+# 拉取构建依赖
+go mod tidy
+# 项目编译
+go build
+
+# 执行程序
+./chatgpt-service
+
+# 相关接口
+# ws://localhost:9000/api/ws/chat
+
+# 安装wscat
+npm install -g wscat
+
+# 使用wscat测试websocket,然后输入你要查询的问题
+wscat --connect ws://localhost:9000/api/ws/chat
+
+```
````
chat package file (file name not captured)

```diff
@@ -1,7 +1,10 @@
 package chat
 
 import (
+    "fmt"
+    "github.com/sashabaranov/go-openai"
     log "github.com/sirupsen/logrus"
+    "math/rand"
     "os"
     "time"
 )
@@ -38,9 +41,48 @@ func (logger Logger) LogPanic(args ...interface{}) {
     log.Panic(args...)
 }
 
+func RandomString(n int) string {
+    var letter []rune
+    lowerChars := "abcdefghijklmnopqrstuvwxyz"
+    numberChars := "0123456789"
+    chars := fmt.Sprintf("%s%s", lowerChars, numberChars)
+    letter = []rune(chars)
+    var str string
+    b := make([]rune, n)
+    seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
+    for i := range b {
+        b[i] = letter[seededRand.Intn(len(letter))]
+    }
+    str = string(b)
+    return str
+}
+
 const (
     StatusFail string = "FAIL"
 
-    pingPeriod = time.Second * 50
-    pingWait   = time.Second * 60
+    PingPeriod = time.Second * 50
+    PingWait   = time.Second * 60
 )
+
+var (
+    GPTModels = []string{
+        openai.GPT432K0314,
+        openai.GPT432K,
+        openai.GPT40314,
+        openai.GPT4,
+        openai.GPT3Dot5Turbo0301,
+        openai.GPT3Dot5Turbo,
+        openai.GPT3TextDavinci003,
+        openai.GPT3TextDavinci002,
+        openai.GPT3TextCurie001,
+        openai.GPT3TextBabbage001,
+        openai.GPT3TextAda001,
+        openai.GPT3TextDavinci001,
+        openai.GPT3DavinciInstructBeta,
+        openai.GPT3Davinci,
+        openai.GPT3CurieInstructBeta,
+        openai.GPT3Curie,
+        openai.GPT3Ada,
+        openai.GPT3Babbage,
+    }
+)
```
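Two details in these hunks are worth noting. First, pingPeriod/pingWait become the exported PingPeriod/PingWait, and the ping interval (50s) stays shorter than the read deadline (60s), so each ping/pong round trip refreshes the deadline before it can expire. Second, RandomString is the helper chat/service.go uses to name generated image files. Here is a self-contained sketch of the same technique; the function is reproduced inline rather than imported, since the module path is not shown in this diff.

```go
package main

import (
    "fmt"
    "math/rand"
    "time"
)

// Same technique as chat.RandomString: n runes drawn from the 36-character
// set [a-z0-9], with a rand source seeded from the wall clock.
func randomString(n int) string {
    letter := []rune("abcdefghijklmnopqrstuvwxyz0123456789")
    b := make([]rune, n)
    seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
    for i := range b {
        b[i] = letter[seededRand.Intn(len(letter))]
    }
    return string(b)
}

func main() {
    // chat/service.go uses RandomString(16) + ".png" to name generated images.
    fmt.Println(randomString(16) + ".png")
}
```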
chat/service.go (430 lines changed)

```diff
@@ -2,17 +2,20 @@ package chat
 
 import (
     "context"
+    "encoding/base64"
     "errors"
     "fmt"
-    "github.com/gin-gonic/gin"
-    "github.com/google/uuid"
-    "github.com/gorilla/websocket"
-    gogpt "github.com/sashabaranov/go-gpt3"
     "io"
     "net/http"
+    "os"
     "strings"
     "sync"
     "time"
+
+    "github.com/gin-gonic/gin"
+    "github.com/google/uuid"
+    "github.com/gorilla/websocket"
+    openai "github.com/sashabaranov/go-openai"
 )
@@ -45,28 +48,9 @@ func (api *Api) responseFunc(c *gin.Context, startTime time.Time, status, msg st
     c.JSON(httpStatus, ar)
 }
 
-func (api *Api) wsCheckConnectStatus(conn *websocket.Conn, chClose chan int) {
-    var err error
-    defer func() {
-        conn.Close()
-    }()
-    conn.SetReadDeadline(time.Now().Add(pingWait))
-    conn.SetPongHandler(func(s string) error {
-        conn.SetReadDeadline(time.Now().Add(pingWait))
-        return nil
-    })
-    for {
-        _, _, err = conn.ReadMessage()
-        if err != nil {
-            chClose <- 0
-            return
-        }
-    }
-}
-
 func (api *Api) wsPingMsg(conn *websocket.Conn, chClose, chIsCloseSet chan int) {
     var err error
-    ticker := time.NewTicker(pingPeriod)
+    ticker := time.NewTicker(PingPeriod)
 
     var mutex = &sync.Mutex{}
 
@@ -77,7 +61,7 @@ func (api *Api) wsPingMsg(conn *websocket.Conn, chClose, chIsCloseSet chan int)
     for {
         select {
         case <-ticker.C:
-            conn.SetWriteDeadline(time.Now().Add(pingWait))
+            conn.SetWriteDeadline(time.Now().Add(PingWait))
             mutex.Lock()
             err = conn.WriteMessage(websocket.PingMessage, nil)
             if err != nil {
@@ -92,24 +76,206 @@ func (api *Api) wsPingMsg(conn *websocket.Conn, chClose, chIsCloseSet chan int)
     }
 }
 
-func (api *Api) GetChatMessage(conn *websocket.Conn, cli *gogpt.Client, mutex *sync.Mutex, requestMsg string) {
+func (api *Api) GetChatMessage(conn *websocket.Conn, cli *openai.Client, mutex *sync.Mutex, reqMsgs []openai.ChatCompletionMessage) {
     var err error
     var strResp string
-    req := gogpt.CompletionRequest{
-        Model:            gogpt.GPT3TextDavinci003,
-        MaxTokens:        api.Config.MaxLength,
-        Temperature:      0.6,
-        Prompt:           requestMsg,
-        Stream:           true,
-        Stop:             []string{"\n\n\n"},
-        TopP:             1,
-        FrequencyPenalty: 0.1,
-        PresencePenalty:  0.1,
-    }
 
     ctx := context.Background()
-    stream, err := cli.CreateCompletionStream(ctx, req)
-    if err != nil {
-        err = fmt.Errorf("[ERROR] create chatGPT stream error: %s", err.Error())
+
+    switch api.Config.Model {
+    case openai.GPT3Dot5Turbo0301, openai.GPT3Dot5Turbo, openai.GPT4, openai.GPT40314, openai.GPT432K0314, openai.GPT432K:
+        prompt := reqMsgs[len(reqMsgs)-1].Content
+        req := openai.ChatCompletionRequest{
+            Model:            api.Config.Model,
+            MaxTokens:        api.Config.MaxLength,
+            Temperature:      1.0,
+            Messages:         reqMsgs,
+            Stream:           true,
+            TopP:             1,
+            FrequencyPenalty: 0.1,
+            PresencePenalty:  0.1,
+        }
+
+        stream, err := cli.CreateChatCompletionStream(ctx, req)
+        if err != nil {
+            err = fmt.Errorf("[ERROR] create ChatGPT stream model=%s error: %s", api.Config.Model, err.Error())
+            chatMsg := Message{
+                Kind:       "error",
+                Msg:        err.Error(),
+                MsgId:      uuid.New().String(),
+                CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+            }
+            mutex.Lock()
+            _ = conn.WriteJSON(chatMsg)
+            mutex.Unlock()
+            api.Logger.LogError(err.Error())
+            return
+        }
+        defer stream.Close()
+
+        id := uuid.New().String()
+        var i int
+        for {
+            response, err := stream.Recv()
+            if err != nil {
+                var s string
+                var kind string
+                if errors.Is(err, io.EOF) {
+                    if i == 0 {
+                        s = "[ERROR] NO RESPONSE, PLEASE RETRY"
+                        kind = "retry"
+                    } else {
+                        s = "\n\n###### [END] ######"
+                        kind = "chat"
+                    }
+                } else {
+                    s = fmt.Sprintf("[ERROR] %s", err.Error())
+                    kind = "error"
+                }
+                chatMsg := Message{
+                    Kind:       kind,
+                    Msg:        s,
+                    MsgId:      id,
+                    CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+                }
+                mutex.Lock()
+                _ = conn.WriteJSON(chatMsg)
+                mutex.Unlock()
+                break
+            }
+
+            if len(response.Choices) > 0 {
+                var s string
+                if i == 0 {
+                    s = fmt.Sprintf("%s# %s\n\n", s, prompt)
+                }
+                for _, choice := range response.Choices {
+                    s = s + choice.Delta.Content
+                }
+                strResp = strResp + s
+                chatMsg := Message{
+                    Kind:       "chat",
+                    Msg:        s,
+                    MsgId:      id,
+                    CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+                }
+                mutex.Lock()
+                _ = conn.WriteJSON(chatMsg)
+                mutex.Unlock()
+            }
+            i = i + 1
+        }
+        if strResp != "" {
+            api.Logger.LogInfo(fmt.Sprintf("[RESPONSE] %s\n", strResp))
+        }
+    case openai.GPT3TextDavinci003, openai.GPT3TextDavinci002, openai.GPT3TextCurie001, openai.GPT3TextBabbage001, openai.GPT3TextAda001, openai.GPT3TextDavinci001, openai.GPT3DavinciInstructBeta, openai.GPT3Davinci, openai.GPT3CurieInstructBeta, openai.GPT3Curie, openai.GPT3Ada, openai.GPT3Babbage:
+        prompt := reqMsgs[len(reqMsgs)-1].Content
+        req := openai.CompletionRequest{
+            Model:            api.Config.Model,
+            MaxTokens:        api.Config.MaxLength,
+            Temperature:      0.6,
+            Prompt:           prompt,
+            Stream:           true,
+            //Stop:             []string{"\n\n\n"},
+            TopP:             1,
+            FrequencyPenalty: 0.1,
+            PresencePenalty:  0.1,
+        }
+
+        stream, err := cli.CreateCompletionStream(ctx, req)
+        if err != nil {
+            err = fmt.Errorf("[ERROR] create ChatGPT stream model=%s error: %s", api.Config.Model, err.Error())
+            chatMsg := Message{
+                Kind:       "error",
+                Msg:        err.Error(),
+                MsgId:      uuid.New().String(),
+                CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+            }
+            mutex.Lock()
+            _ = conn.WriteJSON(chatMsg)
+            mutex.Unlock()
+            api.Logger.LogError(err.Error())
+            return
+        }
+        defer stream.Close()
+
+        id := uuid.New().String()
+        var i int
+        for {
+            response, err := stream.Recv()
+            if err != nil {
+                var s string
+                var kind string
+                if errors.Is(err, io.EOF) {
+                    if i == 0 {
+                        s = "[ERROR] NO RESPONSE, PLEASE RETRY"
+                        kind = "retry"
+                    } else {
+                        s = "\n\n###### [END] ######"
+                        kind = "chat"
+                    }
+                } else {
+                    s = fmt.Sprintf("[ERROR] %s", err.Error())
+                    kind = "error"
+                }
+                chatMsg := Message{
+                    Kind:       kind,
+                    Msg:        s,
+                    MsgId:      id,
+                    CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+                }
+                mutex.Lock()
+                _ = conn.WriteJSON(chatMsg)
+                mutex.Unlock()
+                break
+            }
+
+            if len(response.Choices) > 0 {
+                var s string
+                if i == 0 {
+                    s = fmt.Sprintf("%s# %s\n\n", s, prompt)
+                }
+                for _, choice := range response.Choices {
+                    s = s + choice.Text
+                }
+                strResp = strResp + s
+                chatMsg := Message{
+                    Kind:       "chat",
+                    Msg:        s,
+                    MsgId:      id,
+                    CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+                }
+                mutex.Lock()
+                _ = conn.WriteJSON(chatMsg)
+                mutex.Unlock()
+            }
+            i = i + 1
+        }
+        if strResp != "" {
+            api.Logger.LogInfo(fmt.Sprintf("[RESPONSE] %s\n", strResp))
+        }
+    default:
+        err = fmt.Errorf("model not exists")
+        api.Logger.LogError(err.Error())
+        return
+    }
+}
+
+func (api *Api) GetImageMessage(conn *websocket.Conn, cli *openai.Client, mutex *sync.Mutex, requestMsg string) {
+    var err error
+
+    ctx := context.Background()
+
+    prompt := strings.TrimPrefix(requestMsg, "/image ")
+    req := openai.ImageRequest{
+        Prompt:         prompt,
+        Size:           openai.CreateImageSize256x256,
+        ResponseFormat: openai.CreateImageResponseFormatB64JSON,
+        N:              1,
+    }
+
+    sendError := func(err error) {
+        err = fmt.Errorf("[ERROR] generate image error: %s", err.Error())
         chatMsg := Message{
             Kind:       "error",
             Msg:        err.Error(),
@@ -120,77 +286,56 @@ func (api *Api) GetChatMessage(conn *websocket.Conn, cli *gogpt.Client, mutex *s
         mutex.Lock()
         _ = conn.WriteJSON(chatMsg)
         mutex.Unlock()
         api.Logger.LogError(err.Error())
     }
 
-    defer stream.Close()
-
-    id := uuid.New().String()
-    var i int
-    for {
-        response, err := stream.Recv()
-        if errors.Is(err, io.EOF) {
-            var s string
-            var kind string
-            if i == 0 {
-                s = "[ERROR] NO RESPONSE, PLEASE RETRY"
-                kind = "retry"
-            } else {
-                s = "\n\n###### [END] ######"
-                kind = "chat"
-            }
-            chatMsg := Message{
-                Kind:       kind,
-                Msg:        s,
-                MsgId:      id,
-                CreateTime: time.Now().Format("2006-01-02 15:04:05"),
-            }
-            mutex.Lock()
-            _ = conn.WriteJSON(chatMsg)
-            mutex.Unlock()
-            if kind == "retry" {
-                api.Logger.LogError(s)
-            }
-            break
-        }
-        if err != nil {
-            err = fmt.Errorf("[ERROR] receive chatGPT stream error: %s", err.Error())
-            chatMsg := Message{
-                Kind:       "error",
-                Msg:        err.Error(),
-                MsgId:      id,
-                CreateTime: time.Now().Format("2006-01-02 15:04:05"),
-            }
-            mutex.Lock()
-            _ = conn.WriteJSON(chatMsg)
-            mutex.Unlock()
-            api.Logger.LogError(err.Error())
-            return
-        }
-
-        if len(response.Choices) > 0 {
-            var s string
-            if i == 0 {
-                s = fmt.Sprintf(`%s# %s`, s, requestMsg)
-            }
-            for _, choice := range response.Choices {
-                s = s + choice.Text
-            }
-            strResp = strResp + s
-            chatMsg := Message{
-                Kind:       "chat",
-                Msg:        s,
-                MsgId:      id,
-                CreateTime: time.Now().Format("2006-01-02 15:04:05"),
-            }
-            mutex.Lock()
-            _ = conn.WriteJSON(chatMsg)
-            mutex.Unlock()
-        }
-        i = i + 1
-    }
-    if strResp != "" {
-        api.Logger.LogInfo(fmt.Sprintf("[RESPONSE] %s%s", requestMsg, strResp))
-    }
+    resp, err := cli.CreateImage(ctx, req)
+    if err != nil {
+        err = fmt.Errorf("[ERROR] generate image error: %s", err.Error())
+        sendError(err)
+        return
+    }
+    if len(resp.Data) == 0 {
+        err = fmt.Errorf("[ERROR] generate image error: result is empty")
+        sendError(err)
+        return
+    }
+
+    imgBytes, err := base64.StdEncoding.DecodeString(resp.Data[0].B64JSON)
+    if err != nil {
+        err = fmt.Errorf("[ERROR] image base64 decode error: %s", err.Error())
+        sendError(err)
+        return
+    }
+
+    date := time.Now().Format("2006-01-02")
+    imageDir := fmt.Sprintf("assets/images/%s", date)
+    err = os.MkdirAll(imageDir, 0700)
+    if err != nil {
+        err = fmt.Errorf("[ERROR] create image directory error: %s", err.Error())
+        sendError(err)
+        return
+    }
+
+    imageFileName := fmt.Sprintf("%s.png", RandomString(16))
+    err = os.WriteFile(fmt.Sprintf("%s/%s", imageDir, imageFileName), imgBytes, 0600)
+    if err != nil {
+        err = fmt.Errorf("[ERROR] write png image error: %s", err.Error())
+        sendError(err)
+        return
+    }
+
+    msg := fmt.Sprintf("api/%s/%s", imageDir, imageFileName)
+    chatMsg := Message{
+        Kind:       "image",
+        Msg:        msg,
+        MsgId:      uuid.New().String(),
+        CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+    }
+    mutex.Lock()
+    _ = conn.WriteJSON(chatMsg)
+    mutex.Unlock()
+    api.Logger.LogInfo(fmt.Sprintf("[IMAGE] # %s\n%s", requestMsg, msg))
+    return
+}
 
 func (api *Api) WsChat(c *gin.Context) {
@@ -210,7 +355,7 @@ func (api *Api) WsChat(c *gin.Context) {
     mutex := &sync.Mutex{}
     conn, err := wsupgrader.Upgrade(c.Writer, c.Request, nil)
     if err != nil {
-        err = fmt.Errorf("failed to upgrade websocket %s", err.Error())
+        err = fmt.Errorf("[ERROR] failed to upgrade websocket %s", err.Error())
         msg = err.Error()
         api.responseFunc(c, startTime, status, msg, httpStatus, data)
         return
@@ -219,13 +364,18 @@ func (api *Api) WsChat(c *gin.Context) {
         _ = conn.Close()
     }()
 
+    _ = conn.SetReadDeadline(time.Now().Add(PingWait))
+    conn.SetPongHandler(func(s string) error {
+        _ = conn.SetReadDeadline(time.Now().Add(PingWait))
+        return nil
+    })
+
     var isClosed bool
     chClose := make(chan int)
     chIsCloseSet := make(chan int)
-    defer func() {
-        conn.Close()
-    }()
-    go api.wsCheckConnectStatus(conn, chClose)
     go api.wsPingMsg(conn, chClose, chIsCloseSet)
     go func() {
         for {
@@ -238,7 +388,9 @@ func (api *Api) WsChat(c *gin.Context) {
     }()
 
     api.Logger.LogInfo(fmt.Sprintf("websocket connection open"))
-    cli := gogpt.NewClient(api.Config.AppKey)
+    cli := openai.NewClient(api.Config.ApiKey)
+
+    reqMsgs := make([]openai.ChatCompletionMessage, 0)
 
     var latestRequestTime time.Time
     for {
@@ -248,7 +400,7 @@ func (api *Api) WsChat(c *gin.Context) {
         // read in a message
         messageType, bs, err := conn.ReadMessage()
         if err != nil {
-            err = fmt.Errorf("read message error: %s", err.Error())
+            err = fmt.Errorf("[ERROR] read message error: %s", err.Error())
             api.Logger.LogError(err.Error())
             return
         }
@@ -262,7 +414,7 @@ func (api *Api) WsChat(c *gin.Context) {
             ok = true
         } else {
             if time.Since(latestRequestTime) < time.Second*time.Duration(api.Config.IntervalSeconds) {
-                err = fmt.Errorf("please wait %d seconds for next query", api.Config.IntervalSeconds)
+                err = fmt.Errorf("[ERROR] please wait %d seconds for next query", api.Config.IntervalSeconds)
                 chatMsg := Message{
                     Kind: "error",
                     Msg:  err.Error(),
@@ -280,7 +432,7 @@ func (api *Api) WsChat(c *gin.Context) {
         }
         if ok {
             if len(strings.Trim(requestMsg, " ")) < 2 {
-                err = fmt.Errorf("message too short")
+                err = fmt.Errorf("[ERROR] message too short")
                 chatMsg := Message{
                     Kind: "error",
                     Msg:  err.Error(),
@@ -292,26 +444,56 @@ func (api *Api) WsChat(c *gin.Context) {
                 mutex.Unlock()
                 api.Logger.LogError(err.Error())
             } else {
 
-                chatMsg := Message{
-                    Kind:       "receive",
-                    Msg:        requestMsg,
-                    MsgId:      uuid.New().String(),
-                    CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+                if strings.HasPrefix(requestMsg, "/image ") {
+                    chatMsg := Message{
+                        Kind:       "receive",
+                        Msg:        requestMsg,
+                        MsgId:      uuid.New().String(),
+                        CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+                    }
+                    mutex.Lock()
+                    _ = conn.WriteJSON(chatMsg)
+                    mutex.Unlock()
+                    go api.GetImageMessage(conn, cli, mutex, requestMsg)
+                } else {
+                    chatMsg := Message{
+                        Kind:       "receive",
+                        Msg:        requestMsg,
+                        MsgId:      uuid.New().String(),
+                        CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+                    }
+                    mutex.Lock()
+                    _ = conn.WriteJSON(chatMsg)
+                    mutex.Unlock()
+                    reqMsgs = append(reqMsgs, openai.ChatCompletionMessage{
+                        Role:    openai.ChatMessageRoleUser,
+                        Content: requestMsg,
+                    })
+                    go api.GetChatMessage(conn, cli, mutex, reqMsgs)
                 }
-                mutex.Lock()
-                _ = conn.WriteJSON(chatMsg)
-                mutex.Unlock()
-                go api.GetChatMessage(conn, cli, mutex, requestMsg)
             }
         }
         case websocket.CloseMessage:
             isClosed = true
             api.Logger.LogInfo("[CLOSED] websocket receive closed message")
+        case websocket.PingMessage:
+            _ = conn.SetReadDeadline(time.Now().Add(PingWait))
+            api.Logger.LogInfo("[PING] websocket receive ping message")
+        case websocket.PongMessage:
+            _ = conn.SetReadDeadline(time.Now().Add(PingWait))
+            api.Logger.LogInfo("[PONG] websocket receive pong message")
         default:
-            api.Logger.LogError("websocket receive message type error")
+            err = fmt.Errorf("[ERROR] websocket receive message type not text")
+            chatMsg := Message{
+                Kind:       "error",
+                Msg:        err.Error(),
+                MsgId:      uuid.New().String(),
+                CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+            }
+            mutex.Lock()
+            _ = conn.WriteJSON(chatMsg)
+            mutex.Unlock()
             api.Logger.LogError(err.Error())
             return
         }
     }
```
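A behavioral change that is easy to miss in this rewrite: WsChat now accumulates every user input in reqMsgs and passes the whole slice to GetChatMessage, so chat-model requests carry the prior questions as context. Note that only user messages are appended; assistant replies are not added back into the history in this version. A compilable sketch of the request the chat branch ends up building, using the same go-openai types and constants as the diff above:

```go
package main

import (
    "fmt"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    // reqMsgs grows by one user message per websocket query (see WsChat);
    // the whole history rides along on every streaming request.
    reqMsgs := []openai.ChatCompletionMessage{
        {Role: openai.ChatMessageRoleUser, Content: "first question"},
        {Role: openai.ChatMessageRoleUser, Content: "follow-up question"},
    }
    req := openai.ChatCompletionRequest{
        Model:    openai.GPT3Dot5Turbo,
        Messages: reqMsgs,
        Stream:   true,
    }
    fmt.Printf("%d messages, model=%s\n", len(req.Messages), req.Model)
}
```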
chat package Config struct (file name not captured)

```diff
@@ -1,9 +1,10 @@
 package chat
 
 type Config struct {
-    AppKey          string `yaml:"appKey" json:"appKey" bson:"appKey" validate:"required"`
+    ApiKey          string `yaml:"apiKey" json:"apiKey" bson:"apiKey" validate:"required"`
     Port            int    `yaml:"port" json:"port" bson:"port" validate:"required"`
     IntervalSeconds int    `yaml:"intervalSeconds" json:"intervalSeconds" bson:"intervalSeconds" validate:"required"`
+    Model           string `yaml:"model" json:"model" bson:"model" validate:"required"`
     MaxLength       int    `yaml:"maxLength" json:"maxLength" bson:"maxLength" validate:"required"`
     Cors            bool   `yaml:"cors" json:"cors" bson:"cors" validate:""`
 }
```
chatgpt-image.jpeg (new binary file, 200 KiB; content not shown)
config.yaml (16 lines changed)

```diff
@@ -1,10 +1,20 @@
-# openai的appKey
-appKey: "xxxxxx"
+# Your openai.com API key
+# openai的API Key
+apiKey: "xxxxxx"
+# Service port
 # 服务端口
 port: 9000
+# The time interval for sending questions cannot be less than how long, unit: second
 # 问题发送的时间间隔不能小于多长时间,单位:秒
 intervalSeconds: 5
+# GPT model, if you use the GPT4 model, please ensure that the corresponding openai account has the permission to use the GPT4 model
+# Available models include: gpt-4-32k-0314, gpt-4-32k, gpt-4-0314, gpt-4, gpt-3.5-turbo-0301, gpt-3.5-turbo, text-davinci-003, text-davinci-002, text-curie-001, text-babbage-001, text-ada-001, text-davinci-001, davinci-instruct-beta, davinci, curie-instruct-beta, curie, ada, babbage
+# GPT模型,如果使用GPT4模型,请保证对应的openai账号有GPT4模型的使用权限
+# 可用的模型包括: gpt-4-32k-0314, gpt-4-32k, gpt-4-0314, gpt-4, gpt-3.5-turbo-0301, gpt-3.5-turbo, text-davinci-003, text-davinci-002, text-curie-001, text-babbage-001, text-ada-001, text-davinci-001, davinci-instruct-beta, davinci, curie-instruct-beta, curie, ada, babbage
+model: gpt-3.5-turbo-0301
+# The maximum length of the returned answer
 # 返回答案的最大长度
-maxLength: 1500
+maxLength: 2000
+# Whether to allow cors cross-domain
 # 是否允许cors跨域
 cors: true
```
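These keys map onto the chat package Config struct shown earlier via its yaml tags. The loading code itself is not part of this diff, so the following is only a sketch of how such a file is typically read with gopkg.in/yaml.v3 (already a go.mod dependency), with the struct copied locally so the example stands alone:

```go
package main

import (
    "fmt"
    "os"

    "gopkg.in/yaml.v3"
)

// Local copy of the relevant fields of chat.Config, using the same yaml tags
// that appear in this diff.
type Config struct {
    ApiKey          string `yaml:"apiKey"`
    Port            int    `yaml:"port"`
    IntervalSeconds int    `yaml:"intervalSeconds"`
    Model           string `yaml:"model"`
    MaxLength       int    `yaml:"maxLength"`
    Cors            bool   `yaml:"cors"`
}

func main() {
    bs, err := os.ReadFile("config.yaml")
    if err != nil {
        panic(err)
    }
    var config Config
    if err := yaml.Unmarshal(bs, &config); err != nil {
        panic(err)
    }
    fmt.Printf("model=%s port=%d maxLength=%d\n", config.Model, config.Port, config.MaxLength)
}
```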
docker-compose file (file name not captured)

```diff
@@ -1,7 +1,7 @@
 version: "3"
 services:
   chatgpt-stream:
-    image: "doryengine/chatgpt-stream:v1.0.0"
+    image: "doryengine/chatgpt-stream:v1.0.3"
     hostname: chatgpt-stream
     container_name: chatgpt-stream
     ports:
@@ -11,12 +11,13 @@ services:
       - chatgpt-service
     restart: always
   chatgpt-service:
-    image: "doryengine/chatgpt-service:v1.0.0-alpine"
+    image: "doryengine/chatgpt-service:v1.0.3-alpine"
     hostname: chatgpt-service
     container_name: chatgpt-service
     ports:
-      - "9000"
+      - "9000:9000"
     volumes:
       - ./config.yaml:/chatgpt-service/config.yaml
+      - ./assets:/chatgpt-service/assets
     command: /chatgpt-service/chatgpt-service
     restart: always
```
go.mod (2 lines changed)

```diff
@@ -7,7 +7,7 @@ require (
 	github.com/gin-gonic/gin v1.8.2
 	github.com/google/uuid v1.3.0
 	github.com/gorilla/websocket v1.5.0
-	github.com/sashabaranov/go-gpt3 v1.0.0
+	github.com/sashabaranov/go-openai v1.5.7
 	github.com/sirupsen/logrus v1.9.0
 	gopkg.in/yaml.v3 v3.0.1
 )
```
main.go (16 lines changed)

```diff
@@ -27,8 +27,19 @@ func main() {
         logger.LogError(err.Error())
         return
     }
-    if config.AppKey == "" {
-        logger.LogError(fmt.Sprintf("appKey is empty"))
+    if config.ApiKey == "" {
+        logger.LogError(fmt.Sprintf("apiKey is empty"))
         return
     }
+    var found bool
+    for _, model := range chat.GPTModels {
+        if model == config.Model {
+            found = true
+            break
+        }
+    }
+    if !found {
+        logger.LogError(fmt.Sprintf("model not exists"))
+        return
+    }
 
@@ -45,6 +56,7 @@ func main() {
     }
 
     groupApi := r.Group("/api")
+    groupApi.Static("/assets", "assets")
     groupWs := groupApi.Group("/ws")
     groupWs.GET("chat", api.WsChat)
```
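The functional addition here, groupApi.Static("/assets", "assets"), is what makes the generated-image URLs resolvable: GetImageMessage writes files under assets/images/ and reports them to clients as api/assets/images/... paths. A minimal routing sketch with the websocket handler stubbed out (the real code wires api.WsChat here):

```go
package main

import "github.com/gin-gonic/gin"

func main() {
    r := gin.Default()

    groupApi := r.Group("/api")
    // Serve the ./assets directory at /api/assets, matching the
    // "api/assets/images/..." paths GetImageMessage sends to clients.
    groupApi.Static("/assets", "assets")

    groupWs := groupApi.Group("/ws")
    groupWs.GET("chat", func(c *gin.Context) {
        // stub; the real service registers api.WsChat here
    })

    _ = r.Run(":9000") // config.yaml sets port: 9000
}
```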