commit 15af942e6eed3fa031fb09c76832068d584a80ad
Author: cookeem
Date:   Fri Feb 10 13:03:52 2023 +0800

    README documentation

diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000..7709bc6
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+    "jira-plugin.workingProject": ""
+}
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..d3f5466
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,12 @@
+FROM alpine:3.15.3
+
+LABEL maintainer="cookeem"
+LABEL email="cookeem@qq.com"
+LABEL version="v1.0.0"
+
+RUN adduser -h /chatgpt-service -u 1000 -D dory
+COPY chatgpt-service /chatgpt-service/
+WORKDIR /chatgpt-service
+USER dory
+
+# docker build -t doryengine/chatgpt-service:v1.0.0-alpine .
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..4cce146
--- /dev/null
+++ b/README.md
@@ -0,0 +1,59 @@
+# Real-time ChatGPT service
+
+## chatGPT-service and chatGPT-stream
+
+- chatGPT-service: [https://github.com/cookeem/chatgpt-service](https://github.com/cookeem/chatgpt-service)
+  - chatGPT-service is the backend service; it receives chatGPT messages in real time and streams them to chatGPT-stream over websocket
+- chatGPT-stream: [https://github.com/cookeem/chatgpt-stream](https://github.com/cookeem/chatgpt-stream)
+  - chatGPT-stream is the frontend service; it receives the messages returned by chatGPT-service in real time over websocket
+
+## Demo
+
+![](chatgpt-service.gif)
+
+
+## Quick start
+
+```bash
+# Pull the code
+git clone https://github.com/cookeem/chatgpt-service.git
+cd chatgpt-service
+
+# chatGPT sign-up page: https://beta.openai.com/signup
+# chatGPT sign-up tutorial: https://www.cnblogs.com/damugua/p/16969508.html
+# chatGPT API key management page: https://beta.openai.com/account/api-keys
+
+# Edit the config.yaml configuration file and set appKey to your openai.com appKey
+vi config.yaml
+# openai appKey, change it to your apiKey
+appKey: "xxxxxx"
+
+
+# Start the services with docker
+docker-compose up -d
+
+# Open the page
+# http://localhost:3000
+```
+
+## How to build
+
+```bash
+# Pull the build dependencies
+go mod tidy
+# Build the project
+go build
+
+# Run the program
+./chatgpt-service
+
+# Related endpoint
+# ws://localhost:9000/api/ws/chat
+
+# Install wscat
+npm install -g wscat
+
+# Test the websocket with wscat, then type the question you want to ask
+wscat --connect ws://localhost:9000/api/ws/chat
+
+```
\ No newline at end of file
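The README above tests the ws://localhost:9000/api/ws/chat endpoint with wscat. For a Go-based smoke test, a minimal client sketch using the gorilla/websocket dependency already in go.mod is shown below; it assumes the service is running locally on port 9000 with the default config.yaml, and the sample question text is purely illustrative.

```go
// Minimal sketch of a test client (an alternative to wscat), not part of this commit.
package main

import (
	"fmt"
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	// Dial the chat websocket endpoint exposed by chatgpt-service.
	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:9000/api/ws/chat", nil)
	if err != nil {
		log.Fatalf("dial error: %v", err)
	}
	defer conn.Close()

	// Send a question as a plain text message.
	if err := conn.WriteMessage(websocket.TextMessage, []byte("Hello, what can you do?")); err != nil {
		log.Fatalf("write error: %v", err)
	}

	// Print the streamed JSON messages until the connection closes.
	for {
		_, msg, err := conn.ReadMessage()
		if err != nil {
			log.Printf("read error (connection closed?): %v", err)
			return
		}
		fmt.Println(string(msg))
	}
}
```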
diff --git a/chat/common.go b/chat/common.go
new file mode 100644
index 0000000..991bcad
--- /dev/null
+++ b/chat/common.go
@@ -0,0 +1,46 @@
+package chat
+
+import (
+    log "github.com/sirupsen/logrus"
+    "os"
+    "time"
+)
+
+type Logger struct {
+}
+
+func (logger *Logger) LoggerInit() {
+    log.SetFormatter(&log.TextFormatter{
+        FullTimestamp:   true,
+        TimestampFormat: "2006-01-02 15:04:05.000",
+    })
+    log.SetOutput(os.Stdout)
+    log.SetLevel(log.InfoLevel)
+}
+
+func (logger Logger) LogDebug(args ...interface{}) {
+    log.Debug(args...)
+}
+
+func (logger Logger) LogInfo(args ...interface{}) {
+    log.Info(args...)
+}
+
+func (logger Logger) LogWarn(args ...interface{}) {
+    log.Warn(args...)
+}
+
+func (logger Logger) LogError(args ...interface{}) {
+    log.Error(args...)
+}
+
+func (logger Logger) LogPanic(args ...interface{}) {
+    log.Panic(args...)
+}
+
+const (
+    StatusFail string = "FAIL"
+
+    pingPeriod = time.Second * 50
+    pingWait   = time.Second * 60
+)
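chat/common.go pairs a 50-second ping interval (pingPeriod) with a 60-second read deadline (pingWait); the ping must fire before the deadline expires, otherwise idle but healthy connections would be dropped. A minimal sketch of a package-internal test guarding that relationship (the test file and name are hypothetical, not part of this commit):

```go
// chat/common_test.go (illustrative name): guard the keepalive timing.
package chat

import "testing"

func TestKeepaliveWindow(t *testing.T) {
	// A ping is sent every pingPeriod and the read deadline is pushed out by
	// pingWait on every pong, so pingPeriod must stay below pingWait.
	if pingPeriod >= pingWait {
		t.Fatalf("pingPeriod (%v) must be shorter than pingWait (%v)", pingPeriod, pingWait)
	}
}
```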
diff --git a/chat/service.go b/chat/service.go
new file mode 100644
index 0000000..f132539
--- /dev/null
+++ b/chat/service.go
@@ -0,0 +1,318 @@
+package chat
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "github.com/gin-gonic/gin"
+    "github.com/google/uuid"
+    "github.com/gorilla/websocket"
+    gogpt "github.com/sashabaranov/go-gpt3"
+    "io"
+    "net/http"
+    "strings"
+    "sync"
+    "time"
+)
+
+type Api struct {
+    Config Config
+    Logger
+}
+
+type ApiResponse struct {
+    Status   string      `yaml:"status" json:"status" bson:"status" validate:""`
+    Msg      string      `yaml:"msg" json:"msg" bson:"msg" validate:""`
+    Duration string      `yaml:"duration" json:"duration" bson:"duration" validate:""`
+    Data     interface{} `yaml:"data" json:"data" bson:"data" validate:""`
+}
+
+type Message struct {
+    Msg        string `yaml:"msg" json:"msg" bson:"msg" validate:""`
+    MsgId      string `yaml:"msgId" json:"msgId" bson:"msgId" validate:""`
+    Kind       string `yaml:"kind" json:"kind" bson:"kind" validate:""`
+    CreateTime string `yaml:"createTime" json:"createTime" bson:"createTime" validate:""`
+}
+
+func (api *Api) responseFunc(c *gin.Context, startTime time.Time, status, msg string, httpStatus int, data map[string]interface{}) {
+    duration := time.Since(startTime)
+    ar := ApiResponse{
+        Status:   status,
+        Msg:      msg,
+        Duration: duration.String(),
+        Data:     data,
+    }
+    c.JSON(httpStatus, ar)
+}
+
+func (api *Api) wsCheckConnectStatus(conn *websocket.Conn, chClose chan int) {
+    var err error
+    defer func() {
+        conn.Close()
+    }()
+    conn.SetReadDeadline(time.Now().Add(pingWait))
+    conn.SetPongHandler(func(s string) error {
+        conn.SetReadDeadline(time.Now().Add(pingWait))
+        return nil
+    })
+    for {
+        _, _, err = conn.ReadMessage()
+        if err != nil {
+            chClose <- 0
+            return
+        }
+    }
+}
+
+func (api *Api) wsPingMsg(conn *websocket.Conn, chClose, chIsCloseSet chan int) {
+    var err error
+    ticker := time.NewTicker(pingPeriod)
+
+    var mutex = &sync.Mutex{}
+
+    defer func() {
+        ticker.Stop()
+        conn.Close()
+    }()
+    for {
+        select {
+        case <-ticker.C:
+            conn.SetWriteDeadline(time.Now().Add(pingWait))
+            mutex.Lock()
+            err = conn.WriteMessage(websocket.PingMessage, nil)
+            if err != nil {
+                return
+            }
+            mutex.Unlock()
+        case <-chClose:
+            api.LogInfo(fmt.Sprintf("# websocket connection closed"))
+            chIsCloseSet <- 0
+            return
+        }
+    }
+}
+
+func (api *Api) GetChatMessage(conn *websocket.Conn, cli *gogpt.Client, mutex *sync.Mutex, requestMsg string) {
+    var strResp string
+    req := gogpt.CompletionRequest{
+        Model:            gogpt.GPT3TextDavinci003,
+        MaxTokens:        api.Config.MaxLength,
+        Temperature:      0.6,
+        Prompt:           requestMsg,
+        Stream:           true,
+        Stop:             []string{"\n\n\n"},
+        TopP:             1,
+        FrequencyPenalty: 0.1,
+        PresencePenalty:  0.1,
+    }
+
+    ctx := context.Background()
+    stream, err := cli.CreateCompletionStream(ctx, req)
+    if err != nil {
+        err = fmt.Errorf("[ERROR] create chatGPT stream error: %s", err.Error())
+        chatMsg := Message{
+            Kind:       "error",
+            Msg:        err.Error(),
+            MsgId:      uuid.New().String(),
+            CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+        }
+        mutex.Lock()
+        _ = conn.WriteJSON(chatMsg)
+        mutex.Unlock()
+        api.Logger.LogError(err.Error())
+        return
+    }
+    defer stream.Close()
+
+    id := uuid.New().String()
+    var i int
+    for {
+        response, err := stream.Recv()
+        if errors.Is(err, io.EOF) {
+            var s string
+            var kind string
+            if i == 0 {
+                s = "[ERROR] NO RESPONSE, PLEASE RETRY"
+                kind = "retry"
+            } else {
+                s = "\n\n###### [END] ######"
+                kind = "chat"
+            }
+            chatMsg := Message{
+                Kind:       kind,
+                Msg:        s,
+                MsgId:      id,
+                CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+            }
+            mutex.Lock()
+            _ = conn.WriteJSON(chatMsg)
+            mutex.Unlock()
+            if kind == "retry" {
+                api.Logger.LogError(s)
+            }
+            break
+        }
+        if err != nil {
+            err = fmt.Errorf("[ERROR] receive chatGPT stream error: %s", err.Error())
+            chatMsg := Message{
+                Kind:       "error",
+                Msg:        err.Error(),
+                MsgId:      id,
+                CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+            }
+            mutex.Lock()
+            _ = conn.WriteJSON(chatMsg)
+            mutex.Unlock()
+            api.Logger.LogError(err.Error())
+            return
+        }
+
+        if len(response.Choices) > 0 {
+            var s string
+            if i == 0 {
+                s = fmt.Sprintf(`%s# %s`, s, requestMsg)
+            }
+            for _, choice := range response.Choices {
+                s = s + choice.Text
+            }
+            strResp = strResp + s
+            chatMsg := Message{
+                Kind:       "chat",
+                Msg:        s,
+                MsgId:      id,
+                CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+            }
+            mutex.Lock()
+            _ = conn.WriteJSON(chatMsg)
+            mutex.Unlock()
+        }
+        i = i + 1
+    }
+    if strResp != "" {
+        api.Logger.LogInfo(fmt.Sprintf("[RESPONSE] %s%s", requestMsg, strResp))
+    }
+}
+
+func (api *Api) WsChat(c *gin.Context) {
+    startTime := time.Now()
+    status := StatusFail
+    msg := ""
+    httpStatus := http.StatusForbidden
+    data := map[string]interface{}{}
+
+    wsupgrader := websocket.Upgrader{
+        ReadBufferSize:  1024,
+        WriteBufferSize: 1024,
+    }
+    wsupgrader.CheckOrigin = func(r *http.Request) bool {
+        return true
+    }
+    mutex := &sync.Mutex{}
+    conn, err := wsupgrader.Upgrade(c.Writer, c.Request, nil)
+    if err != nil {
+        err = fmt.Errorf("failed to upgrade websocket %s", err.Error())
+        msg = err.Error()
+        api.responseFunc(c, startTime, status, msg, httpStatus, data)
+        return
+    }
+    defer func() {
+        _ = conn.Close()
+    }()
+
+    var isClosed bool
+    chClose := make(chan int)
+    chIsCloseSet := make(chan int)
+    defer func() {
+        conn.Close()
+    }()
+    go api.wsCheckConnectStatus(conn, chClose)
+    go api.wsPingMsg(conn, chClose, chIsCloseSet)
+    go func() {
+        for {
+            select {
+            case <-chIsCloseSet:
+                isClosed = true
+                return
+            }
+        }
+    }()
+
+    api.Logger.LogInfo(fmt.Sprintf("websocket connection open"))
+    cli := gogpt.NewClient(api.Config.AppKey)
+
+    var latestRequestTime time.Time
+    for {
+        if isClosed {
+            return
+        }
+        // read in a message
+        messageType, bs, err := conn.ReadMessage()
+        if err != nil {
+            err = fmt.Errorf("read message error: %s", err.Error())
+            api.Logger.LogError(err.Error())
+            return
+        }
+        switch messageType {
+        case websocket.TextMessage:
+            requestMsg := string(bs)
+            api.Logger.LogInfo(fmt.Sprintf("[REQUEST] %s", requestMsg))
+            var ok bool
+            if latestRequestTime.IsZero() {
+                latestRequestTime = time.Now()
+                ok = true
+            } else {
+                if time.Since(latestRequestTime) < time.Second*time.Duration(api.Config.IntervalSeconds) {
+                    err = fmt.Errorf("please wait %d seconds for next query", api.Config.IntervalSeconds)
+                    chatMsg := Message{
+                        Kind:       "error",
+                        Msg:        err.Error(),
+                        MsgId:      uuid.New().String(),
+                        CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+                    }
+                    mutex.Lock()
+                    _ = conn.WriteJSON(chatMsg)
+                    mutex.Unlock()
+                    api.Logger.LogError(err.Error())
+                } else {
+                    ok = true
+                    latestRequestTime = time.Now()
+                }
+            }
+            if ok {
+                if len(strings.Trim(requestMsg, " ")) < 2 {
+                    err = fmt.Errorf("message too short")
+                    chatMsg := Message{
+                        Kind:       "error",
+                        Msg:        err.Error(),
+                        MsgId:      uuid.New().String(),
+                        CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+                    }
+                    mutex.Lock()
+                    _ = conn.WriteJSON(chatMsg)
+                    mutex.Unlock()
+                    api.Logger.LogError(err.Error())
+                } else {
+
+                    chatMsg := Message{
+                        Kind:       "receive",
+                        Msg:        requestMsg,
+                        MsgId:      uuid.New().String(),
+                        CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+                    }
+                    mutex.Lock()
+                    _ = conn.WriteJSON(chatMsg)
+                    mutex.Unlock()
+                    go api.GetChatMessage(conn, cli, mutex, requestMsg)
+                }
+            }
+        case websocket.CloseMessage:
+            isClosed = true
+            api.Logger.LogInfo("[CLOSED] websocket receive closed message")
+        case websocket.PingMessage:
+            api.Logger.LogInfo("[PING] websocket receive ping message")
+        default:
+            api.Logger.LogError("websocket receive message type error")
+            return
+        }
+    }
+}
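chat/service.go answers each question with a stream of JSON Message frames whose Kind is "receive" (echo of the question), "chat" (completion fragments plus the [END] marker), "error", or "retry". Below is a sketch of how a client might decode and dispatch those frames; the local message struct mirrors chat.Message, and handleFrame is an illustrative helper that is not part of this commit.

```go
// Sketch of client-side handling for the Message frames emitted by WsChat/GetChatMessage.
package main

import (
	"encoding/json"
	"fmt"
)

// message mirrors the json tags of chat.Message.
type message struct {
	Msg        string `json:"msg"`
	MsgId      string `json:"msgId"`
	Kind       string `json:"kind"`
	CreateTime string `json:"createTime"`
}

func handleFrame(raw []byte) error {
	var m message
	if err := json.Unmarshal(raw, &m); err != nil {
		return fmt.Errorf("decode message error: %w", err)
	}
	switch m.Kind {
	case "receive":
		fmt.Printf("[echo %s] %s\n", m.CreateTime, m.Msg) // server echoes the question back
	case "chat":
		fmt.Print(m.Msg) // streamed completion fragment, append to the answer
	case "retry":
		fmt.Println("empty response, resend the question")
	case "error":
		fmt.Println("server error:", m.Msg)
	}
	return nil
}

func main() {
	_ = handleFrame([]byte(`{"msg":"hello","msgId":"1","kind":"chat","createTime":"2023-02-10 13:03:52"}`))
}
```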
diff --git a/chat/types.go b/chat/types.go
new file mode 100644
index 0000000..b8e0484
--- /dev/null
+++ b/chat/types.go
@@ -0,0 +1,9 @@
+package chat
+
+type Config struct {
+    AppKey          string `yaml:"appKey" json:"appKey" bson:"appKey" validate:"required"`
+    Port            int    `yaml:"port" json:"port" bson:"port" validate:"required"`
+    IntervalSeconds int    `yaml:"intervalSeconds" json:"intervalSeconds" bson:"intervalSeconds" validate:"required"`
+    MaxLength       int    `yaml:"maxLength" json:"maxLength" bson:"maxLength" validate:"required"`
+    Cors            bool   `yaml:"cors" json:"cors" bson:"cors" validate:""`
+}
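chat/types.go tags the required Config fields with `validate:"required"`, but this commit only checks AppKey in main.go. If stricter checking were wanted, go-playground/validator (present in go.mod as an indirect dependency of gin, so using it directly would promote it to a direct requirement) could enforce the tags. A hedged sketch with a hypothetical validateConfig helper:

```go
// Illustrative only: enforce the validate:"required" tags on chat.Config.
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"

	"chatgpt-service/chat"
)

func validateConfig(cfg chat.Config) error {
	v := validator.New()
	if err := v.Struct(cfg); err != nil {
		return fmt.Errorf("config validation error: %w", err)
	}
	return nil
}

func main() {
	cfg := chat.Config{AppKey: "xxxxxx", Port: 9000, IntervalSeconds: 5, MaxLength: 1500}
	fmt.Println(validateConfig(cfg)) // <nil> when all required fields are set
}
```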
diff --git a/chatgpt-service.gif b/chatgpt-service.gif
new file mode 100644
index 0000000..acddb6e
Binary files /dev/null and b/chatgpt-service.gif differ
diff --git a/config.yaml b/config.yaml
new file mode 100644
index 0000000..de0398c
--- /dev/null
+++ b/config.yaml
@@ -0,0 +1,10 @@
+# openai appKey
+appKey: "xxxxxx"
+# service port
+port: 9000
+# minimum interval between two questions, in seconds
+intervalSeconds: 5
+# maximum length of the returned answer
+maxLength: 1500
+# whether to allow CORS cross-origin requests
+cors: true
diff --git a/docker-compose.yaml b/docker-compose.yaml
new file mode 100644
index 0000000..9df4372
--- /dev/null
+++ b/docker-compose.yaml
@@ -0,0 +1,22 @@
+version: "3"
+services:
+  chatgpt-stream:
+    image: "doryengine/chatgpt-stream:v1.0.0"
+    hostname: chatgpt-stream
+    container_name: chatgpt-stream
+    ports:
+      - "3000:80"
+      # - "443:443"
+    depends_on:
+      - chatgpt-service
+    restart: always
+  chatgpt-service:
+    image: "doryengine/chatgpt-service:v1.0.0-alpine"
+    hostname: chatgpt-service
+    container_name: chatgpt-service
+    ports:
+      - "9000"
+    volumes:
+      - ./config.yaml:/chatgpt-service/config.yaml
+    command: /chatgpt-service/chatgpt-service
+    restart: always
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..6bb71b7
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,34 @@
+module chatgpt-service
+
+go 1.18
+
+require (
+    github.com/gin-contrib/cors v1.4.0
+    github.com/gin-gonic/gin v1.8.2
+    github.com/google/uuid v1.3.0
+    github.com/gorilla/websocket v1.5.0
+    github.com/sashabaranov/go-gpt3 v1.0.0
+    github.com/sirupsen/logrus v1.9.0
+    gopkg.in/yaml.v3 v3.0.1
+)
+
+require (
+    github.com/gin-contrib/sse v0.1.0 // indirect
+    github.com/go-playground/locales v0.14.0 // indirect
+    github.com/go-playground/universal-translator v0.18.0 // indirect
+    github.com/go-playground/validator/v10 v10.11.1 // indirect
+    github.com/goccy/go-json v0.9.11 // indirect
+    github.com/json-iterator/go v1.1.12 // indirect
+    github.com/leodido/go-urn v1.2.1 // indirect
+    github.com/mattn/go-isatty v0.0.16 // indirect
+    github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect
+    github.com/modern-go/reflect2 v1.0.2 // indirect
+    github.com/pelletier/go-toml/v2 v2.0.6 // indirect
+    github.com/ugorji/go/codec v1.2.7 // indirect
+    golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 // indirect
+    golang.org/x/net v0.4.0 // indirect
+    golang.org/x/sys v0.3.0 // indirect
+    golang.org/x/text v0.5.0 // indirect
+    google.golang.org/protobuf v1.28.1 // indirect
+    gopkg.in/yaml.v2 v2.4.0 // indirect
+)
diff --git a/main.go b/main.go
new file mode 100644
index 0000000..d2558ee
--- /dev/null
+++ b/main.go
@@ -0,0 +1,58 @@
+package main
+
+import (
+    "chatgpt-service/chat"
+    "fmt"
+    "os"
+
+    "github.com/gin-contrib/cors"
+    "github.com/gin-gonic/gin"
+    "gopkg.in/yaml.v3"
+)
+
+func main() {
+    logger := chat.Logger{}
+    logger.LoggerInit()
+
+    bs, err := os.ReadFile("config.yaml")
+    if err != nil {
+        err = fmt.Errorf("read file config.yaml error: %s", err.Error())
+        logger.LogError(err.Error())
+        return
+    }
+    var config chat.Config
+    err = yaml.Unmarshal(bs, &config)
+    if err != nil {
+        err = fmt.Errorf("parse config.yaml error: %s", err.Error())
+        logger.LogError(err.Error())
+        return
+    }
+    if config.AppKey == "" {
+        logger.LogError(fmt.Sprintf("appKey is empty"))
+        return
+    }
+
+    api := chat.Api{
+        Config: config,
+        Logger: logger,
+    }
+    r := gin.Default()
+    if config.Cors {
+        cfg := cors.DefaultConfig()
+        cfg.AllowAllOrigins = true
+        cfg.AllowHeaders = []string{"content-type"}
+        r.Use(cors.New(cfg))
+    }
+
+    groupApi := r.Group("/api")
+    groupWs := groupApi.Group("/ws")
+    groupWs.GET("chat", api.WsChat)
+
+    logger.LogInfo("chatGPT query service start")
+    err = r.Run(fmt.Sprintf(":%d", config.Port))
+    if err != nil {
+        err = fmt.Errorf("run service error: %s", err.Error())
+        logger.LogPanic(err.Error())
+        return
+    }
+}
diff --git a/nginx.conf b/nginx.conf
new file mode 100644
index 0000000..98b4057
--- /dev/null
+++ b/nginx.conf
@@ -0,0 +1,84 @@
+
+user  nginx;
+worker_processes  auto;
+
+error_log  /var/log/nginx/error.log notice;
+pid        /var/run/nginx.pid;
+
+
+events {
+    worker_connections  1024;
+}
+
+
+http {
+    include       /etc/nginx/mime.types;
+    default_type  application/octet-stream;
+    client_max_body_size 20M;
+
+    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
+                      '$status $body_bytes_sent "$http_referer" '
+                      '"$http_user_agent" "$http_x_forwarded_for"';
+
+    access_log  /var/log/nginx/access.log  main;
+
+    sendfile        on;
+    #tcp_nopush     on;
+
+    keepalive_timeout  65;
+
+    #gzip  on;
+
+    server {
+        listen       80;
+        server_name  localhost;
+
+        root   /chatgpt-stream;
+        index  index.html index.htm;
+        #access_log  /var/log/nginx/host.access.log  main;
+
+
+        location /api/ {
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $remote_addr;
+            proxy_set_header Host $host;
+            proxy_http_version 1.1;
+            proxy_set_header Upgrade $http_upgrade;
+            proxy_set_header Connection "Upgrade";
+            proxy_pass http://chatgpt-service:9000;
+            max_ranges 0;
+        }
+
+        #error_page  404              /404.html;
+
+        # redirect server error pages to the static page /50x.html
+        #
+        error_page   500 502 503 504  /50x.html;
+        location = /50x.html {
+            root   /usr/share/nginx/html;
+        }
+
+        # proxy the PHP scripts to Apache listening on 127.0.0.1:80
+        #
+        #location ~ \.php$ {
+        #    proxy_pass   http://127.0.0.1;
+        #}
+
+        # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
+        #
+        #location ~ \.php$ {
+        #    root           html;
+        #    fastcgi_pass   127.0.0.1:9000;
+        #    fastcgi_index  index.php;
+        #    fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;
+        #    include        fastcgi_params;
+        #}
+
+        # deny access to .htaccess files, if Apache's document root
+        # concurs with nginx's one
+        #
+        #location ~ /\.ht {
+        #    deny  all;
+        #}
+    }
+}