diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 0000000..f9d47a1
Binary files /dev/null and b/.DS_Store differ
diff --git a/Dockerfile b/Dockerfile
index dbccbc1..f1eaec4 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,11 +2,11 @@ FROM alpine:3.15.3
 
 LABEL maintainer="cookeem"
 LABEL email="cookeem@qq.com"
-LABEL version="v1.0.1"
+LABEL version="v1.0.2"
 
 RUN adduser -h /chatgpt-service -u 1000 -D dory
 COPY chatgpt-service /chatgpt-service/
 WORKDIR /chatgpt-service
 USER dory
 
-# docker build -t doryengine/chatgpt-service:v1.0.1-alpine .
+# docker build -t doryengine/chatgpt-service:v1.0.2-alpine .
diff --git a/README.md b/README.md
index e644b29..82bd418 100644
--- a/README.md
+++ b/README.md
@@ -31,10 +31,10 @@ cd chatgpt-service
 
 # ChatGPT registration tutorial: https://www.cnblogs.com/damugua/p/16969508.html
 # ChatGPT API key management page: https://beta.openai.com/account/api-keys
-# Modify the config.yaml configuration file, modify the appKey, and change it to your openai.com API key
+# Edit the config.yaml configuration file and set apiKey to your openai.com API key
 vi config.yaml
 
 # your openai.com API key
-appKey: "xxxxxx"
+apiKey: "xxxxxx"
 
 # Start the service with docker-compose
diff --git a/README_CN.md b/README_CN.md
index b951279..84aea68 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -31,10 +31,10 @@ cd chatgpt-service
 
 # chatGPT的注册教程: https://www.cnblogs.com/damugua/p/16969508.html
 # chatGPT的APIkey管理界面: https://beta.openai.com/account/api-keys
-# 修改config.yaml配置文件,修改appKey,改为你的openai.com的appKey
+# 修改config.yaml配置文件,修改apiKey,改为你的openai.com的apiKey
 vi config.yaml
 
-# openai的appKey,改为你的apiKey
-appKey: "xxxxxx"
+# openai的apiKey,改为你的apiKey
+apiKey: "xxxxxx"
 
 # 使用docker-compose启动服务
diff --git a/chat/common.go b/chat/common.go
index 991bcad..dca30db 100644
--- a/chat/common.go
+++ b/chat/common.go
@@ -1,6 +1,7 @@
 package chat
 
 import (
+    "github.com/sashabaranov/go-openai"
     log "github.com/sirupsen/logrus"
     "os"
     "time"
@@ -41,6 +42,29 @@ func (logger Logger) LogPanic(args ...interface{}) {
 const (
     StatusFail string = "FAIL"
 
-    pingPeriod = time.Second * 50
-    pingWait   = time.Second * 60
+    PingPeriod = time.Second * 50
+    PingWait   = time.Second * 60
+)
+
+var (
+    GPTModels = []string{
+        openai.GPT432K0314,
+        openai.GPT432K,
+        openai.GPT40314,
+        openai.GPT4,
+        openai.GPT3Dot5Turbo0301,
+        openai.GPT3Dot5Turbo,
+        openai.GPT3TextDavinci003,
+        openai.GPT3TextDavinci002,
+        openai.GPT3TextCurie001,
+        openai.GPT3TextBabbage001,
+        openai.GPT3TextAda001,
+        openai.GPT3TextDavinci001,
+        openai.GPT3DavinciInstructBeta,
+        openai.GPT3Davinci,
+        openai.GPT3CurieInstructBeta,
+        openai.GPT3Curie,
+        openai.GPT3Ada,
+        openai.GPT3Babbage,
+    }
 )
diff --git a/chat/service.go b/chat/service.go
index 2bec4c7..ae9db64 100644
--- a/chat/service.go
+++ b/chat/service.go
@@ -48,7 +48,7 @@ func (api *Api) responseFunc(c *gin.Context, startTime time.Time, status, msg st
 func (api *Api) wsPingMsg(conn *websocket.Conn, chClose, chIsCloseSet chan int) {
     var err error
 
-    ticker := time.NewTicker(pingPeriod)
+    ticker := time.NewTicker(PingPeriod)
 
     var mutex = &sync.Mutex{}
 
@@ -59,7 +59,7 @@ func (api *Api) wsPingMsg(conn *websocket.Conn, chClose, chIsCloseSet chan int)
     for {
         select {
         case <-ticker.C:
-            conn.SetWriteDeadline(time.Now().Add(pingWait))
+            conn.SetWriteDeadline(time.Now().Add(PingWait))
             mutex.Lock()
             err = conn.WriteMessage(websocket.PingMessage, nil)
             if err != nil {
@@ -77,97 +77,189 @@
 func (api *Api) GetChatMessage(conn *websocket.Conn, cli *openai.Client, mutex *sync.Mutex, requestMsg string) {
     var err error
     var strResp string
-    model := openai.GPT3Dot5Turbo0301
-    req := openai.ChatCompletionRequest{
-        Model:       model,
-        MaxTokens:   api.Config.MaxLength,
-        Temperature: 1.0,
-        Messages: []openai.ChatCompletionMessage{
-            {
-                Role:    openai.ChatMessageRoleUser,
-                Content: requestMsg,
-            },
-        },
-        Stream:           true,
-        TopP:             1,
-        FrequencyPenalty: 0.1,
-        PresencePenalty:  0.1,
-    }
     ctx := context.Background()
-    stream, err := cli.CreateChatCompletionStream(ctx, req)
-    if err != nil {
-        err = fmt.Errorf("[ERROR] create chatGPT stream model=%s error: %s", model, err.Error())
-        chatMsg := Message{
-            Kind:       "error",
-            Msg:        err.Error(),
-            MsgId:      uuid.New().String(),
-            CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+    switch api.Config.Model {
+    case openai.GPT3Dot5Turbo0301, openai.GPT3Dot5Turbo, openai.GPT4, openai.GPT40314, openai.GPT432K0314, openai.GPT432K:
+        req := openai.ChatCompletionRequest{
+            Model:       api.Config.Model,
+            MaxTokens:   api.Config.MaxLength,
+            Temperature: 1.0,
+            Messages: []openai.ChatCompletionMessage{
+                {
+                    Role:    openai.ChatMessageRoleUser,
+                    Content: requestMsg,
+                },
+            },
+            Stream:           true,
+            TopP:             1,
+            FrequencyPenalty: 0.1,
+            PresencePenalty:  0.1,
         }
-        mutex.Lock()
-        _ = conn.WriteJSON(chatMsg)
-        mutex.Unlock()
+
+        stream, err := cli.CreateChatCompletionStream(ctx, req)
+        if err != nil {
+            err = fmt.Errorf("[ERROR] create chatGPT stream model=%s error: %s", api.Config.Model, err.Error())
+            chatMsg := Message{
+                Kind:       "error",
+                Msg:        err.Error(),
+                MsgId:      uuid.New().String(),
+                CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+            }
+            mutex.Lock()
+            _ = conn.WriteJSON(chatMsg)
+            mutex.Unlock()
+            api.Logger.LogError(err.Error())
+            return
+        }
+        defer stream.Close()
+
+        id := uuid.New().String()
+        var i int
+        for {
+            response, err := stream.Recv()
+            if err != nil {
+                var s string
+                var kind string
+                if errors.Is(err, io.EOF) {
+                    if i == 0 {
+                        s = "[ERROR] NO RESPONSE, PLEASE RETRY"
+                        kind = "retry"
+                    } else {
+                        s = "\n\n###### [END] ######"
+                        kind = "chat"
+                    }
+                } else {
+                    s = fmt.Sprintf("[ERROR] %s", err.Error())
+                    kind = "error"
+                }
+                chatMsg := Message{
+                    Kind:       kind,
+                    Msg:        s,
+                    MsgId:      id,
+                    CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+                }
+                mutex.Lock()
+                _ = conn.WriteJSON(chatMsg)
+                mutex.Unlock()
+                break
+            }
+
+            if len(response.Choices) > 0 {
+                var s string
+                if i == 0 {
+                    s = fmt.Sprintf(`%s# %s`, s, requestMsg)
+                }
+                for _, choice := range response.Choices {
+                    s = s + choice.Delta.Content
+                }
+                strResp = strResp + s
+                chatMsg := Message{
+                    Kind:       "chat",
+                    Msg:        s,
+                    MsgId:      id,
+                    CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+                }
+                mutex.Lock()
+                _ = conn.WriteJSON(chatMsg)
+                mutex.Unlock()
+            }
+            i = i + 1
+        }
+        if strResp != "" {
+            api.Logger.LogInfo(fmt.Sprintf("[RESPONSE] %s\n", strResp))
+        }
+    case openai.GPT3TextDavinci003, openai.GPT3TextDavinci002, openai.GPT3TextCurie001, openai.GPT3TextBabbage001, openai.GPT3TextAda001, openai.GPT3TextDavinci001, openai.GPT3DavinciInstructBeta, openai.GPT3Davinci, openai.GPT3CurieInstructBeta, openai.GPT3Curie, openai.GPT3Ada, openai.GPT3Babbage:
+        req := openai.CompletionRequest{
+            Model:       api.Config.Model,
+            MaxTokens:   api.Config.MaxLength,
+            Temperature: 0.6,
+            Prompt:      requestMsg,
+            Stream:      true,
+            //Stop: []string{"\n\n\n"},
+            TopP:             1,
+            FrequencyPenalty: 0.1,
+            PresencePenalty:  0.1,
+        }
+
+        stream, err := cli.CreateCompletionStream(ctx, req)
+        if err != nil {
+            err = fmt.Errorf("[ERROR] create chatGPT stream model=%s error: %s", api.Config.Model, err.Error())
+            chatMsg := Message{
+                Kind:       "error",
+                Msg:        err.Error(),
+                MsgId:      uuid.New().String(),
+                CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+            }
+            mutex.Lock()
+            _ = conn.WriteJSON(chatMsg)
+            mutex.Unlock()
+            api.Logger.LogError(err.Error())
+            return
+        }
+        defer stream.Close()
+
+        id := uuid.New().String()
+        var i int
+        for {
+            response, err := stream.Recv()
+            if err != nil {
+                var s string
+                var kind string
+                if errors.Is(err, io.EOF) {
+                    if i == 0 {
+                        s = "[ERROR] NO RESPONSE, PLEASE RETRY"
+                        kind = "retry"
+                    } else {
+                        s = "\n\n###### [END] ######"
+                        kind = "chat"
+                    }
+                } else {
+                    s = fmt.Sprintf("[ERROR] %s", err.Error())
+                    kind = "error"
+                }
+                chatMsg := Message{
+                    Kind:       kind,
+                    Msg:        s,
+                    MsgId:      id,
+                    CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+                }
+                mutex.Lock()
+                _ = conn.WriteJSON(chatMsg)
+                mutex.Unlock()
+                break
+            }
+
+            if len(response.Choices) > 0 {
+                var s string
+                if i == 0 {
+                    s = fmt.Sprintf(`%s# %s`, s, requestMsg)
+                }
+                for _, choice := range response.Choices {
+                    s = s + choice.Text
+                }
+                strResp = strResp + s
+                chatMsg := Message{
+                    Kind:       "chat",
+                    Msg:        s,
+                    MsgId:      id,
+                    CreateTime: time.Now().Format("2006-01-02 15:04:05"),
+                }
+                mutex.Lock()
+                _ = conn.WriteJSON(chatMsg)
+                mutex.Unlock()
+            }
+            i = i + 1
+        }
+        if strResp != "" {
+            api.Logger.LogInfo(fmt.Sprintf("[RESPONSE] %s\n", strResp))
+        }
+    default:
+        err = fmt.Errorf("model does not exist")
         api.Logger.LogError(err.Error())
         return
     }
-    defer stream.Close()
-
-    id := uuid.New().String()
-    var i int
-    for {
-        response, err := stream.Recv()
-        if err != nil {
-            var s string
-            var kind string
-            if errors.Is(err, io.EOF) {
-                if i == 0 {
-                    s = "[ERROR] NO RESPONSE, PLEASE RETRY"
-                    kind = "retry"
-                } else {
-                    s = "\n\n###### [END] ######"
-                    kind = "chat"
-                }
-            } else {
-                s = fmt.Sprintf("[ERROR] %s", err.Error())
-                kind = "error"
-            }
-            chatMsg := Message{
-                Kind:       kind,
-                Msg:        s,
-                MsgId:      id,
-                CreateTime: time.Now().Format("2006-01-02 15:04:05"),
-            }
-            mutex.Lock()
-            _ = conn.WriteJSON(chatMsg)
-            mutex.Unlock()
-            break
-        }
-
-        if len(response.Choices) > 0 {
-            var s string
-            if i == 0 {
-                s = fmt.Sprintf(`%s# %s`, s, requestMsg)
-            }
-            for _, choice := range response.Choices {
-                s = s + choice.Delta.Content
-            }
-            strResp = strResp + s
-            chatMsg := Message{
-                Kind:       "chat",
-                Msg:        s,
-                MsgId:      id,
-                CreateTime: time.Now().Format("2006-01-02 15:04:05"),
-            }
-            mutex.Lock()
-            _ = conn.WriteJSON(chatMsg)
-            mutex.Unlock()
-        }
-        i = i + 1
-    }
-    if strResp != "" {
-        api.Logger.LogInfo(fmt.Sprintf("[RESPONSE] %s\n", strResp))
-    }
 }
 
 func (api *Api) WsChat(c *gin.Context) {
@@ -196,9 +288,9 @@ func (api *Api) WsChat(c *gin.Context) {
         _ = conn.Close()
     }()
 
-    _ = conn.SetReadDeadline(time.Now().Add(pingWait))
+    _ = conn.SetReadDeadline(time.Now().Add(PingWait))
     conn.SetPongHandler(func(s string) error {
-        _ = conn.SetReadDeadline(time.Now().Add(pingWait))
+        _ = conn.SetReadDeadline(time.Now().Add(PingWait))
         return nil
     })
 
@@ -220,7 +312,7 @@ func (api *Api) WsChat(c *gin.Context) {
     }()
 
     api.Logger.LogInfo(fmt.Sprintf("websocket connection open"))
-    cli := openai.NewClient(api.Config.AppKey)
+    cli := openai.NewClient(api.Config.ApiKey)
 
     var latestRequestTime time.Time
     for {
@@ -290,10 +382,10 @@ func (api *Api) WsChat(c *gin.Context) {
             isClosed = true
             api.Logger.LogInfo("[CLOSED] websocket receive closed message")
         case websocket.PingMessage:
-            _ = conn.SetReadDeadline(time.Now().Add(pingWait))
+            _ = conn.SetReadDeadline(time.Now().Add(PingWait))
             api.Logger.LogInfo("[PING] websocket receive ping message")
         case websocket.PongMessage:
-            _ = conn.SetReadDeadline(time.Now().Add(pingWait))
+            _ = conn.SetReadDeadline(time.Now().Add(PingWait))
             api.Logger.LogInfo("[PONG] websocket receive pong message")
         default:
             err = fmt.Errorf("[ERROR] websocket receive message type not text")
diff --git a/chat/types.go b/chat/types.go
index b8e0484..9853fb3 100644
--- a/chat/types.go
+++ b/chat/types.go
@@ -1,9 +1,10 @@
 package chat
 
 type Config struct {
-    AppKey          string `yaml:"appKey" json:"appKey" bson:"appKey" validate:"required"`
+    ApiKey          string `yaml:"apiKey" json:"apiKey" bson:"apiKey" validate:"required"`
     Port            int    `yaml:"port" json:"port" bson:"port" validate:"required"`
     IntervalSeconds int    `yaml:"intervalSeconds" json:"intervalSeconds" bson:"intervalSeconds" validate:"required"`
+    Model           string `yaml:"model" json:"model" bson:"model" validate:"required"`
     MaxLength       int    `yaml:"maxLength" json:"maxLength" bson:"maxLength" validate:"required"`
     Cors            bool   `yaml:"cors" json:"cors" bson:"cors" validate:""`
 }
diff --git a/config.yaml b/config.yaml
index 6ee0fef..df1dda3 100644
--- a/config.yaml
+++ b/config.yaml
@@ -1,12 +1,17 @@
 # Your openai.com API key
 # openai的API Key
-appKey: "xxxxxx"
+apiKey: "xxxxxx"
 # Service port
 # 服务端口
 port: 9000
 # The time interval for sending questions cannot be less than how long, unit: second
 # 问题发送的时间间隔不能小于多长时间,单位:秒
 intervalSeconds: 5
+# GPT model; if you use a GPT-4 model, make sure the corresponding openai account has access to GPT-4
+# Available models include: gpt-4-32k-0314, gpt-4-32k, gpt-4-0314, gpt-4, gpt-3.5-turbo-0301, gpt-3.5-turbo, text-davinci-003, text-davinci-002, text-curie-001, text-babbage-001, text-ada-001, text-davinci-001, davinci-instruct-beta, davinci, curie-instruct-beta, curie, ada, babbage
+# GPT模型,如果使用GPT4模型,请保证对应的openai账号有GPT4模型的使用权限
+# 可用的模型包括: gpt-4-32k-0314, gpt-4-32k, gpt-4-0314, gpt-4, gpt-3.5-turbo-0301, gpt-3.5-turbo, text-davinci-003, text-davinci-002, text-curie-001, text-babbage-001, text-ada-001, text-davinci-001, davinci-instruct-beta, davinci, curie-instruct-beta, curie, ada, babbage
+model: gpt-3.5-turbo-0301
 # The maximum length of the returned answer
 # 返回答案的最大长度
 maxLength: 2000
diff --git a/docker-compose.yaml b/docker-compose.yaml
index b763ca8..d272c6a 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -1,7 +1,7 @@
 version: "3"
 services:
   chatgpt-stream:
-    image: "doryengine/chatgpt-stream:v1.0.1"
+    image: "doryengine/chatgpt-stream:v1.0.2"
     hostname: chatgpt-stream
     container_name: chatgpt-stream
     ports:
@@ -11,7 +11,7 @@ services:
       - chatgpt-service
     restart: always
   chatgpt-service:
-    image: "doryengine/chatgpt-service:v1.0.1-alpine"
+    image: "doryengine/chatgpt-service:v1.0.2-alpine"
     hostname: chatgpt-service
     container_name: chatgpt-service
     ports:
diff --git a/main.go b/main.go
index d2558ee..ec097c4 100644
--- a/main.go
+++ b/main.go
@@ -27,8 +27,19 @@ func main() {
         logger.LogError(err.Error())
         return
     }
-    if config.AppKey == "" {
-        logger.LogError(fmt.Sprintf("appKey is empty"))
+    if config.ApiKey == "" {
+        logger.LogError(fmt.Sprintf("apiKey is empty"))
+        return
+    }
+    var found bool
+    for _, model := range chat.GPTModels {
+        if model == config.Model {
+            found = true
+            break
+        }
+    }
+    if !found {
+        logger.LogError(fmt.Sprintf("model does not exist"))
         return
     }
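Taken together, the config.yaml, chat/types.go, and main.go changes mean a v1.0.2 deployment must carry a new required model key, which is validated against chat.GPTModels at startup. A minimal sketch of loading and checking such a config; the module import path and the use of gopkg.in/yaml.v3 are assumptions here, not something this diff pins down:

package main

import (
    "fmt"
    "os"

    "chatgpt-service/chat" // import path is an assumption
    "gopkg.in/yaml.v3"     // the service's actual YAML loader may differ
)

func main() {
    // Read config.yaml into chat.Config; the yaml struct tags
    // (apiKey, port, intervalSeconds, model, maxLength, cors) drive this.
    bs, err := os.ReadFile("config.yaml")
    if err != nil {
        fmt.Println(err)
        return
    }
    var config chat.Config
    if err := yaml.Unmarshal(bs, &config); err != nil {
        fmt.Println(err)
        return
    }
    // Mirror main.go's startup check: the model must be one of chat.GPTModels.
    for _, model := range chat.GPTModels {
        if model == config.Model {
            fmt.Printf("model %s is supported\n", config.Model)
            return
        }
    }
    fmt.Printf("model %s does not exist\n", config.Model)
}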
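The core of the chat/service.go change is a dispatch on the configured model: the gpt-3.5/gpt-4 family goes through the chat-completion streaming endpoint and accumulates Delta.Content, while the older text/instruct models go through the plain completion streaming endpoint and accumulate Text. A minimal non-streaming sketch of the same split using the go-openai client; isChatModel and ask are illustrative helper names, not part of this patch:

package main

import (
    "context"
    "fmt"

    openai "github.com/sashabaranov/go-openai"
)

// isChatModel mirrors the switch in chat/service.go: these models use the
// chat-completion endpoint; every other entry in chat.GPTModels uses the
// legacy completion endpoint.
func isChatModel(model string) bool {
    switch model {
    case openai.GPT3Dot5Turbo0301, openai.GPT3Dot5Turbo,
        openai.GPT4, openai.GPT40314, openai.GPT432K0314, openai.GPT432K:
        return true
    }
    return false
}

func ask(cli *openai.Client, model, prompt string, maxTokens int) (string, error) {
    ctx := context.Background()
    if isChatModel(model) {
        // Chat models take a message list and answer with a message.
        resp, err := cli.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
            Model:     model,
            MaxTokens: maxTokens,
            Messages: []openai.ChatCompletionMessage{
                {Role: openai.ChatMessageRoleUser, Content: prompt},
            },
        })
        if err != nil {
            return "", err
        }
        if len(resp.Choices) == 0 {
            return "", fmt.Errorf("no choices returned")
        }
        return resp.Choices[0].Message.Content, nil
    }
    // Completion models take a bare prompt and answer with plain text.
    resp, err := cli.CreateCompletion(ctx, openai.CompletionRequest{
        Model:     model,
        MaxTokens: maxTokens,
        Prompt:    prompt,
    })
    if err != nil {
        return "", err
    }
    if len(resp.Choices) == 0 {
        return "", fmt.Errorf("no choices returned")
    }
    return resp.Choices[0].Text, nil
}

func main() {
    cli := openai.NewClient("xxxxxx") // the apiKey value from config.yaml
    answer, err := ask(cli, openai.GPT3Dot5Turbo0301, "hello", 2000)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(answer)
}

The streaming calls the service actually uses (CreateChatCompletionStream and CreateCompletionStream) split along exactly the same line; only the field read from each received chunk differs.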
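Renaming pingPeriod/pingWait to the exported PingPeriod/PingWait also exposes the keepalive contract to other packages: the server pings every 50 seconds and drops a connection whose read deadline (60 seconds) passes without traffic, so any client that answers pings promptly stays connected. A minimal client sketch; the ws://.../ws/chat path is an assumption, since the route registration is outside this diff:

package main

import (
    "fmt"
    "log"

    "github.com/gorilla/websocket"
)

func main() {
    // gorilla/websocket's default ping handler answers the server's pings
    // (sent every chat.PingPeriod) automatically, keeping the connection
    // inside the server's chat.PingWait read deadline.
    conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:9000/ws/chat", nil)
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    // The server treats any text message as a question (non-text frames
    // are rejected in WsChat's default case).
    if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
        log.Fatal(err)
    }

    // Print streamed answer chunks until the server closes the connection.
    for {
        _, data, err := conn.ReadMessage()
        if err != nil {
            return
        }
        fmt.Println(string(data))
    }
}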