Support GPT-4 and custom GPT models

cookeem
2023-03-22 16:18:38 +08:00
parent cd58d80be9
commit 086bfb3ce9
10 changed files with 239 additions and 106 deletions

.DS_Store (binary file, not shown)

View File

@@ -2,11 +2,11 @@ FROM alpine:3.15.3
LABEL maintainer="cookeem"
LABEL email="cookeem@qq.com"
LABEL version="v1.0.1"
LABEL version="v1.0.2"
RUN adduser -h /chatgpt-service -u 1000 -D dory
COPY chatgpt-service /chatgpt-service/
WORKDIR /chatgpt-service
USER dory
# docker build -t doryengine/chatgpt-service:v1.0.1-alpine .
# docker build -t doryengine/chatgpt-service:v1.0.2-alpine .

View File

@@ -31,10 +31,10 @@ cd chatgpt-service
# ChatGPT registration tutorial: https://www.cnblogs.com/damugua/p/16969508.html
# ChatGPT API key management page: https://beta.openai.com/account/api-keys
# Modify the config.yaml configuration file, modify the appKey, and change it to your openai.com API key
# Modify the config.yaml configuration file, modify the apiKey, and change it to your openai.com API key
vi config.yaml
# your openai.com API key
appKey: "xxxxxx"
apiKey: "xxxxxx"
# Start the service with docker-compose

View File

@@ -31,10 +31,10 @@ cd chatgpt-service
# ChatGPT registration tutorial: https://www.cnblogs.com/damugua/p/16969508.html
# ChatGPT API key management page: https://beta.openai.com/account/api-keys
# Modify the config.yaml configuration file, change appKey to your openai.com appKey
# Modify the config.yaml configuration file, change apiKey to your openai.com apiKey
vi config.yaml
# Change the openai appKey to your apiKey
appKey: "xxxxxx"
# Change the openai apiKey to your apiKey
apiKey: "xxxxxx"
# Start the service with docker-compose

View File

@@ -1,6 +1,7 @@
package chat
import (
"github.com/sashabaranov/go-openai"
log "github.com/sirupsen/logrus"
"os"
"time"
@@ -41,6 +42,29 @@ func (logger Logger) LogPanic(args ...interface{}) {
const (
StatusFail string = "FAIL"
pingPeriod = time.Second * 50
pingWait = time.Second * 60
PingPeriod = time.Second * 50
PingWait = time.Second * 60
)
var (
GPTModels = []string{
openai.GPT432K0314,
openai.GPT432K,
openai.GPT40314,
openai.GPT4,
openai.GPT3Dot5Turbo0301,
openai.GPT3Dot5Turbo,
openai.GPT3TextDavinci003,
openai.GPT3TextDavinci002,
openai.GPT3TextCurie001,
openai.GPT3TextBabbage001,
openai.GPT3TextAda001,
openai.GPT3TextDavinci001,
openai.GPT3DavinciInstructBeta,
openai.GPT3Davinci,
openai.GPT3CurieInstructBeta,
openai.GPT3Curie,
openai.GPT3Ada,
openai.GPT3Babbage,
}
)
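For reference, the entries in GPTModels are plain string constants from go-openai: each one evaluates to the model identifier that the model field in config.yaml expects (the same identifiers repeated in the config.yaml comments further below). A minimal check:

package main

import (
    "fmt"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    // Each constant is just the model identifier string used by the OpenAI API,
    // e.g. "gpt-4", "gpt-3.5-turbo-0301", "text-davinci-003".
    fmt.Println(openai.GPT4)
    fmt.Println(openai.GPT3Dot5Turbo0301)
    fmt.Println(openai.GPT3TextDavinci003)
}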

View File

@@ -48,7 +48,7 @@ func (api *Api) responseFunc(c *gin.Context, startTime time.Time, status, msg st
func (api *Api) wsPingMsg(conn *websocket.Conn, chClose, chIsCloseSet chan int) {
var err error
ticker := time.NewTicker(pingPeriod)
ticker := time.NewTicker(PingPeriod)
var mutex = &sync.Mutex{}
@@ -59,7 +59,7 @@ func (api *Api) wsPingMsg(conn *websocket.Conn, chClose, chIsCloseSet chan int)
for {
select {
case <-ticker.C:
conn.SetWriteDeadline(time.Now().Add(pingWait))
conn.SetWriteDeadline(time.Now().Add(PingWait))
mutex.Lock()
err = conn.WriteMessage(websocket.PingMessage, nil)
if err != nil {
@@ -77,97 +77,189 @@ func (api *Api) wsPingMsg(conn *websocket.Conn, chClose, chIsCloseSet chan int)
func (api *Api) GetChatMessage(conn *websocket.Conn, cli *openai.Client, mutex *sync.Mutex, requestMsg string) {
var err error
var strResp string
model := openai.GPT3Dot5Turbo0301
req := openai.ChatCompletionRequest{
Model: model,
MaxTokens: api.Config.MaxLength,
Temperature: 1.0,
Messages: []openai.ChatCompletionMessage{
{
Role: openai.ChatMessageRoleUser,
Content: requestMsg,
},
},
Stream: true,
TopP: 1,
FrequencyPenalty: 0.1,
PresencePenalty: 0.1,
}
ctx := context.Background()
stream, err := cli.CreateChatCompletionStream(ctx, req)
if err != nil {
err = fmt.Errorf("[ERROR] create chatGPT stream model=%s error: %s", model, err.Error())
chatMsg := Message{
Kind: "error",
Msg: err.Error(),
MsgId: uuid.New().String(),
CreateTime: time.Now().Format("2006-01-02 15:04:05"),
switch api.Config.Model {
case openai.GPT3Dot5Turbo0301, openai.GPT3Dot5Turbo, openai.GPT4, openai.GPT40314, openai.GPT432K0314, openai.GPT432K:
req := openai.ChatCompletionRequest{
Model: api.Config.Model,
MaxTokens: api.Config.MaxLength,
Temperature: 1.0,
Messages: []openai.ChatCompletionMessage{
{
Role: openai.ChatMessageRoleUser,
Content: requestMsg,
},
},
Stream: true,
TopP: 1,
FrequencyPenalty: 0.1,
PresencePenalty: 0.1,
}
mutex.Lock()
_ = conn.WriteJSON(chatMsg)
mutex.Unlock()
stream, err := cli.CreateChatCompletionStream(ctx, req)
if err != nil {
err = fmt.Errorf("[ERROR] create chatGPT stream model=%s error: %s", api.Config.Model, err.Error())
chatMsg := Message{
Kind: "error",
Msg: err.Error(),
MsgId: uuid.New().String(),
CreateTime: time.Now().Format("2006-01-02 15:04:05"),
}
mutex.Lock()
_ = conn.WriteJSON(chatMsg)
mutex.Unlock()
api.Logger.LogError(err.Error())
return
}
defer stream.Close()
id := uuid.New().String()
var i int
for {
response, err := stream.Recv()
if err != nil {
var s string
var kind string
if errors.Is(err, io.EOF) {
if i == 0 {
s = "[ERROR] NO RESPONSE, PLEASE RETRY"
kind = "retry"
} else {
s = "\n\n###### [END] ######"
kind = "chat"
}
} else {
s = fmt.Sprintf("[ERROR] %s", err.Error())
kind = "error"
}
chatMsg := Message{
Kind: kind,
Msg: s,
MsgId: id,
CreateTime: time.Now().Format("2006-01-02 15:04:05"),
}
mutex.Lock()
_ = conn.WriteJSON(chatMsg)
mutex.Unlock()
break
}
if len(response.Choices) > 0 {
var s string
if i == 0 {
s = fmt.Sprintf(`%s# %s`, s, requestMsg)
}
for _, choice := range response.Choices {
s = s + choice.Delta.Content
}
strResp = strResp + s
chatMsg := Message{
Kind: "chat",
Msg: s,
MsgId: id,
CreateTime: time.Now().Format("2006-01-02 15:04:05"),
}
mutex.Lock()
_ = conn.WriteJSON(chatMsg)
mutex.Unlock()
}
i = i + 1
}
if strResp != "" {
api.Logger.LogInfo(fmt.Sprintf("[RESPONSE] %s\n", strResp))
}
case openai.GPT3TextDavinci003, openai.GPT3TextDavinci002, openai.GPT3TextCurie001, openai.GPT3TextBabbage001, openai.GPT3TextAda001, openai.GPT3TextDavinci001, openai.GPT3DavinciInstructBeta, openai.GPT3Davinci, openai.GPT3CurieInstructBeta, openai.GPT3Curie, openai.GPT3Ada, openai.GPT3Babbage:
req := openai.CompletionRequest{
Model: api.Config.Model,
MaxTokens: api.Config.MaxLength,
Temperature: 0.6,
Prompt: requestMsg,
Stream: true,
//Stop: []string{"\n\n\n"},
TopP: 1,
FrequencyPenalty: 0.1,
PresencePenalty: 0.1,
}
stream, err := cli.CreateCompletionStream(ctx, req)
if err != nil {
err = fmt.Errorf("[ERROR] create chatGPT stream model=%s error: %s", api.Config.Model, err.Error())
chatMsg := Message{
Kind: "error",
Msg: err.Error(),
MsgId: uuid.New().String(),
CreateTime: time.Now().Format("2006-01-02 15:04:05"),
}
mutex.Lock()
_ = conn.WriteJSON(chatMsg)
mutex.Unlock()
api.Logger.LogError(err.Error())
return
}
defer stream.Close()
id := uuid.New().String()
var i int
for {
response, err := stream.Recv()
if err != nil {
var s string
var kind string
if errors.Is(err, io.EOF) {
if i == 0 {
s = "[ERROR] NO RESPONSE, PLEASE RETRY"
kind = "retry"
} else {
s = "\n\n###### [END] ######"
kind = "chat"
}
} else {
s = fmt.Sprintf("[ERROR] %s", err.Error())
kind = "error"
}
chatMsg := Message{
Kind: kind,
Msg: s,
MsgId: id,
CreateTime: time.Now().Format("2006-01-02 15:04:05"),
}
mutex.Lock()
_ = conn.WriteJSON(chatMsg)
mutex.Unlock()
break
}
if len(response.Choices) > 0 {
var s string
if i == 0 {
s = fmt.Sprintf(`%s# %s`, s, requestMsg)
}
for _, choice := range response.Choices {
s = s + choice.Text
}
strResp = strResp + s
chatMsg := Message{
Kind: "chat",
Msg: s,
MsgId: id,
CreateTime: time.Now().Format("2006-01-02 15:04:05"),
}
mutex.Lock()
_ = conn.WriteJSON(chatMsg)
mutex.Unlock()
}
i = i + 1
}
if strResp != "" {
api.Logger.LogInfo(fmt.Sprintf("[RESPONSE] %s\n", strResp))
}
default:
err = fmt.Errorf("model not exists")
api.Logger.LogError(err.Error())
return
}
defer stream.Close()
id := uuid.New().String()
var i int
for {
response, err := stream.Recv()
if err != nil {
var s string
var kind string
if errors.Is(err, io.EOF) {
if i == 0 {
s = "[ERROR] NO RESPONSE, PLEASE RETRY"
kind = "retry"
} else {
s = "\n\n###### [END] ######"
kind = "chat"
}
} else {
s = fmt.Sprintf("[ERROR] %s", err.Error())
kind = "error"
}
chatMsg := Message{
Kind: kind,
Msg: s,
MsgId: id,
CreateTime: time.Now().Format("2006-01-02 15:04:05"),
}
mutex.Lock()
_ = conn.WriteJSON(chatMsg)
mutex.Unlock()
break
}
if len(response.Choices) > 0 {
var s string
if i == 0 {
s = fmt.Sprintf(`%s# %s`, s, requestMsg)
}
for _, choice := range response.Choices {
s = s + choice.Delta.Content
}
strResp = strResp + s
chatMsg := Message{
Kind: "chat",
Msg: s,
MsgId: id,
CreateTime: time.Now().Format("2006-01-02 15:04:05"),
}
mutex.Lock()
_ = conn.WriteJSON(chatMsg)
mutex.Unlock()
}
i = i + 1
}
if strResp != "" {
api.Logger.LogInfo(fmt.Sprintf("[RESPONSE] %s\n", strResp))
}
}
func (api *Api) WsChat(c *gin.Context) {
@@ -196,9 +288,9 @@ func (api *Api) WsChat(c *gin.Context) {
_ = conn.Close()
}()
_ = conn.SetReadDeadline(time.Now().Add(pingWait))
_ = conn.SetReadDeadline(time.Now().Add(PingWait))
conn.SetPongHandler(func(s string) error {
_ = conn.SetReadDeadline(time.Now().Add(pingWait))
_ = conn.SetReadDeadline(time.Now().Add(PingWait))
return nil
})
@@ -220,7 +312,7 @@ func (api *Api) WsChat(c *gin.Context) {
}()
api.Logger.LogInfo(fmt.Sprintf("websocket connection open"))
cli := openai.NewClient(api.Config.AppKey)
cli := openai.NewClient(api.Config.ApiKey)
var latestRequestTime time.Time
for {
@@ -290,10 +382,10 @@ func (api *Api) WsChat(c *gin.Context) {
isClosed = true
api.Logger.LogInfo("[CLOSED] websocket receive closed message")
case websocket.PingMessage:
_ = conn.SetReadDeadline(time.Now().Add(pingWait))
_ = conn.SetReadDeadline(time.Now().Add(PingWait))
api.Logger.LogInfo("[PING] websocket receive ping message")
case websocket.PongMessage:
_ = conn.SetReadDeadline(time.Now().Add(pingWait))
_ = conn.SetReadDeadline(time.Now().Add(PingWait))
api.Logger.LogInfo("[PONG] websocket receive pong message")
default:
err = fmt.Errorf("[ERROR] websocket receive message type not text")

View File

@@ -1,9 +1,10 @@
package chat
type Config struct {
AppKey string `yaml:"appKey" json:"appKey" bson:"appKey" validate:"required"`
ApiKey string `yaml:"apiKey" json:"apiKey" bson:"apiKey" validate:"required"`
Port int `yaml:"port" json:"port" bson:"port" validate:"required"`
IntervalSeconds int `yaml:"intervalSeconds" json:"intervalSeconds" bson:"intervalSeconds" validate:"required"`
Model string `yaml:"model" json:"model" bson:"model" validate:"required"`
MaxLength int `yaml:"maxLength" json:"maxLength" bson:"maxLength" validate:"required"`
Cors bool `yaml:"cors" json:"cors" bson:"cors" validate:""`
}
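The validate:"required" tags suggest the decoded config is run through a struct validator before the service starts. A minimal sketch of that check, assuming github.com/go-playground/validator/v10 (the validator actually wired into this repository is not shown in this commit):

package main

import (
    "fmt"

    "github.com/go-playground/validator/v10"
)

// Config mirrors a subset of the chat.Config fields above, purely for illustration.
type Config struct {
    ApiKey string `validate:"required"`
    Model  string `validate:"required"`
    Port   int    `validate:"required"`
}

func main() {
    cfg := Config{Model: "gpt-3.5-turbo-0301", Port: 9000} // ApiKey left empty on purpose
    if err := validator.New().Struct(cfg); err != nil {
        fmt.Println(err) // reports that ApiKey fails the "required" rule
    }
}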

View File

@@ -1,12 +1,17 @@
# Your openai.com API key
# openai的API Key
appKey: "xxxxxx"
apiKey: "xxxxxx"
# Service port
# 服务端口
port: 9000
# The time interval for sending questions cannot be less than how long, unit: second
# 问题发送的时间间隔不能小于多长时间,单位:秒
intervalSeconds: 5
# GPT model, if you use the GPT4 model, please ensure that the corresponding openai account has the permission to use the GPT4 model
# Available models include: gpt-4-32k-0314, gpt-4-32k, gpt-4-0314, gpt-4, gpt-3.5-turbo-0301, gpt-3.5-turbo, text-davinci-003, text-davinci-002, text-curie-001, text-babbage-001, text-ada-001, text-davinci-001, davinci-instruct-beta, davinci, curie-instruct-beta, curie, ada, babbage
# GPT模型如果使用GPT4模型请保证对应的openai账号有GPT4模型的使用权限
# 可用的模型包括: gpt-4-32k-0314, gpt-4-32k, gpt-4-0314, gpt-4, gpt-3.5-turbo-0301, gpt-3.5-turbo, text-davinci-003, text-davinci-002, text-curie-001, text-babbage-001, text-ada-001, text-davinci-001, davinci-instruct-beta, davinci, curie-instruct-beta, curie, ada, babbage
model: gpt-3.5-turbo-0301
# The maximum length of the returned answer
# 返回答案的最大长度
maxLength: 2000
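The model chosen here also decides which OpenAI endpoint the service calls: gpt-4* and gpt-3.5-turbo* values go through the chat completions API, every other supported model through the legacy completions API, mirroring the switch in GetChatMessage above. A hypothetical helper (not part of the repository) that makes the split explicit:

package main

import (
    "fmt"
    "strings"
)

// usesChatAPI reports whether a config.yaml "model" value is handled by the
// chat-completion branch of GetChatMessage (gpt-4* and gpt-3.5-turbo* models);
// all other supported models fall through to the plain completion branch.
func usesChatAPI(model string) bool {
    return strings.HasPrefix(model, "gpt-4") || strings.HasPrefix(model, "gpt-3.5-turbo")
}

func main() {
    for _, m := range []string{"gpt-4", "gpt-3.5-turbo-0301", "text-davinci-003", "ada"} {
        fmt.Printf("%-20s chat API: %v\n", m, usesChatAPI(m))
    }
}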

View File

@@ -1,7 +1,7 @@
version: "3"
services:
chatgpt-stream:
image: "doryengine/chatgpt-stream:v1.0.1"
image: "doryengine/chatgpt-stream:v1.0.2"
hostname: chatgpt-stream
container_name: chatgpt-stream
ports:
@@ -11,7 +11,7 @@ services:
- chatgpt-service
restart: always
chatgpt-service:
image: "doryengine/chatgpt-service:v1.0.1-alpine"
image: "doryengine/chatgpt-service:v1.0.2-alpine"
hostname: chatgpt-service
container_name: chatgpt-service
ports:

main.go
View File

@@ -27,8 +27,19 @@ func main() {
logger.LogError(err.Error())
return
}
if config.AppKey == "" {
logger.LogError(fmt.Sprintf("appKey is empty"))
if config.ApiKey == "" {
logger.LogError(fmt.Sprintf("apiKey is empty"))
return
}
var found bool
for _, model := range chat.GPTModels {
if model == config.Model {
found = true
break
}
}
if !found {
logger.LogError(fmt.Sprintf("model not exists"))
return
}