From 437da3d5b31c5014d8221ddf2e8d6cde833f19f0 Mon Sep 17 00:00:00 2001
From: ivan deng
Date: Tue, 18 Apr 2023 01:16:41 +0800
Subject: [PATCH 1/2] fix #I6WHKN [app_chatgpt] sensitive-word handling is broken and needs fixing; pin to top
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 app_chatgpt/models/ai_robot.py         | 69 +++++++++++++++-----------
 app_chatgpt/models/lib/sensi_words.txt |  3 --
 app_chatgpt/models/mail_channel.py     | 22 ++++----
 3 files changed, 48 insertions(+), 46 deletions(-)

diff --git a/app_chatgpt/models/ai_robot.py b/app_chatgpt/models/ai_robot.py
index 421e6ba6..2d2b6b8a 100644
--- a/app_chatgpt/models/ai_robot.py
+++ b/app_chatgpt/models/ai_robot.py
@@ -40,7 +40,7 @@ GPT-3 A set of models that can understand and generate natural language
     # begin gpt parameters
     # 1. stop: the condition on which the chatbot stops generating a reply; it can be a piece of text or a list, and generation stops as soon as the reply contains it.
     # 2. temperature: controls how "novel" the reply is; higher values make replies more uncertain and random, lower values make them more predictable and conventional.
-    # 3. top_p: similar to temperature, it also controls how "novel" the reply is. The difference is that top_p caps the cumulative probability of the most likely candidates; smaller values give more conservative replies, larger values give more novel ones.
+    # 3. top_p: verbal coherence; similar to temperature, it also controls how "novel" the reply is. The difference is that top_p caps the cumulative probability of the most likely candidates; smaller values give more conservative replies, larger values give more novel ones.
     # 4. frequency_penalty: controls how strongly words that already appear frequently in the reply are penalized; the chatbot tries to avoid high-frequency words so that replies are more diverse and novel.
     # 5. presence_penalty: the counterpart of frequency_penalty; it controls the penalty applied to low-frequency words, so the chatbot tries to use less common words in its replies to improve diversity and novelty.
     max_tokens = fields.Integer('Max response', default=600,
@@ -50,7 +50,7 @@ GPT-3 A set of models that can understand and generate natural language
     (including system message, examples, message history, and user query) and
     the model's response. One token is roughly 4 characters for typical English
     text. """)
-    temperature = fields.Float(string='Temperature', default=0.9,
+    temperature = fields.Float(string='Temperature', default=0.8,
        help="""
     Controls randomness. Lowering the temperature means that the model will
     produce more repetitive and deterministic responses.
@@ -104,39 +104,47 @@ GPT-3 A set of models that can understand and generate natural language def action_disconnect(self): requests.delete('https://chatgpt.com/v1/disconnect') - - def get_ai(self, data, author_id=False, answer_id=False, param={}): - # 通用方法 - # author_id: 请求的 partner_id 对象 - # answer_id: 回答的 partner_id 对象 - # kwargs,dict 形式的可变参数 - self.ensure_one() - # 前置勾子,一般返回 False,有问题返回响应内容 - res_pre = self.get_ai_pre(data, author_id, answer_id, param) - if res_pre: - return res_pre - if hasattr(self, 'get_%s' % self.provider): - res = getattr(self, 'get_%s' % self.provider)(data, author_id, answer_id, param) - else: - res = _('No robot provider found') - - # 后置勾子,返回处理后的内容,用于处理敏感词等 - res_post = self.get_ai_post(res, author_id, answer_id, param) - return res_post def get_ai_pre(self, data, author_id=False, answer_id=False, param={}): if self.is_filtering: search = WordsSearch() search.SetKeywords([]) - content = data[0]['content'] + if isinstance(data, list): + content = data[len(data)-1]['content'] + else: + content = data sensi = search.FindFirst(content) if sensi is not None: + _logger.error('==========敏感词:%s' % sensi['Keyword']) return _('温馨提示:您发送的内容含有敏感词,请修改内容后再向我发送。') else: return False + + def get_ai(self, data, author_id=False, answer_id=False, param={}): + # 通用方法 + # author_id: 请求的 partner_id 对象 + # answer_id: 回答的 partner_id 对象 + # param,dict 形式的参数 + # 调整输出为2个参数:res_post详细内容,is_ai是否ai的响应 + + self.ensure_one() + # 前置勾子,一般返回 False,有问题返回响应内容,用于处理敏感词等 + res_pre = self.get_ai_pre(data, author_id, answer_id, param) + if res_pre: + # 有错误内容,则返回上级内容及 is_ai为假 + return res_pre, False + if not hasattr(self, 'get_%s' % self.provider): + res = _('No robot provider found') + return res, False + + res = getattr(self, 'get_%s' % self.provider)(data, author_id, answer_id, param) + # 后置勾子,返回处理后的内容 + res_post, is_ai = self.get_ai_post(res, author_id, answer_id, param) + return res_post, is_ai def get_ai_post(self, res, author_id=False, answer_id=False, param={}): if res and author_id and isinstance(res, openai.openai_object.OpenAIObject) or isinstance(res, list): + # 返回是个对象,那么就是ai usage = json.loads(json.dumps(res['usage'])) content = json.loads(json.dumps(res['choices'][0]['message']['content'])) data = content.replace(' .', '.').strip() @@ -169,9 +177,10 @@ GPT-3 A set of models that can understand and generate natural language 'first_ask_time': ask_date }) ai_use.write(vals) + return data, True else: - data = res - return data + # 直接返回错误语句,那么就是非ai + return res, False def get_ai_system(self, content=None): # 获取基础ai角色设定, role system @@ -221,7 +230,7 @@ GPT-3 A set of models that can understand and generate natural language # 处理传参,传过来的优先于 robot 默认的 max_tokens = param.get('max_tokens') or self.max_tokens or 600, - temperature = param.get('temperature') or self.temperature or 0.9, + temperature = param.get('temperature') or self.temperature or 0.8, top_p = param.get('top_p') or self.top_p or 0.6, frequency_penalty = param.get('frequency_penalty') or self.frequency_penalty or 0.5, presence_penalty = param.get('presence_penalty') or self.presence_penalty or 0.5, @@ -250,7 +259,7 @@ GPT-3 A set of models that can understand and generate natural language model=self.ai_model, messages=messages, n=1, - temperature=self.temperature or 0.9, + temperature=self.temperature or 0.8, max_tokens=self.max_tokens or 600, top_p=self.top_p or 0.6, frequency_penalty=self.frequency_penalty or 0.5, @@ -276,7 +285,7 @@ GPT-3 A set of models that can understand and generate natural language pdata = { "model": 
self.ai_model, "prompt": data, - "temperature": 0.9, + "temperature": 0.8, "max_tokens": max_tokens, "top_p": 1, "frequency_penalty": 0.0, @@ -311,9 +320,9 @@ GPT-3 A set of models that can understand and generate natural language else: messages = [{"role": "user", "content": data}] - # 处理传参,传过来的优先于 robot 默认的 + # todo: 处理传参,传过来的优先于 robot 默认的,当前有问题,无法做tuple转换 max_tokens = param.get('max_tokens') or self.max_tokens or 600, - temperature = param.get('temperature') or self.temperature or 0.9, + temperature = param.get('temperature') or self.temperature or 0.8, top_p = param.get('top_p') or self.top_p or 0.6, frequency_penalty = param.get('frequency_penalty') or self.frequency_penalty or 0.5, presence_penalty = param.get('presence_penalty') or self.presence_penalty or 0.5, @@ -330,7 +339,7 @@ GPT-3 A set of models that can understand and generate natural language messages=messages, # 返回的回答数量 n=1, - temperature=self.temperature or 0.9, + temperature=self.temperature or 0.8, max_tokens=self.max_tokens or 600, top_p=self.top_p or 0.6, frequency_penalty=self.frequency_penalty or 0.5, diff --git a/app_chatgpt/models/lib/sensi_words.txt b/app_chatgpt/models/lib/sensi_words.txt index 37e23ea8..dd51bf72 100644 --- a/app_chatgpt/models/lib/sensi_words.txt +++ b/app_chatgpt/models/lib/sensi_words.txt @@ -10567,7 +10567,6 @@ fuck兴奋剂 禽獸 明慧网 jiejie -的 嫩穴 床上激情自拍图 6。4 @@ -13533,7 +13532,6 @@ re xue ying hao 烂 淫荡老师 小姨子的小嫩屄 -create 亚洲激情BT 省长的儿媳妇 苹果日报 @@ -14077,7 +14075,6 @@ gong fu 89年的鬥爭 台湾十八电影 小淫虫电影 -CREATE 外阴 外??挂 毛爷爷复活 diff --git a/app_chatgpt/models/mail_channel.py b/app_chatgpt/models/mail_channel.py index 886383c9..6edb9190 100644 --- a/app_chatgpt/models/mail_channel.py +++ b/app_chatgpt/models/mail_channel.py @@ -63,7 +63,7 @@ class Channel(models.Model): answer_id = user_id.partner_id # todo: 只有个人配置的群聊才给配置 param = self.get_ai_config(ai) - res = ai.get_ai(messages, author_id, answer_id, param) + res, is_ai = ai.get_ai(messages, author_id, answer_id, param) if res: res = res.replace('\n', '
') channel.with_user(user_id).message_post(body=res, message_type='comment', subtype_xmlid='mail.mt_comment', parent_id=message.id)
@@ -144,22 +144,18 @@ class Channel(models.Model):
         if not msg:
             return rdata
         # api_key = self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openapi_api_key')
-        api_key = ''
-        if ai:
+        # AI handling: never let the robot answer its own messages
+        if ai and answer_id != message.author_id:
             api_key = ai.openapi_api_key
             if not api_key:
                 _logger.warning(_("ChatGPT Robot【%s】have not set open api key."))
                 return rdata
-        try:
-            openapi_context_timeout = int(self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openapi_context_timeout')) or 60
-        except:
-            openapi_context_timeout = 60
-        sync_config = self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openai_sync_config')
-        openai.api_key = api_key
-        # print(msg_vals)
-        # print(msg_vals.get('record_name', ''))
-        # print('self.channel_type :',self.channel_type)
-        if ai:
+            try:
+                openapi_context_timeout = int(self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openapi_context_timeout')) or 60
+            except:
+                openapi_context_timeout = 60
+            sync_config = self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openai_sync_config')
+            openai.api_key = api_key
             # gpt-4 models: take 0 turns of chat history; other models: take 3
             chat_count = 0 if '4' in ai.ai_model else 3
             if author_id != answer_id.id and self.channel_type == 'chat':

From 845358621f0f227b6c546871e7eace27ae4fa538 Mon Sep 17 00:00:00 2001
From: ivan deng
Date: Tue, 18 Apr 2023 05:14:11 +0800
Subject: [PATCH 2/2] forward requests through the v server; the nginx config may need further tuning later; use a toai.py script to handle the forwarded requests; details still to be fixed
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 app_chatgpt/data/mail_channel_data.xml | 12 ++--
 app_chatgpt/models/ai_robot.py         | 87 ++++++++++++++++----------
 app_chatgpt/models/lib/sensi_words.txt |  3 -
 3 files changed, 59 insertions(+), 43 deletions(-)

diff --git a/app_chatgpt/data/mail_channel_data.xml b/app_chatgpt/data/mail_channel_data.xml
index 20003202..e4c8c70c 100644
--- a/app_chatgpt/data/mail_channel_data.xml
+++ b/app_chatgpt/data/mail_channel_data.xml
@@ -17,12 +17,12 @@

Please ask me any question.

]]> - - - - - - + + + + + + diff --git a/app_chatgpt/models/ai_robot.py b/app_chatgpt/models/ai_robot.py index 2d2b6b8a..5c5de645 100644 --- a/app_chatgpt/models/ai_robot.py +++ b/app_chatgpt/models/ai_robot.py @@ -229,12 +229,12 @@ GPT-3 A set of models that can understand and generate natural language o_url = self.endpoint or "https://api.openai.com/v1/chat/completions" # 处理传参,传过来的优先于 robot 默认的 - max_tokens = param.get('max_tokens') or self.max_tokens or 600, - temperature = param.get('temperature') or self.temperature or 0.8, - top_p = param.get('top_p') or self.top_p or 0.6, - frequency_penalty = param.get('frequency_penalty') or self.frequency_penalty or 0.5, - presence_penalty = param.get('presence_penalty') or self.presence_penalty or 0.5, - # request_timeout = param.get('request_timeout') or self.ai_timeout or 120, + max_tokens = param.get('max_tokens') if param.get('max_tokens') else self.max_tokens + temperature = param.get('temperature') if param.get('temperature') else self.temperature + top_p = param.get('top_p') if param.get('top_p') else self.top_p + frequency_penalty = param.get('frequency_penalty') if param.get('frequency_penalty') else self.frequency_penalty + presence_penalty = param.get('presence_penalty') if param.get('presence_penalty') else self.presence_penalty + request_timeout = param.get('request_timeout') if param.get('request_timeout') else self.ai_timeout if self.stop: stop = self.stop.split(',') @@ -254,21 +254,40 @@ GPT-3 A set of models that can understand and generate natural language sys_content = self.get_ai_system(param.get('sys_content')) if sys_content: messages.insert(0, sys_content) - # 暂时不变 - response = openai.ChatCompletion.create( - model=self.ai_model, - messages=messages, - n=1, - temperature=self.temperature or 0.8, - max_tokens=self.max_tokens or 600, - top_p=self.top_p or 0.6, - frequency_penalty=self.frequency_penalty or 0.5, - presence_penalty=self.presence_penalty or 0.5, - stop=stop, - request_timeout=self.ai_timeout or 120, - ) - if 'choices' in response: - return response + # todo: 当前反向代理方式不通,要调整为 远程主机中接受请求,post到openai,再将结果返回给请求者 + # response = openai.ChatCompletion.create( + # model=self.ai_model, + # messages=messages, + # # 返回的回答数量 + # n=1, + # max_tokens=max_tokens, + # temperature=temperature, + # top_p=top_p, + # frequency_penalty=frequency_penalty, + # presence_penalty=presence_penalty, + # stop=stop, + # request_timeout=request_timeout, + # ) + # if 'choices' in response: + # return response + # todo: 两种方式一样,要调整 v 服务器的二次处理 /root/toai.py + pdata = { + "model": self.ai_model, + "messages": messages, + "max_tokens": max_tokens, + "temperature": temperature, + "top_p": top_p, + "frequency_penalty": frequency_penalty, + "presence_penalty": presence_penalty, + "stop": stop + } + response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT) + try: + res = response.json() + if 'choices' in res: + return res + except Exception as e: + _logger.warning("Get Response Json failed: %s", e) else: _logger.warning('=====================Openai output data: %s' % response) elif self.ai_model == 'dall-e2': @@ -320,13 +339,13 @@ GPT-3 A set of models that can understand and generate natural language else: messages = [{"role": "user", "content": data}] - # todo: 处理传参,传过来的优先于 robot 默认的,当前有问题,无法做tuple转换 - max_tokens = param.get('max_tokens') or self.max_tokens or 600, - temperature = param.get('temperature') or self.temperature or 0.8, - top_p = param.get('top_p') or self.top_p or 0.6, - frequency_penalty = 
param.get('frequency_penalty') or self.frequency_penalty or 0.5, - presence_penalty = param.get('presence_penalty') or self.presence_penalty or 0.5, - # request_timeout = param.get('request_timeout') or self.ai_timeout or 120, + # 处理传参,传过来的优先于 robot 默认的 + max_tokens = param.get('max_tokens') if param.get('max_tokens') else self.max_tokens + temperature = param.get('temperature') if param.get('temperature') else self.temperature + top_p = param.get('top_p') if param.get('top_p') else self.top_p + frequency_penalty = param.get('frequency_penalty') if param.get('frequency_penalty') else self.frequency_penalty + presence_penalty = param.get('presence_penalty') if param.get('presence_penalty') else self.presence_penalty + request_timeout= param.get('request_timeout') if param.get('request_timeout') else self.ai_timeout # Ai角色设定,如果没设定则再处理 if messages[0].get('role') != 'system': @@ -339,13 +358,13 @@ GPT-3 A set of models that can understand and generate natural language messages=messages, # 返回的回答数量 n=1, - temperature=self.temperature or 0.8, - max_tokens=self.max_tokens or 600, - top_p=self.top_p or 0.6, - frequency_penalty=self.frequency_penalty or 0.5, - presence_penalty=self.presence_penalty or 0.5, + max_tokens=max_tokens, + temperature=temperature, + top_p=top_p, + frequency_penalty=frequency_penalty, + presence_penalty=presence_penalty, stop=stop, - request_timeout=self.ai_timeout or 120, + request_timeout=request_timeout, ) if 'choices' in response: return response diff --git a/app_chatgpt/models/lib/sensi_words.txt b/app_chatgpt/models/lib/sensi_words.txt index dd51bf72..70167d7f 100644 --- a/app_chatgpt/models/lib/sensi_words.txt +++ b/app_chatgpt/models/lib/sensi_words.txt @@ -6060,7 +6060,6 @@ cctv wan wang zhi wang 抽插27岁处女meimei淫叫不断 中天萬方 -代理 亮穴 机战 成人 激情 淫 另类 @@ -9911,7 +9910,6 @@ Tokyo Hot n0188 田中爱 毛卖逼 插 操 干 骚穴 嫩穴 小穴 密穴 淫水流 兔meimei色色贴图区 -功 毛夜情 宁王府成人论坛导航 岩崎 Sky Angel Vol。27 @@ -14359,7 +14357,6 @@ secretchina 江泽民靠 錢 其 琛 和妹妹做爱 -服务器 性交做爱 毛李鹏 狗卵子
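
For reviewers who want to try the forwarding path from PATCH 2/2 outside Odoo, the sketch below is a minimal standalone approximation of what the patched chat call now does: build the same JSON payload, POST it to the configured endpoint (the relay host, or https://api.openai.com/v1/chat/completions directly), and only trust the reply when a "choices" key is present, returning the same (content, is_ai) pair that the refactored get_ai contract uses. The helper name call_chat_api, the Bearer-token header layout, and the placeholder endpoint and key in the usage example are illustrative assumptions, not part of the patch.

```python
# Minimal, standalone sketch of the relay call introduced in PATCH 2/2.
# Assumptions (not taken from the patch): the endpoint speaks the standard
# OpenAI chat-completions JSON protocol and expects a Bearer token header.
import json
import requests


def call_chat_api(endpoint, api_key, messages, model="gpt-3.5-turbo",
                  max_tokens=600, temperature=0.8, top_p=0.6,
                  frequency_penalty=0.5, presence_penalty=0.5,
                  stop=None, timeout=120):
    """POST a chat-completion request and return (content, is_ai)."""
    payload = {
        "model": model,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "frequency_penalty": frequency_penalty,
        "presence_penalty": presence_penalty,
        "stop": stop,
    }
    headers = {
        "Content-Type": "application/json",
        "Authorization": "Bearer %s" % api_key,  # assumed header layout
    }
    response = requests.post(endpoint, data=json.dumps(payload),
                             headers=headers, timeout=timeout)
    try:
        res = response.json()
    except ValueError:
        # Mirrors the patch's "Get Response Json failed" warning path.
        return "Get Response Json failed", False
    if "choices" in res:
        # Same shape the patch reads in get_ai_post: choices[0].message.content
        return res["choices"][0]["message"]["content"].strip(), True
    return str(res), False


if __name__ == "__main__":
    # Example usage against a hypothetical endpoint; both values are placeholders.
    content, is_ai = call_chat_api(
        "https://api.openai.com/v1/chat/completions",
        "sk-...",
        [{"role": "user", "content": "Hello"}],
    )
    print(is_ai, content)
```

In the module itself this call still sits between the two hooks added in PATCH 1/2: get_ai_pre short-circuits with is_ai=False when a sensitive word is found, and get_ai_post records token usage before returning the content.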