Chill
2023-04-18 13:50:39 +08:00
4 changed files with 101 additions and 83 deletions

View File

@@ -17,12 +17,12 @@
 <p>Please ask me any question.</p>]]></field>
 </record>
-<record model="mail.channel.member" id="channel_member_chatgtp_channel_for_admin">
-<field name="partner_id" ref="base.partner_admin"/>
-<field name="channel_id" ref="app_chatgpt.channel_chatgpt"/>
-<field name="fetched_message_id" ref="app_chatgpt.module_install_notification"/>
-<field name="seen_message_id" ref="app_chatgpt.module_install_notification"/>
-</record>
+<!-- <record model="mail.channel.member" id="channel_member_chatgtp_channel_for_admin">-->
+<!-- <field name="partner_id" ref="base.partner_admin"/>-->
+<!-- <field name="channel_id" ref="app_chatgpt.channel_chatgpt"/>-->
+<!-- <field name="fetched_message_id" ref="app_chatgpt.module_install_notification"/>-->
+<!-- <field name="seen_message_id" ref="app_chatgpt.module_install_notification"/>-->
+<!-- </record>-->
 <record model="mail.channel" id="app_chatgpt.channel_chatgpt">
 <field name="group_ids" eval="[Command.link(ref('base.group_user'))]"/>

View File

@@ -40,7 +40,7 @@ GPT-3 A set of models that can understand and generate natural language
 # begin gpt parameters
 # 1. stop: the condition on which the chatbot stops generating a reply. It can be a piece of text or a list; once the generated reply contains it, generation stops.
 # 2. temperature: controls how "novel" the reply is. Higher values make the generated reply more uncertain and random; lower values make it more predictable and conventional.
-# 3. top_p: somewhat similar to temperature, it also controls how "novel" the reply is. The difference is that top_p limits the cumulative probability of the most likely candidate tokens: smaller values give more conservative replies, larger values give more novel ones.
+# 3. top_p (speech coherence): somewhat similar to temperature, it also controls how "novel" the reply is. The difference is that top_p limits the cumulative probability of the most likely candidate tokens: smaller values give more conservative replies, larger values give more novel ones.
 # 4. frequency_penalty: controls how strongly words that already appear frequently in the reply are penalized. The chatbot tries to avoid overused words to improve the diversity and novelty of the reply.
 # 5. presence_penalty: the counterpart of frequency_penalty; it controls the penalty on rarely used words, so the chatbot tries to bring in less frequent words to improve diversity and novelty.
 max_tokens = fields.Integer('Max response', default=600,
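
For orientation, these fields map one-to-one onto the request body the module later builds for the chat-completions endpoint (see the pdata dict further down in this commit). A minimal sketch, with placeholder values standing in for the robot's stored settings and illustrative stop sequences:

# Sketch only: how the robot fields above translate into a chat-completions payload.
payload = {
    "model": "gpt-3.5-turbo",          # placeholder for self.ai_model
    "messages": [{"role": "user", "content": "Hello"}],
    "max_tokens": 600,                 # max_tokens field
    "temperature": 0.8,                # temperature field
    "top_p": 0.6,                      # top_p field
    "frequency_penalty": 0.5,          # frequency_penalty field
    "presence_penalty": 0.5,           # presence_penalty field
    "stop": ["Human:", "AI:"],         # parsed from the comma-separated stop field (example values)
}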
@@ -50,7 +50,7 @@ GPT-3 A set of models that can understand and generate natural language
 (including system message, examples, message history, and user query) and the model's response.
 One token is roughly 4 characters for typical English text.
 """)
-temperature = fields.Float(string='Temperature', default=0.9,
+temperature = fields.Float(string='Temperature', default=0.8,
 help="""
 Controls randomness. Lowering the temperature means that the model will produce
 more repetitive and deterministic responses.
@@ -104,39 +104,47 @@ GPT-3 A set of models that can understand and generate natural language
 def action_disconnect(self):
 requests.delete('https://chatgpt.com/v1/disconnect')
-def get_ai(self, data, author_id=False, answer_id=False, param={}):
-# Generic entry point
-# author_id: partner_id record of the requester
-# answer_id: partner_id record of the responder
-# kwargs: variable parameters in dict form
-self.ensure_one()
-# Pre hook: normally returns False; if something is wrong it returns the response content
-res_pre = self.get_ai_pre(data, author_id, answer_id, param)
-if res_pre:
-return res_pre
-if hasattr(self, 'get_%s' % self.provider):
-res = getattr(self, 'get_%s' % self.provider)(data, author_id, answer_id, param)
-else:
-res = _('No robot provider found')
-# Post hook: returns the processed content, used for sensitive words and the like
-res_post = self.get_ai_post(res, author_id, answer_id, param)
-return res_post
 def get_ai_pre(self, data, author_id=False, answer_id=False, param={}):
 if self.is_filtering:
 search = WordsSearch()
 search.SetKeywords([])
-content = data[0]['content']
+if isinstance(data, list):
+content = data[len(data)-1]['content']
+else:
+content = data
 sensi = search.FindFirst(content)
 if sensi is not None:
+_logger.error('==========敏感词:%s' % sensi['Keyword'])
 return _('温馨提示:您发送的内容含有敏感词,请修改内容后再向我发送。')
 else:
 return False
+def get_ai(self, data, author_id=False, answer_id=False, param={}):
+# Generic entry point
+# author_id: partner_id record of the requester
+# answer_id: partner_id record of the responder
+# param: parameters in dict form
+# The output is now two values: res_post (the detailed content) and is_ai (whether it is an AI response)
+self.ensure_one()
+# Pre hook: normally returns False; if something is wrong it returns the response content, e.g. for sensitive-word handling
+res_pre = self.get_ai_pre(data, author_id, answer_id, param)
+if res_pre:
+# There is an error message, so return it to the caller with is_ai set to False
+return res_pre, False
+if not hasattr(self, 'get_%s' % self.provider):
+res = _('No robot provider found')
+return res, False
+res = getattr(self, 'get_%s' % self.provider)(data, author_id, answer_id, param)
+# Post hook: returns the processed content
+res_post, is_ai = self.get_ai_post(res, author_id, answer_id, param)
+return res_post, is_ai
 def get_ai_post(self, res, author_id=False, answer_id=False, param={}):
 if res and author_id and isinstance(res, openai.openai_object.OpenAIObject) or isinstance(res, list):
+# If the return value is an object, it is an AI response
 usage = json.loads(json.dumps(res['usage']))
 content = json.loads(json.dumps(res['choices'][0]['message']['content']))
 data = content.replace(' .', '.').strip()
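
The refactor above keeps the same three-stage pipeline — pre hook, provider dispatch, post hook — but threads an is_ai flag through it. Stripped of the Odoo specifics, the control flow after this commit is roughly the following sketch (a paraphrase, not the literal model code):

# Sketch of the new get_ai control flow (content, is_ai) — names mirror the diff above.
def get_ai(self, data, author_id=False, answer_id=False, param=None):
    param = param or {}
    res_pre = self.get_ai_pre(data, author_id, answer_id, param)   # e.g. sensitive-word check
    if res_pre:
        return res_pre, False                                      # rejection text, not an AI reply
    handler = 'get_%s' % self.provider                             # e.g. get_openai
    if not hasattr(self, handler):
        return 'No robot provider found', False
    raw = getattr(self, handler)(data, author_id, answer_id, param)
    return self.get_ai_post(raw, author_id, answer_id, param)      # (content, is_ai)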
@@ -170,9 +178,10 @@ GPT-3 A set of models that can understand and generate natural language
 'first_ask_time': ask_date
 })
 ai_use.write(vals)
+return data, True
 else:
-data = res
-return data
+# The error text is returned directly, so it is not an AI response
+return res, False
 def get_ai_system(self, content=None):
 # Get the base AI role definition, role: system
@@ -221,12 +230,12 @@ GPT-3 A set of models that can understand and generate natural language
 o_url = self.endpoint or "https://api.openai.com/v1/chat/completions"
 # Handle incoming parameters; values passed in take precedence over the robot defaults
-max_tokens = param.get('max_tokens') or self.max_tokens or 600,
-temperature = param.get('temperature') or self.temperature or 0.9,
-top_p = param.get('top_p') or self.top_p or 0.6,
-frequency_penalty = param.get('frequency_penalty') or self.frequency_penalty or 0.5,
-presence_penalty = param.get('presence_penalty') or self.presence_penalty or 0.5,
-# request_timeout = param.get('request_timeout') or self.ai_timeout or 120,
+max_tokens = param.get('max_tokens') if param.get('max_tokens') else self.max_tokens
+temperature = param.get('temperature') if param.get('temperature') else self.temperature
+top_p = param.get('top_p') if param.get('top_p') else self.top_p
+frequency_penalty = param.get('frequency_penalty') if param.get('frequency_penalty') else self.frequency_penalty
+presence_penalty = param.get('presence_penalty') if param.get('presence_penalty') else self.presence_penalty
+request_timeout = param.get('request_timeout') if param.get('request_timeout') else self.ai_timeout
 if self.stop:
 stop = self.stop.split(',')
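
A note on the lines above: in Python an assignment whose right-hand side ends with a trailing comma builds a one-element tuple, so the old `max_tokens = ... or 600,` style actually bound max_tokens to (600,) rather than 600. A minimal sketch of that pitfall and of the replacement pattern (variable names here are illustrative only):

# Trailing comma: the right-hand side becomes a 1-tuple, not a number.
max_tokens = 300 or 600,          # -> (300,)
assert max_tokens == (300,)

# Replacement pattern used in the commit: an explicit value passed in via
# `param` wins, otherwise fall back to the robot's stored default.
param = {'temperature': 0.2}
robot_default_temperature = 0.8   # stand-in for self.temperature
temperature = param.get('temperature') if param.get('temperature') else robot_default_temperature
assert temperature == 0.2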
@@ -246,21 +255,40 @@ GPT-3 A set of models that can understand and generate natural language
 sys_content = self.get_ai_system(param.get('sys_content'))
 if sys_content:
 messages.insert(0, sys_content)
-# Unchanged for now
-response = openai.ChatCompletion.create(
-model=self.ai_model,
-messages=messages,
-n=1,
-temperature=self.temperature or 0.9,
-max_tokens=self.max_tokens or 600,
-top_p=self.top_p or 0.6,
-frequency_penalty=self.frequency_penalty or 0.5,
-presence_penalty=self.presence_penalty or 0.5,
-stop=stop,
-request_timeout=self.ai_timeout or 120,
-)
-if 'choices' in response:
-return response
+# todo: the current reverse-proxy approach does not work; change it so that the remote host accepts the request, posts it to openai, and returns the result to the requester
+# response = openai.ChatCompletion.create(
+# model=self.ai_model,
+# messages=messages,
+# # Number of answers to return
+# n=1,
+# max_tokens=max_tokens,
+# temperature=temperature,
+# top_p=top_p,
+# frequency_penalty=frequency_penalty,
+# presence_penalty=presence_penalty,
+# stop=stop,
+# request_timeout=request_timeout,
+# )
+# if 'choices' in response:
+# return response
+# todo: the two approaches behave the same; the secondary processing on the v server, /root/toai.py, needs adjusting
+pdata = {
+"model": self.ai_model,
+"messages": messages,
+"max_tokens": max_tokens,
+"temperature": temperature,
+"top_p": top_p,
+"frequency_penalty": frequency_penalty,
+"presence_penalty": presence_penalty,
+"stop": stop
+}
+response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
+try:
+res = response.json()
+if 'choices' in res:
+return res
+except Exception as e:
+_logger.warning("Get Response Json failed: %s", e)
 else:
 _logger.warning('=====================Openai output data: %s' % response)
 elif self.ai_model == 'dall-e2':
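
The todo above describes the intended workaround: a small relay on the remote host that accepts the request, forwards it to openai, and hands the result back to the requester. The actual /root/toai.py is not part of this commit; the following is only a minimal sketch of such a relay, assuming a Flask app on the proxy host and an OPENAI_API_KEY environment variable:

# Hypothetical relay sketch (not the real /root/toai.py): receive a chat-completions
# payload, forward it to the OpenAI API, and return the JSON result to the caller.
import os
import requests
from flask import Flask, jsonify, request

app = Flask(__name__)
OPENAI_URL = "https://api.openai.com/v1/chat/completions"

@app.route("/v1/chat/completions", methods=["POST"])
def relay():
    payload = request.get_json(force=True)          # body produced by the Odoo side (pdata)
    headers = {"Authorization": "Bearer %s" % os.environ["OPENAI_API_KEY"]}
    upstream = requests.post(OPENAI_URL, json=payload, headers=headers, timeout=120)
    return jsonify(upstream.json()), upstream.status_code

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8080)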
@@ -277,7 +305,7 @@ GPT-3 A set of models that can understand and generate natural language
 pdata = {
 "model": self.ai_model,
 "prompt": data,
-"temperature": 0.9,
+"temperature": 0.8,
 "max_tokens": max_tokens,
 "top_p": 1,
 "frequency_penalty": 0.0,
@@ -313,12 +341,12 @@ GPT-3 A set of models that can understand and generate natural language
 messages = [{"role": "user", "content": data}]
 # Handle incoming parameters; values passed in take precedence over the robot defaults
-max_tokens = param.get('max_tokens') or self.max_tokens or 600,
-temperature = param.get('temperature') or self.temperature or 0.9,
-top_p = param.get('top_p') or self.top_p or 0.6,
-frequency_penalty = param.get('frequency_penalty') or self.frequency_penalty or 0.5,
-presence_penalty = param.get('presence_penalty') or self.presence_penalty or 0.5,
-# request_timeout = param.get('request_timeout') or self.ai_timeout or 120,
+max_tokens = param.get('max_tokens') if param.get('max_tokens') else self.max_tokens
+temperature = param.get('temperature') if param.get('temperature') else self.temperature
+top_p = param.get('top_p') if param.get('top_p') else self.top_p
+frequency_penalty = param.get('frequency_penalty') if param.get('frequency_penalty') else self.frequency_penalty
+presence_penalty = param.get('presence_penalty') if param.get('presence_penalty') else self.presence_penalty
+request_timeout = param.get('request_timeout') if param.get('request_timeout') else self.ai_timeout
 # AI role setting; if it has not been set, handle it here
 if messages[0].get('role') != 'system':
@@ -331,13 +359,13 @@ GPT-3 A set of models that can understand and generate natural language
 messages=messages,
 # Number of answers to return
 n=1,
-temperature=self.temperature or 0.9,
-max_tokens=self.max_tokens or 600,
-top_p=self.top_p or 0.6,
-frequency_penalty=self.frequency_penalty or 0.5,
-presence_penalty=self.presence_penalty or 0.5,
+max_tokens=max_tokens,
+temperature=temperature,
+top_p=top_p,
+frequency_penalty=frequency_penalty,
+presence_penalty=presence_penalty,
 stop=stop,
-request_timeout=self.ai_timeout or 120,
+request_timeout=request_timeout,
 )
 if 'choices' in response:
 return response

View File

@@ -6060,7 +6060,6 @@ cctv
 抽插27岁处女meimei淫叫不断
 中天萬方
-代理
 亮穴
 机战
 成人 激情 淫 另类
@@ -9911,7 +9910,6 @@ Tokyo Hot n0188 田中爱
 毛卖逼
 插 操 干 骚穴 嫩穴 小穴 密穴 淫水流
 兔meimei色色贴图区
 毛夜情
 宁王府成人论坛导航
 岩崎 Sky Angel Vol。27
@@ -10567,7 +10565,6 @@ fuck兴奋剂
 禽獸
 明慧网
 jiejie
 嫩穴
 床上激情自拍图
 6。4
@@ -13533,7 +13530,6 @@ re xue ying hao
 淫荡老师
 小姨子的小嫩屄
-create
 亚洲激情BT
 省长的儿媳妇
 苹果日报
@@ -14077,7 +14073,6 @@ gong fu
 89年的鬥爭
 台湾十八电影
 小淫虫电影
-CREATE
 外阴
 外??挂
 毛爷爷复活
@@ -14362,7 +14357,6 @@ secretchina
 江泽民靠
 錢 其 琛
 和妹妹做爱
-服务器
 性交做爱
 毛李鹏
 狗卵子

View File

@@ -63,7 +63,7 @@ class Channel(models.Model):
 answer_id = user_id.partner_id
 # todo: only group chats configured by the individual user should get this configuration
 param = self.get_ai_config(ai)
-res = ai.get_ai(messages, author_id, answer_id, param)
+res, is_ai = ai.get_ai(messages, author_id, answer_id, param)
 if res:
 res = res.replace('\n', '<br/>')
 channel.with_user(user_id).message_post(body=res, message_type='comment', subtype_xmlid='mail.mt_comment', parent_id=message.id)
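
Since get_ai now always returns a pair, every caller has to unpack it; a failed pre-check returns the warning text with is_ai = False, and because that text is still truthy it is posted to the channel like a normal answer. A minimal sketch of a caller that distinguishes the two cases (the extra logging is illustrative, not part of this commit):

res, is_ai = ai.get_ai(messages, author_id, answer_id, param)
if res:
    # Both real AI answers and rejection/error texts arrive here; is_ai tells them apart.
    if not is_ai:
        _logger.info('Non-AI reply (filter hit or missing provider): %s', res)
    channel.with_user(user_id).message_post(
        body=res.replace('\n', '<br/>'),
        message_type='comment',
        subtype_xmlid='mail.mt_comment',
        parent_id=message.id,
    )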
@@ -144,22 +144,18 @@ class Channel(models.Model):
 if not msg:
 return rdata
 # api_key = self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openapi_api_key')
-api_key = ''
-if ai:
+# AI handling: do not let the bot answer its own messages
+if ai and answer_id != message.author_id:
 api_key = ai.openapi_api_key
 if not api_key:
 _logger.warning(_("ChatGPT Robot【%s】have not set open api key."))
 return rdata
 try:
 openapi_context_timeout = int(self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openapi_context_timeout')) or 60
 except:
 openapi_context_timeout = 60
 sync_config = self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openai_sync_config')
 openai.api_key = api_key
-# print(msg_vals)
-# print(msg_vals.get('record_name', ''))
-# print('self.channel_type :',self.channel_type)
-if ai:
 # Non-4 versions take 0 rounds; others take 3 rounds of history
 chat_count = 0 if '4' in ai.ai_model else 3
 if author_id != answer_id.id and self.channel_type == 'chat':