Merge remote-tracking branch 'origin/16.0' into 16.0

ivan deng
2023-05-15 19:40:04 +08:00
3 changed files with 39 additions and 14 deletions


@@ -102,6 +102,8 @@ GPT-3 A set of models that can understand and generate natural language
    sensitive_words = fields.Text('Sensitive Words Plus', help='Sensitive word filtering. Separate keywords with a carriage return.')
    is_filtering = fields.Boolean('Filter Sensitive Words', default=False, help='Use base Filter in dir models/lib/sensi_words.txt')
    max_send_char = fields.Integer('Max Send Char', help='Max Send Prompt Length', default=8000)

    def action_disconnect(self):
        requests.delete('https://chatgpt.com/v1/disconnect')
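For context, a minimal plain-Python sketch of how the options declared above could be applied before a prompt is sent; the helper names (find_sensitive_word, clip_prompt) are hypothetical and not part of the module, which only declares the fields here.

# Illustrative sketch only, not module code: how a newline-separated
# sensitive-word list and a max_send_char limit might be applied.
def find_sensitive_word(prompt, sensitive_words):
    # sensitive_words mirrors the Text field: one keyword per line
    for word in (sensitive_words or '').splitlines():
        word = word.strip()
        if word and word in prompt:
            return word
    return None

def clip_prompt(prompt, max_send_char=8000):
    # Keep the outgoing prompt within the configured maximum length
    return prompt[:max_send_char]

print(find_sensitive_word('this mentions badword here', 'badword\nforbidden'))  # 'badword'
print(len(clip_prompt('x' * 10000)))  # 8000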
@@ -117,6 +119,14 @@ GPT-3 A set of models that can understand and generate natural language
            if sensi is not None:
                _logger.error('==========敏感词:%s' % sensi['Keyword'])
                return _('温馨提示:您发送的内容含有敏感词,请修改内容后再向我发送。')
        elif not author_id.gpt_id and answer_id.gpt_id:
            user_id = answer_id.user_ids[:1]
            gpt_policy = user_id.gpt_policy
            gpt_wl_partners = user_id.gpt_wl_partners
            is_allow = author_id.id in gpt_wl_partners.ids
            if gpt_policy != 'all' and not is_allow:
                # This AI is temporarily restricted to whitelisted users
                return _('此Ai暂时未开放请联系管理员。')
        else:
            return False
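The same access rule, restated as a standalone function for clarity; may_use_ai is a hypothetical name, with policy standing in for gpt_policy and whitelist_ids for gpt_wl_partners.ids.

# Illustrative sketch only, not module code.
def may_use_ai(author_partner_id, policy, whitelist_ids):
    # 'all' opens the AI to everyone; any other policy (e.g. 'limit')
    # requires the author to be on the whitelist.
    if policy == 'all':
        return True
    return author_partner_id in whitelist_ids

print(may_use_ai(7, 'all', []))        # True
print(may_use_ai(7, 'limit', [3, 9]))  # False
print(may_use_ai(3, 'limit', [3, 9]))  # True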


@@ -125,7 +125,7 @@ class Channel(models.Model):
                # Do not handle a non-private channel with only 2 members
                pass
            else:
                partners = self.channel_partner_ids.sudo().filtered(lambda r: r.gpt_id)[:1]
                partners = self.channel_partner_ids.sudo().filtered(lambda r: r.gpt_id and r != message.author_id)[:1]
                user_id = partners.mapped('user_ids')[:1]
        elif not message.author_id.gpt_id:
            # Default to the first robot when nobody is @mentioned
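What the added "and r != message.author_id" condition changes, shown with plain Python objects instead of Odoo recordsets; Partner and pick_ai_partner are illustrative stand-ins, not module code.

# Illustrative sketch only, not module code.
from collections import namedtuple

Partner = namedtuple('Partner', 'id gpt_id')

def pick_ai_partner(channel_partners, author):
    # Pick the first GPT-bound partner that is not the author of the
    # incoming message, so a robot never answers its own posts.
    candidates = [p for p in channel_partners if p.gpt_id and p != author]
    return candidates[0] if candidates else None

bot = Partner(id=2, gpt_id=1)
human = Partner(id=5, gpt_id=False)
print(pick_ai_partner([bot, human], author=bot))    # None
print(pick_ai_partner([bot, human], author=human))  # Partner(id=2, gpt_id=1)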
@@ -139,16 +139,17 @@ class Channel(models.Model):
            partners = self.channel_partner_ids.sudo().filtered(lambda r: r.gpt_id)[:1]
            user_id = partners.mapped('user_ids')[:1]
        if user_id:
            # todo: do not check here; move this logic to get_ai_pre, and make sure direct non-AI replies are posted with is_ai=False
            gpt_policy = user_id.gpt_policy
            gpt_wl_partners = user_id.gpt_wl_partners
            is_allow = message.author_id.id in gpt_wl_partners.ids
            answer_id = user_id.partner_id
            if gpt_policy == 'all' or (gpt_policy == 'limit' and is_allow):
                ai = user_id.sudo().gpt_id
            elif user_id.gpt_id and not is_allow:
                # This AI is temporarily restricted to whitelisted users
                raise UserError(_('此Ai暂时未开放请联系管理员。'))
            ai = user_id.sudo().gpt_id
            # Do not check here; this logic was moved to get_ai_pre, and direct non-AI replies must be posted with is_ai=False
            # gpt_policy = user_id.gpt_policy
            # gpt_wl_partners = user_id.gpt_wl_partners
            # is_allow = message.author_id.id in gpt_wl_partners.ids
            # answer_id = user_id.partner_id
            # if gpt_policy == 'all' or (gpt_policy == 'limit' and is_allow):
            #     ai = user_id.sudo().gpt_id
            # elif user_id.gpt_id and not is_allow:
            #     # This AI is temporarily restricted to whitelisted users
            #     raise UserError(_('此Ai暂时未开放请联系管理员。'))
            chatgpt_channel_id = self.env.ref('app_chatgpt.channel_chatgpt')
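The apparent intent of this hunk is to stop raising UserError in the posting path and let the pre-check (get_ai_pre, in the first file) return the refusal text instead. A rough standalone sketch of that flow, assuming that reading of the change; ai_pre_check and handle_message are hypothetical names, and the English strings stand in for the translated messages.

# Illustrative sketch only, not module code.
def ai_pre_check(author_allowed, sensitive_word):
    if sensitive_word:
        return 'Friendly reminder: your message contains a sensitive word, please revise it.'
    if not author_allowed:
        return 'This AI is not open yet, please contact the administrator.'
    return None

def handle_message(author_allowed, sensitive_word, call_ai):
    refusal = ai_pre_check(author_allowed, sensitive_word)
    if refusal is not None:
        return refusal      # posted as a plain reply, is_ai stays False
    return call_ai()        # real model output, flagged as is_ai

print(handle_message(False, None, lambda: 'model answer'))  # refusal text
print(handle_message(True, None, lambda: 'model answer'))   # 'model answer'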
@@ -200,8 +201,19 @@ class Channel(models.Model):
            messages.append({"role": "user", "content": msg})
            msg_len = sum(len(str(m)) for m in messages)
            # The API accepts at most 8430 tokens
            if msg_len * 2 >= 8000:
                messages = [{"role": "user", "content": msg}]
            if msg_len * 2 > ai.max_send_char:
                messages = []
                if hasattr(channel, 'is_private') and channel.description:
                    messages.append({"role": "system", "content": channel.description})
                messages.append({"role": "user", "content": msg})
                msg_len = sum(len(str(m)) for m in messages)
                if msg_len * 2 > ai.max_send_char:
                    new_msg = channel.with_user(user_id).message_post(body=_('您所发送的提示词已超长。'), message_type='comment',
                                                                      subtype_xmlid='mail.mt_comment',
                                                                      parent_id=message.id)
            # if msg_len * 2 >= 8000:
            #     messages = [{"role": "user", "content": msg}]
            if sync_config == 'sync':
                self.get_ai_response(ai, messages, channel, user_id, message)
            else:
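A standalone restatement of the new length guard, assuming the same rough "characters * 2" token estimate used above; build_messages is a hypothetical helper, with max_send_char playing the role of ai.max_send_char.

# Illustrative sketch only, not module code.
def build_messages(history, system_prompt, user_msg, max_send_char=8000):
    messages = history + [{"role": "user", "content": user_msg}]
    if sum(len(str(m)) for m in messages) * 2 > max_send_char:
        # Too long with history: keep only the system prompt (if any)
        # and the latest user message.
        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": user_msg})
        if sum(len(str(m)) for m in messages) * 2 > max_send_char:
            # Still too long: the caller should warn the user instead
            # of calling the API.
            return None
    return messages

print(build_messages([], 'You are helpful.', 'Hi'))  # kept as-is
print(build_messages([], None, 'x' * 9000))          # None -> post a warning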
@@ -213,6 +225,7 @@ class Channel(models.Model):
    def _message_post_after_hook(self, message, msg_vals):
        if message.author_id.gpt_id:
            if msg_vals['body'] not in [_('Response Timeout, please speak again.'), _('温馨提示:您发送的内容含有敏感词,请修改内容后再向我发送。')]:
            if msg_vals['body'] not in [_('Response Timeout, please speak again.'), _('温馨提示:您发送的内容含有敏感词,请修改内容后再向我发送。'),
                                        _('此Ai暂时未开放请联系管理员。'), _('您所发送的提示词已超长。')]:
                message.is_ai = True
        return super(Channel, self)._message_post_after_hook(message, msg_vals)
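The rule in _message_post_after_hook, reduced to a predicate over plain strings; should_flag_as_ai is a hypothetical name and the English texts stand in for the translated warning messages compared above.

# Illustrative sketch only, not module code.
CANNED_REPLIES = {
    'Response Timeout, please speak again.',
    'Friendly reminder: your message contains a sensitive word, please revise it.',
    'This AI is not open yet, please contact the administrator.',
    'The prompt you sent is too long.',
}

def should_flag_as_ai(author_has_gpt, body):
    # Only genuine model output from a GPT-bound partner is flagged;
    # the canned warning texts are left with is_ai unset.
    return author_has_gpt and body not in CANNED_REPLIES

print(should_flag_as_ai(True, 'Here is the answer...'))             # True
print(should_flag_as_ai(True, 'The prompt you sent is too long.'))  # False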


@@ -12,6 +12,7 @@
<field name="openapi_api_key" password="True"/>
<field name="max_tokens" optional="show"/>
<field name="temperature"/>
<field name="max_send_char"/>
</tree>
</field>
</record>
@@ -40,6 +41,7 @@
<field name="frequency_penalty"/>
<field name="presence_penalty"/>
<field name="sys_content" placeholder="Role-playing and scene setting.Give the model instructions about how it should behave and any context it should reference when generating a response."/>
<field name="max_send_char"/>
</group>
<group>
<field name="ai_model"/>