mirror of https://github.com/guohuadeng/app-odoo.git
Merge remote-tracking branch 'origin/16.0' into 16.0
@@ -102,6 +102,8 @@ GPT-3 A set of models that can understand and generate natural language
     sensitive_words = fields.Text('Sensitive Words Plus', help='Sensitive word filtering. Separate keywords with a carriage return.')
     is_filtering = fields.Boolean('Filter Sensitive Words', default=False, help='Use base Filter in dir models/lib/sensi_words.txt')
 
+    max_send_char = fields.Integer('Max Send Char', help='Max Send Prompt Length', default=8000)
+
     def action_disconnect(self):
         requests.delete('https://chatgpt.com/v1/disconnect')
 
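This hunk introduces `max_send_char`, a per-robot character budget for the outgoing prompt (default 8000). Below is a minimal standalone sketch of how such a budget check can be applied to an OpenAI-style message list, reusing the `msg_len * 2` heuristic that appears later in this commit; `within_send_budget` is a hypothetical helper for illustration, not the module's code.

```python
# Hypothetical illustration of the max_send_char budget; not the module's code.
# The factor of 2 mirrors the `msg_len * 2` check introduced later in this commit.

def within_send_budget(messages, max_send_char=8000):
    """Return True if the serialized prompt fits the configured character budget."""
    msg_len = sum(len(str(m)) for m in messages)
    return msg_len * 2 <= max_send_char

if __name__ == "__main__":
    history = [{"role": "user", "content": "你好"}, {"role": "assistant", "content": "Hello!"}]
    print(within_send_budget(history))                     # True: well under the default budget
    print(within_send_budget(history, max_send_char=50))   # False: budget deliberately tiny
```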
@@ -117,6 +119,14 @@ GPT-3 A set of models that can understand and generate natural language
         if sensi is not None:
             _logger.error('==========敏感词:%s' % sensi['Keyword'])
             return _('温馨提示:您发送的内容含有敏感词,请修改内容后再向我发送。')
+        elif not author_id.gpt_id and answer_id.gpt_id:
+            user_id = answer_id.user_ids[:1]
+            gpt_policy = user_id.gpt_policy
+            gpt_wl_partners = user_id.gpt_wl_partners
+            is_allow = author_id.id in gpt_wl_partners.ids
+            if gpt_policy != 'all' and not is_allow:
+                # 暂时有限用户的Ai (AI restricted to whitelisted users for now)
+                return _('此Ai暂时未开放,请联系管理员。')
         else:
             return False
 
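The pre-check above relies on a sensitive-word lookup that returns a dict with a 'Keyword' key on a hit (see `sensi['Keyword']`), and the field help text points at a keyword file in models/lib/sensi_words.txt with one word per line. The lookup itself is not part of this hunk; the following is a minimal standalone sketch of such a filter, with hypothetical function names.

```python
# Minimal sketch of a keyword filter with the return shape the hunk expects
# (a dict with a 'Keyword' key on a hit, or None). The module bundles its own
# filter under models/lib/; this standalone version is illustrative only.
import os

def load_sensitive_words(path):
    """One keyword per line, as the field help text describes ('separate with a carriage return')."""
    if not os.path.exists(path):
        return []
    with open(path, encoding='utf-8') as f:
        return [line.strip() for line in f if line.strip()]

def check_sensitive(text, keywords):
    """Return {'Keyword': word} for the first keyword found in text, else None."""
    for word in keywords:
        if word in text:
            return {'Keyword': word}
    return None

if __name__ == "__main__":
    words = load_sensitive_words('sensi_words.txt') or ['forbidden', '敏感']
    print(check_sensitive('this is fine', words))            # None
    print(check_sensitive('contains 敏感 content', words))    # {'Keyword': '敏感'}
```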
@@ -125,7 +125,7 @@ class Channel(models.Model):
                 # 2个人的非私有频道不处理 (skip a two-member channel that is not private)
                 pass
             else:
-                partners = self.channel_partner_ids.sudo().filtered(lambda r: r.gpt_id)[:1]
+                partners = self.channel_partner_ids.sudo().filtered(lambda r: r.gpt_id and r != message.author_id)[:1]
                 user_id = partners.mapped('user_ids')[:1]
         elif not message.author_id.gpt_id:
             # 没有@时,默认第一个robot (no @mention: default to the first robot)
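The one-line change above excludes the message author from the candidate robots, so a robot posting into the channel cannot pick itself as the responder and answer its own messages. A plain-Python restatement of that selection rule, with made-up member data; `pick_robot` is a hypothetical helper, not part of the module.

```python
# Plain-Python analogue of the partner selection in this hunk. The member data
# and ids are invented; only the rule (has a gpt_id and is not the author) comes
# from the diff.

def pick_robot(channel_partners, author_id):
    """Return the first member bound to a GPT config that is not the author."""
    candidates = [p for p in channel_partners if p.get('gpt_id') and p['id'] != author_id]
    return candidates[0] if candidates else None

if __name__ == "__main__":
    members = [
        {'id': 7, 'name': 'ChatGPT bot', 'gpt_id': 1},
        {'id': 12, 'name': 'Alice', 'gpt_id': None},
    ]
    print(pick_robot(members, author_id=12))  # the bot answers Alice
    print(pick_robot(members, author_id=7))   # None: the bot never answers itself
```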
@@ -139,16 +139,17 @@ class Channel(models.Model):
             partners = self.channel_partner_ids.sudo().filtered(lambda r: r.gpt_id)[:1]
             user_id = partners.mapped('user_ids')[:1]
         if user_id:
-            # todo: 此处理不判断,将此处逻辑迁移至 get_ai_pre, 非ai回复的直接内容注意设置为 is_ai=false
-            gpt_policy = user_id.gpt_policy
-            gpt_wl_partners = user_id.gpt_wl_partners
-            is_allow = message.author_id.id in gpt_wl_partners.ids
-            answer_id = user_id.partner_id
-            if gpt_policy == 'all' or (gpt_policy == 'limit' and is_allow):
-                ai = user_id.sudo().gpt_id
-            elif user_id.gpt_id and not is_allow:
-                # 暂时有限用户的Ai
-                raise UserError(_('此Ai暂时未开放,请联系管理员。'))
+            ai = user_id.sudo().gpt_id
+            # 此处理不判断,将此处逻辑迁移至 get_ai_pre, 非ai回复的直接内容注意设置为 is_ai=false (no check here any more: the logic moved to get_ai_pre; direct non-AI replies must set is_ai=False)
+            # gpt_policy = user_id.gpt_policy
+            # gpt_wl_partners = user_id.gpt_wl_partners
+            # is_allow = message.author_id.id in gpt_wl_partners.ids
+            # answer_id = user_id.partner_id
+            # if gpt_policy == 'all' or (gpt_policy == 'limit' and is_allow):
+            #     ai = user_id.sudo().gpt_id
+            # elif user_id.gpt_id and not is_allow:
+            #     # 暂时有限用户的Ai
+            #     raise UserError(_('此Ai暂时未开放,请联系管理员。'))
 
         chatgpt_channel_id = self.env.ref('app_chatgpt.channel_chatgpt')
 
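This hunk comments out the inline policy check; per the new comment, the gating now happens in the pre-check (get_ai_pre), which the earlier hunk at line 119 extends. For clarity, the rule itself restated as a pure function; `may_use_ai` is a hypothetical name, while the 'all'/'limit' values and field names come from the commented-out condition.

```python
# Condensed restatement of the access rule seen in this commit: a robot answers
# everyone when its user's gpt_policy is 'all', and only whitelisted partners
# when it is 'limit'. Pure function for illustration; not the module's code.

def may_use_ai(gpt_policy, whitelist_partner_ids, author_partner_id):
    if gpt_policy == 'all':
        return True
    if gpt_policy == 'limit':
        return author_partner_id in whitelist_partner_ids
    return False

if __name__ == "__main__":
    print(may_use_ai('all', set(), 42))        # True
    print(may_use_ai('limit', {7, 42}, 42))    # True: whitelisted
    print(may_use_ai('limit', {7}, 42))        # False: gets the "not open yet" reply instead
```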
@@ -200,8 +201,19 @@ class Channel(models.Model):
             messages.append({"role": "user", "content": msg})
             msg_len = sum(len(str(m)) for m in messages)
             # 接口最大接收 8430 Token (the API accepts at most 8430 tokens)
-            if msg_len * 2 >= 8000:
-                messages = [{"role": "user", "content": msg}]
+            if msg_len * 2 > ai.max_send_char:
+                messages = []
+                if hasattr(channel, 'is_private') and channel.description:
+                    messages.append({"role": "system", "content": channel.description})
+                messages.append({"role": "user", "content": msg})
+                msg_len = sum(len(str(m)) for m in messages)
+                if msg_len * 2 > ai.max_send_char:
+                    new_msg = channel.with_user(user_id).message_post(body=_('您所发送的提示词已超长。'), message_type='comment',
+                                                                      subtype_xmlid='mail.mt_comment',
+                                                                      parent_id=message.id)
+
+            # if msg_len * 2 >= 8000:
+            #     messages = [{"role": "user", "content": msg}]
             if sync_config == 'sync':
                 self.get_ai_response(ai, messages, channel, user_id, message)
             else:
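When the accumulated history exceeds `max_send_char`, the new code rebuilds the prompt from just an optional system message (the channel description) plus the latest user message, and posts a "prompt too long" notice if even that minimal prompt is over budget. A standalone sketch of the same fallback, with the Odoo `message_post()` call replaced by a return value; `build_prompt` is a hypothetical helper, and the factor of 2 follows the "接口最大接收 8430 Token" comment.

```python
# Standalone sketch of the over-length fallback added in this hunk. The Odoo
# message_post() notification is replaced by a (messages, error) return value.

def build_prompt(history, msg, system_content=None, max_send_char=8000):
    """Return (messages, error): trim to a minimal prompt when over budget,
    or report an error when even the minimal prompt is too long."""
    messages = list(history) + [{"role": "user", "content": msg}]
    if sum(len(str(m)) for m in messages) * 2 > max_send_char:
        messages = []
        if system_content:
            messages.append({"role": "system", "content": system_content})
        messages.append({"role": "user", "content": msg})
        if sum(len(str(m)) for m in messages) * 2 > max_send_char:
            return messages, "prompt too long"
    return messages, None

if __name__ == "__main__":
    history = [{"role": "user", "content": "x" * 6000}]
    msgs, err = build_prompt(history, "short question", system_content="You are a helpful bot.")
    print(len(msgs), err)   # 2 None -> history dropped, system prompt + latest message kept
```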
@@ -213,6 +225,7 @@ class Channel(models.Model):
 
     def _message_post_after_hook(self, message, msg_vals):
         if message.author_id.gpt_id:
-            if msg_vals['body'] not in [_('Response Timeout, please speak again.'), _('温馨提示:您发送的内容含有敏感词,请修改内容后再向我发送。')]:
+            if msg_vals['body'] not in [_('Response Timeout, please speak again.'), _('温馨提示:您发送的内容含有敏感词,请修改内容后再向我发送。'),
+                                        _('此Ai暂时未开放,请联系管理员。'), _('您所发送的提示词已超长。')]:
                 message.is_ai = True
         return super(Channel, self)._message_post_after_hook(message, msg_vals)
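The hook now also skips the `is_ai` flag for the "not open yet" and "prompt too long" notices, so canned service messages are not stored as AI answers. A small sketch of the same membership test with the notice bodies gathered in one set; `should_flag_as_ai` is hypothetical, the strings are copied from the diff, and the English glosses in the comments are editorial.

```python
# The canned notices that should NOT be flagged as AI answers, as listed in this
# hunk. Collected into a set here only for illustration; the module compares
# against translated _() strings inline.

SERVICE_NOTICES = {
    'Response Timeout, please speak again.',
    '温馨提示:您发送的内容含有敏感词,请修改内容后再向我发送。',  # sensitive-word warning
    '此Ai暂时未开放,请联系管理员。',                            # AI not open to this user yet
    '您所发送的提示词已超长。',                                  # prompt too long
}

def should_flag_as_ai(author_has_gpt, body):
    """Mirror of the hook's rule: only real robot replies get is_ai=True."""
    return author_has_gpt and body not in SERVICE_NOTICES

if __name__ == "__main__":
    print(should_flag_as_ai(True, 'Here is your answer.'))      # True
    print(should_flag_as_ai(True, '您所发送的提示词已超长。'))     # False
    print(should_flag_as_ai(False, 'Here is your answer.'))     # False
```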
@@ -12,6 +12,7 @@
                     <field name="openapi_api_key" password="True"/>
                     <field name="max_tokens" optional="show"/>
                     <field name="temperature"/>
+                    <field name="max_send_char"/>
                 </tree>
             </field>
         </record>
@@ -40,6 +41,7 @@
                     <field name="frequency_penalty"/>
                     <field name="presence_penalty"/>
                     <field name="sys_content" placeholder="Role-playing and scene setting. Give the model instructions about how it should behave and any context it should reference when generating a response."/>
+                    <field name="max_send_char"/>
                 </group>
                 <group>
                     <field name="ai_model"/>