fix #I6YOA1 [app_chatgpt] Limit the length of the prompt being sent; add a max_send_char parameter to the robot

Chill
2023-05-15 16:20:13 +08:00
parent 16a2ddcd58
commit 2d8f4baf76
3 changed files with 20 additions and 4 deletions

View File

@@ -102,6 +102,8 @@ GPT-3 A set of models that can understand and generate natural language
     sensitive_words = fields.Text('Sensitive Words Plus', help='Sensitive word filtering. Separate keywords with a carriage return.')
     is_filtering = fields.Boolean('Filter Sensitive Words', default=False, help='Use base Filter in dir models/lib/sensi_words.txt')
+    max_send_char = fields.Integer('Max Send Char', help='Max Send Prompt Length', default=8000)

     def action_disconnect(self):
         requests.delete('https://chatgpt.com/v1/disconnect')
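
For context, a quick sketch of how the new cap could be tuned per robot from an Odoo shell; the model name 'ai.robot' and the search domain are assumptions for illustration only and are not taken from this diff:

# Hypothetical example: tune the prompt cap on one robot record.
# 'ai.robot' is an assumed model name; substitute the actual robot model of app_chatgpt.
robot = env['ai.robot'].search([], limit=1)
robot.write({'max_send_char': 6000})  # default is 8000, per the field definition above
env.cr.commit()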

View File

@@ -125,7 +125,7 @@ class Channel(models.Model):
                 # Skip non-private channels that have only two members
                 pass
             else:
-                partners = self.channel_partner_ids.sudo().filtered(lambda r: r.gpt_id)[:1]
+                partners = self.channel_partner_ids.sudo().filtered(lambda r: r.gpt_id and r != message.author_id)[:1]
                 user_id = partners.mapped('user_ids')[:1]
         elif not message.author_id.gpt_id:
             # Default to the first robot when there is no @mention
@@ -201,8 +201,19 @@ class Channel(models.Model):
             messages.append({"role": "user", "content": msg})
             msg_len = sum(len(str(m)) for m in messages)
             # The API accepts at most 8430 tokens
-            if msg_len * 2 >= 8000:
-                messages = [{"role": "user", "content": msg}]
+            if msg_len * 2 > ai.max_send_char:
+                messages = []
+                if hasattr(channel, 'is_private') and channel.description:
+                    messages.append({"role": "system", "content": channel.description})
+                messages.append({"role": "user", "content": msg})
+                msg_len = sum(len(str(m)) for m in messages)
+                if msg_len * 2 > ai.max_send_char:
+                    new_msg = channel.with_user(user_id).message_post(body=_('您所发送的提示词已超长。'), message_type='comment',
+                                                                      subtype_xmlid='mail.mt_comment',
+                                                                      parent_id=message.id)
+            # if msg_len * 2 >= 8000:
+            #     messages = [{"role": "user", "content": msg}]
             if sync_config == 'sync':
                 self.get_ai_response(ai, messages, channel, user_id, message)
             else:
@@ -214,6 +225,7 @@ class Channel(models.Model):
     def _message_post_after_hook(self, message, msg_vals):
         if message.author_id.gpt_id:
-            if msg_vals['body'] not in [_('Response Timeout, please speak again.'), _('温馨提示:您发送的内容含有敏感词,请修改内容后再向我发送。')]:
+            if msg_vals['body'] not in [_('Response Timeout, please speak again.'), _('温馨提示:您发送的内容含有敏感词,请修改内容后再向我发送。'),
+                                        _('此Ai暂时未开放请联系管理员。'), _('您所发送的提示词已超长。')]:
                 message.is_ai = True
         return super(Channel, self)._message_post_after_hook(message, msg_vals)
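
To make the control flow of the hunk above easier to follow outside Odoo, here is a minimal standalone sketch of the same length gate; build_messages, history and channel_description are invented names for this illustration and do not exist in the module:

# Simplified sketch of the gating logic added in this commit (plain Python, no Odoo).
def build_messages(history, msg, channel_description=None, max_send_char=8000):
    """Return (messages, error); error is set when the prompt is still too long."""
    messages = list(history) + [{"role": "user", "content": msg}]
    msg_len = sum(len(str(m)) for m in messages)
    # The commit compares msg_len * 2 against max_send_char, so with the default
    # of 8000 the serialized message list is effectively capped at 4000 characters.
    if msg_len * 2 > max_send_char:
        # Drop the history, keep only the system prompt plus the latest user message.
        messages = []
        if channel_description:
            messages.append({"role": "system", "content": channel_description})
        messages.append({"role": "user", "content": msg})
        msg_len = sum(len(str(m)) for m in messages)
        if msg_len * 2 > max_send_char:
            # At this point the module posts _('您所发送的提示词已超长。') back to the channel.
            return None, "prompt too long"
    return messages, None

Dropping the history first keeps the robot responsive in long channels, while the second check still rejects a single prompt that is over the limit on its own.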

View File

@@ -12,6 +12,7 @@
<field name="openapi_api_key" password="True"/> <field name="openapi_api_key" password="True"/>
<field name="max_tokens" optional="show"/> <field name="max_tokens" optional="show"/>
<field name="temperature"/> <field name="temperature"/>
<field name="max_send_char"/>
</tree> </tree>
</field> </field>
</record> </record>
@@ -40,6 +41,7 @@
<field name="frequency_penalty"/> <field name="frequency_penalty"/>
<field name="presence_penalty"/> <field name="presence_penalty"/>
<field name="sys_content" placeholder="Role-playing and scene setting.Give the model instructions about how it should behave and any context it should reference when generating a response."/> <field name="sys_content" placeholder="Role-playing and scene setting.Give the model instructions about how it should behave and any context it should reference when generating a response."/>
<field name="max_send_char"/>
</group> </group>
<group> <group>
<field name="ai_model"/> <field name="ai_model"/>