update gpt

Ivan Office
2023-09-22 20:47:18 +08:00
parent 6ef2c27b4a
commit 8db7fabe56
11 changed files with 91 additions and 100 deletions


@@ -52,7 +52,7 @@ GPT-3 A set of models that can understand and generate natural language
    # 3. top_p (0-1, coherence): similar to temperature, this also controls how "novel" the reply is. The difference is that top_p caps the cumulative probability of the highest-probability candidate tokens; smaller values give more conservative replies, larger values give more novel ones.
    # 4. frequency_penalty (-2~2): controls how heavily words that already appear frequently in the reply are penalized. The bot tries to avoid over-used words to keep replies varied and fresh.
    # 5. presence_penalty (-2~2): the counterpart of frequency_penalty; it penalizes words that have already appeared at all, so the bot leans toward words it has not used yet, improving variety and novelty.
-    max_tokens = fields.Integer('Max response', default=600,
+    max_tokens = fields.Integer('Max Response', default=600,
        help="""
Set a limit on the number of tokens per model response.
The API supports a maximum of 4000 tokens shared between the prompt
@@ -66,7 +66,7 @@ GPT-3 A set of models that can understand and generate natural language
Increasing the temperature will result in more unexpected or creative responses.
Try adjusting temperature or Top P but not both.
""")
-    top_p = fields.Float('Top probabilities', default=0.6,
+    top_p = fields.Float('Top Probabilities', default=0.6,
        help="""
Similar to temperature, this controls randomness but uses a different method.
Lowering Top P will narrow the model's token selection to likelier tokens.
@@ -74,7 +74,7 @@ GPT-3 A set of models that can understand and generate natural language
Try adjusting temperature or Top P but not both.
""")
    # Discourage overuse of common words
-    frequency_penalty = fields.Float('Frequency penalty', default=1,
+    frequency_penalty = fields.Float('Frequency Penalty', default=1,
        help="""
Reduce the chance of repeating a token proportionally based on how often it has appeared in the text so far.
This decreases the likelihood of repeating the exact same text in a response.
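
For context, these configuration fields map one-to-one onto the sampling parameters of OpenAI's chat completions endpoint. The sketch below shows how a record carrying them might be forwarded to the API; the ask_gpt helper, the config record and the model name are assumptions for illustration, not code from this commit, and the client shown is the pre-1.0 openai package that was current at the time.

import openai  # pre-1.0 openai client, where openai.ChatCompletion is the chat API

def ask_gpt(config, prompt, api_key):
    """Hypothetical helper: forward the configured sampling parameters to the API."""
    openai.api_key = api_key
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",                        # assumed model name
        messages=[{"role": "user", "content": prompt}],
        max_tokens=config.max_tokens,                  # 'Max Response'
        temperature=config.temperature,                # creativity, 0-2
        top_p=config.top_p,                            # 'Top Probabilities', 0-1
        frequency_penalty=config.frequency_penalty,    # -2~2, discourage frequent tokens
        presence_penalty=config.presence_penalty,      # -2~2, discourage any repeated token
    )
    return response["choices"][0]["message"]["content"]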


@@ -27,32 +27,32 @@ class Channel(models.Model):
        domain=[('gpt_id', '!=', None), ('is_chat_private', '=', True)])
    description = fields.Char('Ai Character', help="The AI will act as the character you set here.")
    set_max_tokens = fields.Selection([
-        ('300', '简短'),
-        ('600', '标准'),
-        ('1000', '中等'),
-        ('2000', '长篇'),
-        ('3000', '超长篇'),
+        ('300', 'Short'),
+        ('600', 'Standard'),
+        ('1000', 'Medium'),
+        ('2000', 'Long'),
+        ('3000', 'Overlength'),
        ('32000', '32K'),
-    ], string='响应篇幅限制', default='600', help="The larger the value, the longer the reply and the higher the cost.")
+    ], string='Max Response', default='600', help="The larger the value, the longer the reply and the higher the cost.")
    set_chat_count = fields.Selection([
-        ('none', 'Ai自动判断'),
+        ('none', 'Ai Auto'),
        ('1', '1 Standard'),
        ('3', '3 Strongly related'),
        ('5', '5 Very strongly related'),
-    ], string="上下文相关", default='1', help="0-5: once set, the most recent n exchanges are sent to the AI, which helps it answer better, but larger values also cost more.")
+    ], string="History Count", default='1', help="0-5: once set, the most recent n exchanges are sent to the AI, which helps it answer better, but larger values also cost more.")
    set_temperature = fields.Selection([
        ('2', 'Wild imagination'),
        ('1.5', 'Creative'),
        ('1', 'Standard'),
        ('0.6', 'Rational'),
        ('0.1', 'Conservative'),
-    ], string="创造性", default='1', help="0-2: the larger the value, the more imaginative; the smaller, the more conservative.")
+    ], string="Set Temperature", default='1', help="0-2: the larger the value, the more imaginative; the smaller, the more conservative.")
    set_top_p = fields.Selection([
        ('0.9', 'Rigorous, conventional thinking'),
        ('0.6', 'Standard reasoning'),
        ('0.4', 'Leaps of thought'),
        ('0.1', 'Casual'),
-    ], string="思维连贯性", default='0.6', help="0-1: the larger the value, the more the replies lean toward mainstream, coherent thinking.")
+    ], string="Top Probabilities", default='0.6', help="0-1: the larger the value, the more the replies lean toward mainstream, coherent thinking.")
    # Discourage overuse of common words
    set_frequency_penalty = fields.Selection([
        ('2', 'Pedantic - obscure'),
@@ -61,7 +61,7 @@ class Channel(models.Model):
        ('0.1', 'Fewer common words'),
        ('-1', 'Plain and accessible'),
        ('-2', 'Everyday speech'),
-    ], string='语言风格', default='1', help="-2~2: the larger the value, the fewer common words are used.")
+    ], string='Frequency Penalty', default='1', help="-2~2: the larger the value, the fewer common words are used.")
    set_presence_penalty = fields.Selection([
        ('2', 'Compulsively varied'),
        ('1.5', 'Novelty-seeking'),
@@ -69,7 +69,7 @@ class Channel(models.Model):
        ('0.1', 'Allow normal repetition'),
        ('-1', 'Allow more repetition'),
        ('-2', 'Lean into repetition'),
-    ], string='用词多样性', default='1', help="-2~2: the larger the value, the fewer repeated words.")
+    ], string='Presence penalty', default='1', help="-2~2: the larger the value, the fewer repeated words.")
    # todo: derive this with a compute method?
    max_tokens = fields.Integer('Max Response Tokens', default=600, help="The larger the value, the longer the reply and the higher the cost.")
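
The trailing todo asks whether max_tokens could be derived instead of stored as an independent integer. A minimal sketch of one way that todo might be resolved, under the assumption that the field is switched to a stored compute; the _inherit name, the _compute_max_tokens method and the _gpt_params helper are hypothetical and not part of this commit. The same conversion pattern turns the other Selection presets (stored as strings) into the numeric values the API expects.

from odoo import api, fields, models

class Channel(models.Model):
    _inherit = 'mail.channel'  # assumed model name; the diff does not show _name/_inherit

    # Hypothetical resolution of the "use compute?" todo: derive the integer
    # from the 'Max Response' preset instead of keeping a second, independent value.
    max_tokens = fields.Integer('Max Response Tokens', compute='_compute_max_tokens', store=True)

    @api.depends('set_max_tokens')
    def _compute_max_tokens(self):
        for channel in self:
            channel.max_tokens = int(channel.set_max_tokens or '600')

    def _gpt_params(self):
        """Convert the Selection presets to API-ready numbers (illustrative helper)."""
        self.ensure_one()
        return {
            'max_tokens': int(self.set_max_tokens or '600'),
            'temperature': float(self.set_temperature or '1'),
            'top_p': float(self.set_top_p or '0.6'),
            'frequency_penalty': float(self.set_frequency_penalty or '1'),
            'presence_penalty': float(self.set_presence_penalty or '1'),
            # 'none' means "let the AI decide", i.e. no fixed history window.
            'history_count': None if self.set_chat_count == 'none' else int(self.set_chat_count),
        }

A send routine could then pass channel._gpt_params() straight into an API call like the one sketched after the first file above, with 'history_count' deciding how many of the most recent exchanges to replay as prior messages.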