Mirror of https://github.com/guohuadeng/app-odoo.git (synced 2025-02-23 04:11:36 +02:00)

Commit: update chatgpt, deepseek, misc
@@ -18,7 +18,7 @@ class AiRobot(models.Model):
    _name = 'ai.robot'
    _description = 'Ai Robot'
    _order = 'sequence, name'

    name = fields.Char(string='Name', translate=True, required=True)
    provider = fields.Selection(string="AI Provider", selection=[('openai', 'OpenAI'), ('azure', 'Azure')],
                                required=True, default='openai', change_default=True)
@@ -39,8 +39,9 @@ class AiRobot(models.Model):
        ('code-davinci-002', 'Chatgpt 2 Code Optimized'),
        ('text-davinci-002', 'Chatgpt 2 Davinci'),
        ('dall-e2', 'Dall-E Image'),
-    ], default='gpt-3.5-turbo-0125',
-        help="""
+    ], default='gpt-4o',
+        help="""
+GPT-4o: It is multimodal (accepting text or image inputs and outputting text), and it has the same high intelligence as GPT-4 Turbo but is much more efficient—it generates text 2x faster and is 50% cheaper.
GPT-4: Can understand Image, generate natural language or code.
GPT-3.5: A set of models that improve on GPT-3 and can understand as well as generate natural language or code
DALL·E: A model that can generate and edit images given a natural language prompt
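
For context only, a minimal sketch (not part of this commit) of how such a robot record could be created from an Odoo shell, assuming the field names used elsewhere in this file; all values are illustrative:

# Illustrative only: create an ai.robot record with the fields shown above.
# 'openapi_api_key' is assumed from its later use as self.openapi_api_key.
robot = env['ai.robot'].create({
    'name': 'GPT-4o Bot',
    'provider': 'openai',
    'ai_model': 'gpt-4o',          # the new default after this change
    'openapi_api_key': 'sk-...',   # placeholder key
})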
@@ -110,36 +111,36 @@ GPT-3 A set of models that can understand and generate natural language
    # end of GPT parameters
    endpoint = fields.Char('End Point', default='https://api.openai.com/v1/chat/completions')
    engine = fields.Char('Engine', help='If use Azure, Please input the Model deployment name.')
-    api_version = fields.Char('API Version', default='2022-12-01')
+    api_version = fields.Char('API Version', default='gpt-4o')
    ai_timeout = fields.Integer('Timeout(seconds)', help="Connect timeout for Ai response", default=120)
    sequence = fields.Integer('Sequence', help="Determine the display order", default=10)
    sensitive_words = fields.Text('Sensitive Words Plus', help='Sensitive word filtering. Separate keywords with a carriage return.')
    is_filtering = fields.Boolean('Filter Sensitive Words', default=False, help='Use base Filter in dir models/lib/sensi_words.txt')

    max_send_char = fields.Integer('Max Send Char', help='Max Send Prompt Length', default=8000)
    image_avatar = fields.Image('Avatar')
    partner_ids = fields.One2many('res.partner', 'gpt_id', string='Partner')
    partner_count = fields.Integer('#Partner', compute='_compute_partner_count', store=False)
    active = fields.Boolean('Active', default=True)

    def _compute_partner_count(self):
        for rec in self:
            rec.partner_count = len(rec.partner_ids)

    def action_disconnect(self):
        requests.delete('https://chatgpt.com/v1/disconnect')

    def get_ai_pre(self, data, author_id=False, answer_id=False, param={}):
        # hook; returning False means everything is fine
        return False

    def get_ai(self, data, author_id=False, answer_id=False, param={}):
        # generic entry point
        # author_id: partner_id record of the requester
        # answer_id: partner_id record of the responder
        # param: extra parameters as a dict
        # output adjusted to two values: res_post (the detailed content) and is_ai (whether the response came from the AI)

        self.ensure_one()
        # pre hook: usually returns False; returns response content when something is wrong, e.g. sensitive-word handling
        res_pre = self.get_ai_pre(data, author_id, answer_id, param)
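
The get_ai_pre hook above is meant to be overridden by extending modules; a minimal sketch of such an override, under the assumption (suggested by the comments) that a non-False return value short-circuits get_ai with that content:

# Hypothetical override in an extending module (illustration only).
from odoo import models, _

class AiRobotCustom(models.Model):
    _inherit = 'ai.robot'

    def get_ai_pre(self, data, author_id=False, answer_id=False, param={}):
        # Block the request when the prompt contains any configured sensitive word.
        blocked = (self.sensitive_words or '').splitlines()
        if isinstance(data, str) and any(w and w in data for w in blocked):
            return _('Your message contains blocked words.')
        return super().get_ai_pre(data, author_id, answer_id, param)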
@@ -149,19 +150,19 @@ GPT-3 A set of models that can understand and generate natural language
        if not hasattr(self, 'get_%s' % self.provider):
            res = _('No robot provider found')
            return res, {}, False

        res = getattr(self, 'get_%s' % self.provider)(data, author_id, answer_id, param)
        # post hook: returns the processed content
        res_post, usage, is_ai = self.get_ai_post(res, author_id, answer_id, param)
        return res_post, usage, is_ai

    def get_ai_origin(self, data, author_id=False, answer_id=False, param={}):
        # generic method
        # author_id: partner_id record of the requester
        # answer_id: partner_id record of the responder
        # param: extra parameters as a dict
        # output adjusted to two values: res_post (the detailed content) and is_ai (whether the response came from the AI)

        self.ensure_one()
        # pre hook: usually returns False; returns response content when something is wrong, e.g. sensitive-word handling
        res_pre = self.get_ai_pre(data, author_id, answer_id, param)
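
As a usage sketch only (the call sites are outside this diff), a consumer is expected to unpack the three values get_ai returns; names below are placeholders:

# Illustrative call site: `robot` is an ai.robot record, `messages` is either a
# plain prompt string or a chat-style message list (placeholder variables).
res_post, usage, is_ai = robot.get_ai(
    messages,
    author_id=request_partner,   # hypothetical requester partner record
    answer_id=robot_partner,     # hypothetical responder partner record
    param={'temperature': 0.2},  # per-call values take precedence over robot defaults
)
if is_ai:
    _logger.info('AI answered, token usage: %s', usage)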
@@ -171,19 +172,18 @@ GPT-3 A set of models that can understand and generate natural language
        if not hasattr(self, 'get_%s' % self.provider):
            res = _('No robot provider found')
            return res, {}, False

        res = getattr(self, 'get_%s' % self.provider)(data, author_id, answer_id, param)
        # post hook: returns the processed content
        res_post, usage, is_ai = self.get_ai_post(res, author_id, answer_id, param)
        return res

    def get_ai_post(self, res, author_id=False, answer_id=False, param=None):
        # hook; the advanced version replaces this
        if param is None:
            param = {}
        if not res or not author_id or (not isinstance(res, list) and not isinstance(res, dict)):
            return res, False, False
        # if the return value is an object (list/dict), it is an AI response
        usage = content = data = None
        try:
            if self.provider == 'openai':
@@ -214,7 +214,7 @@ GPT-3 A set of models that can understand and generate natural language
        if sys_content:
            return {"role": "system", "content": sys_content}
        return {}

    def get_ai_model_info(self):
        self.ensure_one()
        headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.openapi_api_key}"}
@@ -222,7 +222,7 @@ GPT-3 A set of models that can understand and generate natural language
        o_url = "https://api.openai.com/v1/models/%s" % self.ai_model
        if self.endpoint:
            o_url = self.endpoint.replace("/chat/completions", "") + "/models/%s" % self.ai_model

        response = requests.get(o_url, headers=headers, timeout=R_TIMEOUT)
        response.close()
        if response:
@@ -231,32 +231,29 @@ GPT-3 A set of models that can understand and generate natural language
        else:
            r_text = 'No response.'
        raise UserError(r_text)

    def get_ai_list_model(self):
        self.ensure_one()
-        if self.provider == 'openai':
-            headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.openapi_api_key}"}
-            R_TIMEOUT = self.ai_timeout or 120
-            o_url = "https://api.openai.com/v1/models"
-            if self.endpoint:
-                o_url = self.endpoint.replace("/chat/completions", "") + "/models"
-            response = requests.get(o_url, headers=headers, timeout=R_TIMEOUT)
-            response.close()
-            if response:
-                res = response.json()
-                r_text = json.dumps(res, indent=2)
-            else:
-                r_text = 'No response.'
+        headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.openapi_api_key}"}
+        R_TIMEOUT = self.ai_timeout or 120
+        o_url = "https://api.openai.com/v1/models"
+        if self.endpoint:
+            o_url = self.endpoint.replace("/chat/completions", "") + "/models"
+        response = requests.get(o_url, headers=headers, timeout=R_TIMEOUT)
+        response.close()
+        if response:
+            res = response.json()
+            r_text = json.dumps(res, indent=2)
+        else:
+            r_text = 'No response.'
        raise UserError(r_text)

    def get_openai(self, data, author_id, answer_id, param={}):
        self.ensure_one()
        headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.openapi_api_key}"}
        R_TIMEOUT = self.ai_timeout or 120
        o_url = self.endpoint or "https://api.openai.com/v1/chat/completions"

        # handle incoming params; values passed in take precedence over the robot defaults
        max_tokens = param.get('max_tokens') if param.get('max_tokens') else self.max_tokens
        temperature = param.get('temperature') if param.get('temperature') else self.temperature
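
The get_ai_list_model method above simply proxies the provider's model-listing endpoint; a standalone sketch of the same request, with placeholder key and endpoint values:

# Standalone sketch of the same models request (placeholder values).
import json
import requests

api_key = "sk-..."  # placeholder
endpoint = "https://api.openai.com/v1/chat/completions"
models_url = endpoint.replace("/chat/completions", "") + "/models"
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}

resp = requests.get(models_url, headers=headers, timeout=120)
print(json.dumps(resp.json(), indent=2))  # same payload the method surfaces via UserError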
@@ -264,7 +261,7 @@ GPT-3 A set of models that can understand and generate natural language
        frequency_penalty = param.get('frequency_penalty') if param.get('frequency_penalty') else self.frequency_penalty
        presence_penalty = param.get('presence_penalty') if param.get('presence_penalty') else self.presence_penalty
        request_timeout = param.get('request_timeout') if param.get('request_timeout') else self.ai_timeout

        if self.stop:
            stop = self.stop.split(',')
        else:
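
The repeated `param.get(x) if param.get(x) else self.x` lines resolve each setting as call-time value first, robot default second; a compact equivalent, shown only as an illustration (falsy values fall back to the default, exactly as in the original expressions):

# Illustration only, not in the module: same precedence in one helper.
def resolve(param, record, names):
    # call-time param wins; otherwise fall back to the field on the ai.robot record
    return {n: param.get(n) or getattr(record, n) for n in names}

opts = resolve(param, self, ['max_tokens', 'temperature', 'top_p',
                             'frequency_penalty', 'presence_penalty'])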
@@ -294,7 +291,7 @@ GPT-3 A set of models that can understand and generate natural language
            client = OpenAI(
                api_key=self.openapi_api_key,
                timeout=R_TIMEOUT
            )
            response = client.chat.completions.create(
                messages=data,
                model=self.ai_model,
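
For reference, a minimal standalone sketch of the same call with the openai v1 Python SDK, using placeholder values; the real method passes the robot's stored key, model and timeout:

# Minimal sketch with the openai v1 SDK (placeholder values).
from openai import OpenAI

client = OpenAI(api_key="sk-...", timeout=120)
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)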
@@ -304,7 +301,7 @@ GPT-3 A set of models that can understand and generate natural language
            return res
        else:
            _logger.warning('=====================openai output data: %s' % response.json())

        return _("Response Timeout, please speak again.")

    def get_azure(self, data, author_id, answer_id, param={}):
@@ -312,10 +309,10 @@ GPT-3 A set of models that can understand and generate natural language
        # only for azure
        if not self.endpoint:
            raise UserError(_("Please Set your AI robot's endpoint first."))

        if not self.api_version:
            raise UserError(_("Please Set your AI robot's API Version first."))

        if self.stop:
            stop = self.stop.split(',')
        else:
@@ -324,22 +321,22 @@ GPT-3 A set of models that can understand and generate natural language
            messages = data
        else:
            messages = [{"role": "user", "content": data}]

        # handle incoming params; values passed in take precedence over the robot defaults
        max_tokens = param.get('max_tokens') if param.get('max_tokens') else self.max_tokens
        temperature = param.get('temperature') if param.get('temperature') else self.temperature
        top_p = param.get('top_p') if param.get('top_p') else self.top_p
        frequency_penalty = param.get('frequency_penalty') if param.get('frequency_penalty') else self.frequency_penalty
        presence_penalty = param.get('presence_penalty') if param.get('presence_penalty') else self.presence_penalty
-        request_timeout= param.get('request_timeout') if param.get('request_timeout') else self.ai_timeout
+        request_timeout = param.get('request_timeout') if param.get('request_timeout') else self.ai_timeout

        # AI role / system prompt: set it here if it has not been set yet
        if messages[0].get('role') != 'system':
            sys_content = self.get_ai_system(param.get('sys_content'))
            if sys_content:
                messages.insert(0, sys_content)
        # unchanged for now

        client = AzureOpenAI(
            api_version=self.api_version,
            azure_endpoint=self.endpoint,
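
A minimal standalone sketch of the Azure variant, assuming the openai v1 SDK and placeholder endpoint, key, API version and deployment name (the real method takes these from the robot's endpoint, api_version, openapi_api_key and engine fields); it also shows the system message being prepended, as the code above does:

# Minimal Azure sketch (placeholder values; the deployment name plays the model role).
from openai import AzureOpenAI

client = AzureOpenAI(
    api_key="azure-key-placeholder",
    api_version="2024-02-01",                       # example API version string
    azure_endpoint="https://odoo.openai.azure.com",
)
messages = [{"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello"}]
response = client.chat.completions.create(model="my-deployment", messages=messages)
print(response.choices[0].message.content)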
@@ -372,14 +369,14 @@ GPT-3 A set of models that can understand and generate natural language
        else:
            _logger.warning('=====================azure output data: %s' % response.json())
            return _("Response Timeout, please speak again.")

    @api.onchange('provider')
    def _onchange_provider(self):
        if self.provider == 'openai':
            self.endpoint = 'https://api.openai.com/v1/chat/completions'
        elif self.provider == 'azure':
            self.endpoint = 'https://odoo.openai.azure.com'

        if self.provider:
            # load the avatar
            module_path = modules.get_module_path('app_chatgpt', display_warning=False)
@@ -388,7 +385,7 @@ GPT-3 A set of models that can understand and generate natural language
            if path:
                image_file = tools.file_open(path, 'rb')
                self.image_avatar = base64.b64encode(image_file.read())

    @api.onchange('set_ai_model')
    def _onchange_set_ai_model(self):
        if self.set_ai_model: