[17] app_ai_kimi

Author: Chill
Date:   2025-02-11 17:32:15 +08:00
Parent: 84cc7d284b
Commit: 5f52a6dccd
5 changed files with 63 additions and 30 deletions

Changed file 1 of 5 (module manifest, version bump)

@@ -10,7 +10,7 @@
 {
     'name': 'ChatGPT4, China Ali,AiGC Center.Ai服务中心聚合全网Ai',
-    'version': '17.0.25.02.10',
+    'version': '17.0.25.02.11',
     'author': 'odooai.cn',
     'company': 'odooai.cn',
     'maintainer': 'odooai.cn',

Changed file 2 of 5 (AI robot model)

@@ -132,13 +132,17 @@ GPT-3 A set of models that can understand and generate natural language
         # hook; everything normal
         return False
 
+    def get_msg_file_content(self, message):
+        # hook
+        return False
+
     def get_ai(self, data, author_id=False, answer_id=False, param={}):
         # Generic entry point
         # author_id: partner_id record of the requester
         # answer_id: partner_id record of the responder
         # param: parameters passed as a dict
         # Output adjusted to 2 values: res_post (detailed content), is_ai (whether the reply came from the AI)
         self.ensure_one()
         # Pre-hook: normally returns False; on a problem it returns the response content, used e.g. for sensitive-word filtering
         res_pre = self.get_ai_pre(data, author_id, answer_id, param)
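Note on the new hook: the base get_msg_file_content() deliberately returns False, and the discuss-channel code further down only builds an attachment message when an override returns something truthy. A minimal sketch of what such an override could look like in an extension module, assuming the model is named 'ai.robot' (inferred from ai_robot_views.xml, not confirmed by this diff), with image attachments passed on as base64 data URLs and other files falling back to naively decoded text:

from odoo import models


class AiRobot(models.Model):
    _inherit = 'ai.robot'  # assumed model name

    def get_msg_file_content(self, message):
        # Mirror the channel code: only the first attachment is considered.
        attachment = message.attachment_ids[:1]
        if not attachment:
            return False
        if attachment.mimetype in ('image/jpeg', 'image/png', 'image/gif', 'image/bmp', 'image/webp'):
            # attachment.datas is already base64; wrap it as a data URL so it can be
            # used directly as the "image_url" of a vision-style chat message.
            return 'data:%s;base64,%s' % (attachment.mimetype, attachment.datas.decode())
        # Non-image files: a real override would extract text properly (PDF, DOCX, ...);
        # decoding the raw bytes is only a naive fallback for this sketch.
        return attachment.raw.decode('utf-8', errors='ignore')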
@@ -148,7 +152,7 @@ GPT-3 A set of models that can understand and generate natural language
         if not hasattr(self, 'get_%s' % self.provider):
             res = _('No robot provider found')
             return res, {}, False
         res = getattr(self, 'get_%s' % self.provider)(data, author_id, answer_id, param)
         # Post-hook: returns the processed content
         res_post, usage, is_ai = self.get_ai_post(res, author_id, answer_id, param)
@@ -175,7 +179,7 @@ GPT-3 A set of models that can understand and generate natural language
         # Post-hook: returns the processed content
         res_post, usage, is_ai = self.get_ai_post(res, author_id, answer_id, param)
         return res
 
     def get_ai_post(self, res, author_id=False, answer_id=False, param=None):
         # hook; the advanced edition is expected to override this
         if param is None:
@@ -202,14 +206,14 @@ GPT-3 A set of models that can understand and generate natural language
except Exception as e: except Exception as e:
_logger.error('==========app_chatgpt get_ai_post Error: %s' % e) _logger.error('==========app_chatgpt get_ai_post Error: %s' % e)
return res, False, False return res, False, False
def get_ai_system(self, content=None): def get_ai_system(self, content=None):
# 获取基础ai角色设定, role system # 获取基础ai角色设定, role system
sys_content = content or self.sys_content sys_content = content or self.sys_content
if sys_content: if sys_content:
return {"role": "system", "content": sys_content} return {"role": "system", "content": sys_content}
return {} return {}
def get_ai_model_info(self): def get_ai_model_info(self):
self.ensure_one() self.ensure_one()
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.openapi_api_key}"} headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.openapi_api_key}"}
@@ -217,7 +221,7 @@ GPT-3 A set of models that can understand and generate natural language
         o_url = "https://api.openai.com/v1/models/%s" % self.ai_model
         if self.endpoint:
             o_url = self.endpoint.replace("/chat/completions", "") + "/models/%s" % self.ai_model
         response = requests.get(o_url, headers=headers, timeout=R_TIMEOUT)
         response.close()
         if response:
@@ -259,7 +263,7 @@ GPT-3 A set of models that can understand and generate natural language
         frequency_penalty = param.get('frequency_penalty') if param.get('frequency_penalty') else self.frequency_penalty
         presence_penalty = param.get('presence_penalty') if param.get('presence_penalty') else self.presence_penalty
         request_timeout = param.get('request_timeout') if param.get('request_timeout') else self.ai_timeout
         if self.stop:
             stop = self.stop.split(',')
         else:
@@ -299,7 +303,7 @@ GPT-3 A set of models that can understand and generate natural language
             return res
         else:
             _logger.warning('=====================openai output data: %s' % response.json())
         return _("Response Timeout, please speak again.")
 
     def get_azure(self, data, author_id, answer_id, param={}):
@@ -307,10 +311,10 @@ GPT-3 A set of models that can understand and generate natural language
# only for azure # only for azure
if not self.endpoint: if not self.endpoint:
raise UserError(_("Please Set your AI robot's endpoint first.")) raise UserError(_("Please Set your AI robot's endpoint first."))
if not self.api_version: if not self.api_version:
raise UserError(_("Please Set your AI robot's API Version first.")) raise UserError(_("Please Set your AI robot's API Version first."))
if self.stop: if self.stop:
stop = self.stop.split(',') stop = self.stop.split(',')
else: else:
@@ -334,7 +338,7 @@ GPT-3 A set of models that can understand and generate natural language
         if sys_content:
             messages.insert(0, sys_content)
         # unchanged for now
         client = AzureOpenAI(
             api_version=self.api_version,
             azure_endpoint=self.endpoint,
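For reference, the AzureOpenAI client constructed in this hunk comes from the openai>=1.x SDK; a minimal standalone sketch of how such a client is driven (endpoint, key, API version and deployment name below are placeholders, not values taken from this module):

from openai import AzureOpenAI

client = AzureOpenAI(
    api_version='2024-02-01',                       # placeholder API version
    azure_endpoint='https://odoo.openai.azure.com',  # placeholder endpoint
    api_key='YOUR-AZURE-OPENAI-KEY',                # placeholder key
)
completion = client.chat.completions.create(
    model='gpt-4o',                                 # the Azure *deployment* name
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello"},
    ],
)
print(completion.choices[0].message.content)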
@@ -366,7 +370,7 @@ GPT-3 A set of models that can understand and generate natural language
self.endpoint = 'https://api.openai.com/v1/chat/completions' self.endpoint = 'https://api.openai.com/v1/chat/completions'
elif self.provider == 'azure': elif self.provider == 'azure':
self.endpoint = 'https://odoo.openai.azure.com' self.endpoint = 'https://odoo.openai.azure.com'
if self.provider: if self.provider:
# 取头像 # 取头像
module_path = modules.get_module_path('app_chatgpt', display_warning=False) module_path = modules.get_module_path('app_chatgpt', display_warning=False)
@@ -375,7 +379,7 @@ GPT-3 A set of models that can understand and generate natural language
         if path:
             image_file = tools.file_open(path, 'rb')
             self.image_avatar = base64.b64encode(image_file.read())
 
     @api.onchange('set_ai_model')
     def _onchange_set_ai_model(self):
         if self.set_ai_model:

Changed file 3 of 5 (discuss channel model)

@@ -241,7 +241,7 @@ class Channel(models.Model):
         if hasattr(ai, 'is_translator') and ai.is_translator and ai.ai_model == 'translator':
             return rdata
         chatgpt_channel_id = self.env.ref('app_chatgpt.channel_chatgpt')
         if message.body == _('<div class="o_mail_notification">joined the channel</div>'):
             msg = _("Please warmly welcome our new partner %s and send him the best wishes.") % message.author_id.name
         else:
@@ -277,7 +277,7 @@ class Channel(models.Model):
             chat_count = 1
         else:
             chat_count = chat_count
         if author_id != answer_id.id and self.channel_type == 'chat':
             # one-to-one (private) chat
             _logger.info(f'私聊:author_id:{author_id},partner_chatgpt.id:{answer_id.id}')
@@ -291,24 +291,53 @@ class Channel(models.Model):
             channel = self.env[msg_vals.get('model')].browse(msg_vals.get('res_id'))
             if hasattr(channel, 'is_private') and channel.description:
                 messages.append({"role": "system", "content": channel.description})
             try:
+                # build the system prompt
+                sys_content = '%s%s' % (channel.description if channel.description else "", add_sys_content if add_sys_content else "")
+                if len(sys_content):
+                    messages.append({"role": "system", "content": sys_content})
                 c_history = self.get_openai_context(channel.id, author_id, answer_id, openapi_context_timeout, chat_count)
                 if c_history:
                     messages += c_history
-                messages.append({"role": "user", "content": msg})
+                if message.attachment_ids:
+                    attachment = message.attachment_ids[:1]
+                    file_content = ai.get_msg_file_content(message)
+                    if not file_content:
+                        messages.append({"role": "user", "content": msg})
+                    if attachment.mimetype in ['image/jpeg', 'image/png', 'image/gif', 'image/bmp', 'image/webp']:
+                        messages.append({
+                            "role": "user",
+                            "content": [
+                                {
+                                    "type": "image_url",
+                                    "image_url": {
+                                        "url": file_content,
+                                    },
+                                },
+                                {
+                                    "type": "text",
+                                    "text": msg
+                                }
+                            ]
+                        })
+                    else:
+                        messages.append({"role": "system", "content": file_content})
+                        messages.append({"role": "user", "content": msg})
+                else:
+                    messages.append({"role": "user", "content": msg})
                 msg_len = sum(len(str(m)) for m in messages)
                 # the API accepts at most 8430 tokens
-                if msg_len * 2 > ai.max_send_char:
-                    messages = []
-                    if hasattr(channel, 'is_private') and channel.description:
-                        messages.append({"role": "system", "content": channel.description})
-                    messages.append({"role": "user", "content": msg})
-                    msg_len = sum(len(str(m)) for m in messages)
-                    if msg_len * 2 > ai.max_send_char:
-                        new_msg = channel.with_user(user_id).message_post(body=_('您所发送的提示词已超长。'), message_type='comment',
-                                                                          subtype_xmlid='mail.mt_comment',
-                                                                          parent_id=message.id)
+                # if msg_len * 2 > ai.max_send_char:
+                #     messages = []
+                #     if hasattr(channel, 'is_private') and channel.description:
+                #         messages.append({"role": "system", "content": channel.description})
+                #     messages.append({"role": "user", "content": msg})
+                #     msg_len = sum(len(str(m)) for m in messages)
+                #     if msg_len * 2 > ai.max_send_char:
+                #         new_msg = channel.with_user(user_id).message_post(body=_('您所发送的提示词已超长。'), message_type='comment',
+                #                                                           subtype_xmlid='mail.mt_comment',
+                #                                                           parent_id=message.id)
                 # if msg_len * 2 >= 8000:
                 #     messages = [{"role": "user", "content": msg}]
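The attachment branch added above emits the multimodal ("vision") message shape of the Chat Completions API: the user content becomes a list holding an image_url part and a text part. A minimal sketch of the resulting request, posted with requests the way the module does for plain-text chats (the API key, model name and base64 payload below are placeholders):

import requests

API_KEY = 'sk-your-key'                 # placeholder
ENDPOINT = 'https://api.openai.com/v1/chat/completions'

payload = {
    "model": "gpt-4o",                  # any vision-capable model
    "messages": [
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": "data:image/png;base64,AAAA..."}},
                {"type": "text", "text": "What is in this image?"},
            ],
        }
    ],
}
response = requests.post(
    ENDPOINT,
    headers={"Content-Type": "application/json", "Authorization": f"Bearer {API_KEY}"},
    json=payload,
    timeout=60,
)
print(response.json()["choices"][0]["message"]["content"])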

Changed file 4 of 5 (module description page)

@@ -11,7 +11,7 @@
     </div>
     <img class="oe_demo oe_screenshot img img-fluid" src="demo02.jpg">
 </div>
-<h3>Latest update: v17.25.02.10</h3>
+<h3>Latest update: v17.25.02.11</h3>
 <div class="row">
     <img class="oe_demo oe_screenshot img img-fluid" style="max-height: 100%;" src="banner.png">
 </div>

Changed file 5 of 5 (module manifest, data file list)

@@ -53,7 +53,7 @@
     'data': [
         'data/ai_robot_data.xml',
         'data/user_partner_data.xml',
-        'data/mail_channel_data.xml',
+        'data/discuss_channel_data.xml',
         'views/ai_robot_views.xml',
     ],
     'assets': {