update to 15 gpt

This commit is contained in:
Ivan Office
2023-09-22 21:55:44 +08:00
parent e16bb569eb
commit 5d129fa6c2
52 changed files with 17234 additions and 1068 deletions

View File

@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2020-Present InTechual Solutions. (<https://intechualsolutions.com/>)
# from . import controllers
from . import models

View File

@@ -9,8 +9,8 @@
# Copyright (c) 2020-Present InTechual Solutions. (<https://intechualsolutions.com/>)
{
'name': 'ChatGPT4,Google Bard, AiGC Center.Ai服务中心聚合全网Ai',
'version': '15.23.07.29',
'name': 'ChatGPT4, China Ali,AiGC Center.Ai服务中心聚合全网Ai',
'version': '16.23.09.22',
'author': 'odooai.cn',
'company': 'odooai.cn',
'maintainer': 'odooai.cn',
@@ -21,8 +21,8 @@
'sequence': 10,
'images': ['static/description/banner.gif'],
'summary': '''
ChatGpt Odoo AI Center. Multi Ai aigc support with Google Bard Ai, Azure Ai, Baidu Ai,etc..
Support chatgpt 4 32k image. DALLE, Integration All ChatGpt Api and Azure OpenAI.
ChatGpt Odoo AI Center. Multi AI (AIGC) support with Ali Qwen AI, Azure AI, Baidu AI, etc.
Supports ChatGPT-4 32k. Integrates all ChatGPT APIs and Azure OpenAI.
Easy chat channel with several ChatGPT robots, with training.
''',
'description': '''
@@ -30,7 +30,7 @@
providing a more natural and intuitive user experience.
Based on is_chatgpt_integration from InTechual Solutions.
1. Multi ChatGpt openAI robot Connector. Chat and train.
2. Multi Ai support including Google Bard Ai, Azure Ai, Chatgpt 4, Chatgpt 3.5 Turbo, Chatgpt 3 Davinci, Chatgpt 2 Code Optimized, 'Dall-E Image.
2. Multi Ai support including Azure Ai, Alibaba Ai, Baidu Ai, Chatgpt 4, Chatgpt 3.5 Turbo, Chatgpt 3 Davinci.
3. Bind a ChatGPT API to a user, so you can chat with the robot user or use a ChatGPT channel for group chat.
4. White list and black list for ChatGPT.
5. Set up a demo chat time for every new user.
@@ -45,19 +45,23 @@
''',
'depends': [
'base',
'app_common',
'app_odoo_customize',
'base_setup',
'mail',
'queue_job',
],
'data': [
'security/ir.model.access.csv',
'security/ir_rules.xml',
'data/mail_channel_data.xml',
'data/ai_robot_data.xml',
'data/user_partner_data.xml',
'data/ir_config_parameter.xml',
'views/res_config_settings_views.xml',
'views/ai_robot_views.xml',
'views/res_partner_ai_use_views.xml',
'views/res_users_views.xml',
'views/mail_channel_views.xml',
],
'assets': {
'mail.assets_messaging': [
@@ -66,6 +70,9 @@
'mail.assets_model_data': [
'app_chatgpt/static/src/models_data/*.js',
],
'web.assets_backend': [
'app_chatgpt/static/src/components/*/*.xml',
],
},
'external_dependencies': {'python': ['openai']},
'installable': True,

View File

@@ -1,5 +1,3 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2020-Present InTechual Solutions. (<https://intechualsolutions.com/>)
from . import main

View File

@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2020-Present InTechual Solutions. (<https://intechualsolutions.com/>)
from odoo import http

View File

@@ -1,15 +1,42 @@
<?xml version="1.0" encoding="UTF-8" ?>
<odoo>
<odoo noupdate="1">
<record id="chatgpt_robot" model="ai.robot">
<field name="name">ChatGPT odoo</field>
<field name="provider">openai</field>
<field name="endpoint">https://api.openai.com/v1/chat/completions</field>
<field name="sequence">1</field>
<field name="image_avatar" type="base64" file="app_chatgpt/static/description/src/openai.png"/>
</record>
<record id="chatgpt_robot1" model="ai.robot">
<field name="name">ChatGPT Coding</field>
<field name="provider">openai</field>
<field name="endpoint">https://api.openai.com/v1/chat/completions</field>
<field name="sequence">6</field>
<field name="image_avatar" type="base64" file="app_chatgpt/static/description/src/openai.png"/>
</record>
<record id="chatgpt_robot2" model="ai.robot">
<field name="name">ChatGPT Finance</field>
<field name="provider">openai</field>
<field name="endpoint">https://api.openai.com/v1/chat/completions</field>
<field name="sequence">7</field>
<field name="image_avatar" type="base64" file="app_chatgpt/static/description/src/openai.png"/>
</record>
<record id="chatgpt3_azure" model="ai.robot">
<field name="name">ChatGPT Azure</field>
<field name="provider">azure</field>
<field name="endpoint">https://my.openai.azure.com</field>
<field name="engine">gpt35</field>
<field name="api_version">2023-03-15-preview</field>
<field name="sequence">8</field>
<field name="image_avatar" type="base64" file="app_chatgpt/static/description/src/azure.png"/>
</record>
<record id="chatgpt4_azure" model="ai.robot">
<field name="name">ChatGPT4 Azure</field>
<field name="provider">azure</field>
<field name="endpoint">https://my.openai.azure.com</field>
<field name="engine">gpt4</field>
<field name="api_version">2023-03-15-preview</field>
<field name="sequence">9</field>
<field name="image_avatar" type="base64" file="app_chatgpt/static/description/src/azure.png"/>
</record>
</odoo>
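Note: the records above only declare the robots; each still needs an API key before it can answer. A minimal sketch, assuming an Odoo shell session on a database with this module installed (the key value is a placeholder):

# Odoo shell sketch: fetch one of the ai.robot records declared above and set its key.
robot = env.ref('app_chatgpt.chatgpt3_azure')
robot.write({'openapi_api_key': 'YOUR-AZURE-KEY'})   # placeholder key
env.cr.commit()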

View File

@@ -1,11 +1,7 @@
<?xml version="1.0" encoding="utf-8"?>
<odoo>
<data noupdate="1">
<record id="config_openapi_context_timeout" model="ir.config_parameter">
<field name="key">app_chatgpt.openapi_context_timeout</field>
<field name="value">300</field>
</record>
<function model="ir.config_parameter" name="set_param" eval="('app_chatgpt.openapi_context_timeout', '300')"/>
<function model="ir.config_parameter" name="set_param" eval="('app_chatgpt.openai_sync_config', 'sync')"/>
</data>
</odoo>
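For reference, the two &lt;function&gt; elements above are equivalent to the following Python at data-load time (set_param is the standard ir.config_parameter API):

# What the <function> elements do when this data file is loaded:
env['ir.config_parameter'].set_param('app_chatgpt.openapi_context_timeout', '300')
env['ir.config_parameter'].set_param('app_chatgpt.openai_sync_config', 'sync')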

View File

@@ -3,7 +3,7 @@
<data noupdate="1">
<record model="mail.channel" id="channel_chatgpt">
<field name="name">ChatGPT Group Chat</field>
<field name="description">ChatGPT企业内部频道</field>
<field name="description">ChatGPT话题</field>
<field name="image_128" type="base64" file="app_chatgpt/static/description/chatgpt.png"/>
</record>
@@ -14,15 +14,15 @@
<field name="subtype_id" ref="mail.mt_comment"/>
<field name="subject">Welcome to ChatGPT Group Chat</field>
<field name="body"><![CDATA[<p>Welcome to ChatGPT Group Chat.</p>
<p>Pleas answer me any question.</p>]]></field>
<p>Please ask me any question.</p>]]></field>
</record>
<record model="mail.channel.member" id="channel_member_chatgtp_channel_for_admin">
<field name="partner_id" ref="base.partner_admin"/>
<field name="channel_id" ref="app_chatgpt.channel_chatgpt"/>
<field name="fetched_message_id" ref="app_chatgpt.module_install_notification"/>
<field name="seen_message_id" ref="app_chatgpt.module_install_notification"/>
</record>
<!-- <record model="mail.channel.member" id="channel_member_chatgtp_channel_for_admin">-->
<!-- <field name="partner_id" ref="base.partner_admin"/>-->
<!-- <field name="channel_id" ref="app_chatgpt.channel_chatgpt"/>-->
<!-- <field name="fetched_message_id" ref="app_chatgpt.module_install_notification"/>-->
<!-- <field name="seen_message_id" ref="app_chatgpt.module_install_notification"/>-->
<!-- </record>-->
<record model="mail.channel" id="app_chatgpt.channel_chatgpt">
<field name="group_ids" eval="[Command.link(ref('base.group_user'))]"/>

View File

@@ -6,8 +6,7 @@
<field name="image_1920" type="base64" file="app_chatgpt/static/description/chatgpt.png"/>
</record>
<record id="user_chatgpt" model="res.users">
<field name="login">chatgpt@odooai.cn</field>
<field name="password">chatgpt</field>
<field name="login">chatgpt@example.com</field>
<field name="partner_id" ref="app_chatgpt.partner_chatgpt"/>
<field name="gpt_id" ref="app_chatgpt.chatgpt_robot"/>
<field name="company_id" ref="base.main_company"/>
@@ -21,9 +20,8 @@
</record>
<record id="user_chatgpt1" model="res.users">
<field name="login">chatgpt1@odooai.cn</field>
<field name="email">chatgpt1@odooai.cn</field>
<field name="password">chatgpt</field>
<field name="login">chatgpt1@example.com</field>
<field name="email">chatgpt1@example.com</field>
<field name="partner_id" ref="app_chatgpt.partner_chatgpt1"/>
<field name="gpt_id" ref="app_chatgpt.chatgpt_robot1"/>
<field name="company_id" ref="base.main_company"/>
@@ -37,14 +35,42 @@
</record>
<record id="user_chatgpt2" model="res.users">
<field name="login">chatgpt2@odooai.cn</field>
<field name="email">chatgpt2@odooai.cn</field>
<field name="password">chatgpt</field>
<field name="login">chatgpt2@example.com</field>
<field name="email">chatgpt2@example.com</field>
<field name="partner_id" ref="app_chatgpt.partner_chatgpt2"/>
<field name="gpt_id" ref="app_chatgpt.chatgpt_robot2"/>
<field name="company_id" ref="base.main_company"/>
<field name="company_ids" eval="[Command.link(ref('base.main_company'))]"/>
<field name="groups_id" eval="[Command.link(ref('base.group_user'))]"/>
</record>
<record id="partner_chatgpt3_azure" model="res.partner">
<field name="name">ChatGPT3 Azure</field>
<field name="image_1920" type="base64" file="app_chatgpt/static/description/chatgpt_blue.png"/>
</record>
<record id="user_chatgpt3_azure" model="res.users">
<field name="login">chatgpt3_azure@example.com</field>
<field name="email">chatgpt3_azure@example.com</field>
<field name="partner_id" ref="app_chatgpt.partner_chatgpt3_azure"/>
<field name="gpt_id" ref="app_chatgpt.chatgpt3_azure"/>
<field name="company_id" ref="base.main_company"/>
<field name="company_ids" eval="[Command.link(ref('base.main_company'))]"/>
<field name="groups_id" eval="[Command.link(ref('base.group_user'))]"/>
</record>
<record id="partner_chatgpt4_azure" model="res.partner">
<field name="name">ChatGPT4 Azure</field>
<field name="image_1920" type="base64" file="app_chatgpt/static/description/chatgpt4_azure.png"/>
</record>
<record id="user_chatgpt4_azure" model="res.users">
<field name="login">chatgpt4_azure@example.com</field>
<field name="email">chatgpt4_azure@example.com</field>
<field name="partner_id" ref="app_chatgpt.partner_chatgpt4_azure"/>
<field name="gpt_id" ref="app_chatgpt.chatgpt4_azure"/>
<field name="company_id" ref="base.main_company"/>
<field name="company_ids" eval="[Command.link(ref('base.main_company'))]"/>
<field name="groups_id" eval="[Command.link(ref('base.group_user'))]"/>
</record>
</data>
</odoo>

File diff suppressed because it is too large

View File

@@ -1,8 +1,10 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2020-Present InTechual Solutions. (<https://intechualsolutions.com/>)
from . import res_partner
from . import mail_channel
from . import res_config_settings
from . import ai_robot
from . import res_partner_ai_use
from . import res_users
from . import mail_message
from . import mail_thread

View File

@@ -1,25 +1,42 @@
# -*- coding: utf-8 -*-
import requests
from odoo import api, fields, models, _
import openai.openai_object
import requests, json
import openai
import base64
from odoo import api, fields, models, modules, tools, _
from odoo.exceptions import UserError
from .lib.WordsSearch import WordsSearch
import logging
_logger = logging.getLogger(__name__)
class AiRobot(models.Model):
_name = 'ai.robot'
_description = 'Gpt Robot'
_description = 'Ai Robot'
_order = 'sequence, name'
name = fields.Char(string='Name', translate=True)
provider = fields.Selection(string="AI Provider", selection=[('openai', 'OpenAI')], required=True, default='openai')
ai_model = fields.Selection(string="AI Model", selection=[
name = fields.Char(string='Name', translate=True, required=True)
provider = fields.Selection(string="AI Provider", selection=[('openai', 'OpenAI'), ('azure', 'Azure')],
required=True, default='openai', change_default=True)
# update ai_robot set ai_model=set_ai_model
ai_model = fields.Char(string="AI Model", required=True, default='auto', help='Customize input')
set_ai_model = fields.Selection(string="Quick Set Model", selection=[
('gpt-3.5-turbo-0613', 'gpt-3.5-turbo-0613(Default and Latest)'),
('gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-16k-0613(Big text)'),
('gpt-4', 'Chatgpt 4'),
('gpt-4-32k', 'Chatgpt 4 32k'),
('gpt-3.5-turbo', 'Chatgpt 3.5 Turbo'),
('gpt-3.5-turbo-0301', 'Chatgpt 3.5 Turbo on 20230301'),
('text-davinci-003', 'Chatgpt 3 Davinci'),
('code-davinci-002', 'Chatgpt 2 Code Optimized'),
('text-davinci-002', 'Chatgpt 2 Davinci'),
('dall-e2', 'Dall-E Image'),
], required=True, default='gpt-3.5-turbo',
help="""
], default='gpt-3.5-turbo-0613',
help="""
GPT-4: Can understand Image, generate natural language or code.
GPT-3.5: A set of models that improve on GPT-3 and can understand as well as generate natural language or code
DALL·E: A model that can generate and edit images given a natural language prompt
Whisper: A model that can convert audio into text
@@ -29,11 +46,440 @@ Moderation: A fine-tuned model that can detect whether text may be sensitive or
GPT-3 A set of models that can understand and generate natural language
""")
openapi_api_key = fields.Char(string="API Key", help="Provide the API key here")
temperature = fields.Float(string='Temperature', default=0.9)
# begin gpt parameters
# 1. stop: the condition that makes the model stop generating, either a string or a list; generation stops once the reply contains it.
# 2. temperature: (0-2) controls "novelty"; higher values make replies more random and unpredictable, lower values make them more predictable and conventional.
# 3. top_p: (0-1) language coherence; similar to temperature it also controls "novelty", but by limiting the cumulative probability of the most likely tokens: smaller values give more conservative replies, larger values more novel ones.
# 4. frequency_penalty: (-2~2) penalizes words that already appear frequently in the reply, so the model avoids overused words and keeps the reply varied.
# 5. presence_penalty: (-2~2) the counterpart of frequency_penalty; penalizes any word that has already appeared, nudging the model toward new words and topics.
max_tokens = fields.Integer('Max Response', default=600,
help="""
Set a limit on the number of tokens per model response.
The API supports a maximum of 4000 tokens shared between the prompt
(including system message, examples, message history, and user query) and the model's response.
One token is roughly 4 characters for typical English text.
""")
temperature = fields.Float(string='Temperature', default=1,
help="""
Controls randomness. Lowering the temperature means that the model will produce
more repetitive and deterministic responses.
Increasing the temperature will result in more unexpected or creative responses.
Try adjusting temperature or Top P but not both.
""")
top_p = fields.Float('Top Probabilities', default=0.6,
help="""
Similar to temperature, this controls randomness but uses a different method.
Lowering Top P will narrow the model's token selection to likelier tokens.
Increasing Top P will let the model choose from tokens with both high and low likelihood.
Try adjusting temperature or Top P but not both
""")
# discourage overused common words
frequency_penalty = fields.Float('Frequency Penalty', default=1,
help="""
Reduce the chance of repeating a token proportionally based on how often it has appeared in the text so far.
This decreases the likelihood of repeating the exact same text in a response.
""")
# larger values push the model toward new topics by penalizing text that has already appeared
presence_penalty = fields.Float('Presence penalty', default=1,
help="""
Reduce the chance of repeating any token that has appeared in the text at all so far.
This increases the likelihood of introducing new topics in a response.
""")
# keywords that stop the reply
stop = fields.Char('Stop sequences',
help="""
Use , to separate the stop key word.
Make responses stop at a desired point, such as the end of a sentence or list.
Specify up to four sequences where the model will stop generating further tokens in a response.
The returned text will not contain the stop sequence.
""")
# persona / role setup
sys_content = fields.Char('System message',
help="""
Give the model instructions about how it should behave and any context it should reference when generating a response.
You can describe the assistant's personality,
tell it what it should and shouldn't answer, and tell it how to format responses.
There's no token limit for this section, but it will be included with every API call,
so it counts against the overall token limit.
""")
# end gpt parameters
endpoint = fields.Char('End Point', default='https://api.openai.com/v1/chat/completions')
engine = fields.Char('Engine', help='If using Azure, please enter the model deployment name.')
api_version = fields.Char('API Version', default='2022-12-01')
ai_timeout = fields.Integer('Timeout(seconds)', help="Connect timeout for Ai response", default=120)
sequence = fields.Integer('Sequence', help="Determine the display order", default=10)
sensitive_words = fields.Text('Sensitive Words Plus', help='Sensitive word filtering. Separate keywords with a carriage return.')
is_filtering = fields.Boolean('Filter Sensitive Words', default=False, help='Use base Filter in dir models/lib/sensi_words.txt')
max_send_char = fields.Integer('Max Send Char', help='Max Send Prompt Length', default=8000)
image_avatar = fields.Image('Avatar')
partner_ids = fields.One2many('res.partner', 'gpt_id', string='Partner')
partner_count = fields.Integer('#Partner', compute='_compute_partner_count', store=False)
active = fields.Boolean('Active', default=True)
def _compute_partner_count(self):
for rec in self:
rec.partner_count = len(rec.partner_ids)
def action_disconnect(self):
requests.delete('https://chatgpt.com/v1/disconnect')
def get_ai_pre(self, data, author_id=False, answer_id=False, param={}):
if self.is_filtering:
search = WordsSearch()
search.SetKeywords([])
if isinstance(data, list):
content = data[len(data)-1]['content']
else:
content = data
sensi = search.FindFirst(content)
if sensi is not None:
_logger.error('==========敏感词:%s' % sensi['Keyword'])
return _('温馨提示:您发送的内容含有敏感词,请修改内容后再向我发送。')
elif not author_id.gpt_id and answer_id.gpt_id:
user_id = answer_id.user_ids[:1]
gpt_policy = user_id.gpt_policy
gpt_wl_partners = user_id.gpt_wl_partners
is_allow = author_id.id in gpt_wl_partners.ids
if gpt_policy != 'all' and not is_allow:
# this AI is temporarily restricted to whitelisted users
return _('此Ai暂时未开放请联系管理员。')
else:
return False
def get_ai(self, data, author_id=False, answer_id=False, param={}):
# generic entry point
# author_id: partner record of the requester
# answer_id: partner record of the responder
# param: extra parameters as a dict
# returns three values: res_post (the content), usage (token usage), is_ai (whether the reply came from the AI)
self.ensure_one()
# pre-hook: normally returns False; on a problem it returns the response content directly (sensitive words, access policy, etc.)
res_pre = self.get_ai_pre(data, author_id, answer_id, param)
if res_pre:
# error content: return it to the caller with is_ai = False
return res_pre, {}, False
if not hasattr(self, 'get_%s' % self.provider):
res = _('No robot provider found')
return res, {}, False
res = getattr(self, 'get_%s' % self.provider)(data, author_id, answer_id, param)
# post-hook: returns the processed content
res_post, usage, is_ai = self.get_ai_post(res, author_id, answer_id, param)
return res_post, usage, is_ai
def get_ai_origin(self, data, author_id=False, answer_id=False, param={}):
# generic entry point, raw variant
# author_id: partner record of the requester
# answer_id: partner record of the responder
# param: extra parameters as a dict
# unlike get_ai, this returns only the raw provider response
self.ensure_one()
# pre-hook: normally returns False; on a problem it returns the response content directly (sensitive words, access policy, etc.)
res_pre = self.get_ai_pre(data, author_id, answer_id, param)
if res_pre:
# error content: return it to the caller with is_ai = False
return res_pre, {}, False
if not hasattr(self, 'get_%s' % self.provider):
res = _('No robot provider found')
return res, {}, False
res = getattr(self, 'get_%s' % self.provider)(data, author_id, answer_id, param)
# post-hook: returns the processed content
res_post, usage, is_ai = self.get_ai_post(res, author_id, answer_id, param)
return res
def get_ai_post(self, res, author_id=False, answer_id=False, param={}):
if res and author_id and isinstance(res, openai.openai_object.OpenAIObject) or isinstance(res, list) or isinstance(res, dict):
# a structured response object means the reply came from the AI
# if isinstance(res, dict):
if self.provider == 'openai':
# OpenAI response format
usage = res['usage']
content = res['choices'][0]['message']['content']
# _logger.warning('===========Ai响应:%s' % content)
elif self.provider == 'azure':
# Azure response format
usage = json.loads(json.dumps(res['usage']))
content = json.loads(json.dumps(res['choices'][0]['message']['content']))
elif self.provider == 'ali':
# Ali (Qwen) response format
usage = res['usage']
content = res['output']['text']
elif self.provider == 'baidu':
# Baidu response format
usage = res['usage']
content = res['result']
else:
usage = False
content = res
data = content.replace(' .', '.').strip()
answer_user = answer_id.mapped('user_ids')[:1]
if usage:
if self.provider == 'ali':
prompt_tokens = usage['input_tokens']
completion_tokens = usage['output_tokens']
total_tokens = usage['input_tokens'] + usage['output_tokens']
else:
prompt_tokens = usage['prompt_tokens']
completion_tokens = usage['completion_tokens']
total_tokens = usage['total_tokens']
# not written on the user; stored in the dedicated usage model, e.g. res.partner.ai.use
ai_use = self.env['res.partner.ai.use'].search([('name', '=', author_id.id)], limit=1)
ask_date = fields.Datetime.now()
if not ai_use:
ai_use.create({
'name': author_id.id,
'ai_user_id': answer_user.id,
'human_prompt_tokens': prompt_tokens,
'ai_completion_tokens': completion_tokens,
'tokens_total': total_tokens,
'used_number': 1,
'first_ask_time': ask_date,
'latest_ask_time': ask_date
})
else:
vals = {
'human_prompt_tokens': ai_use.human_prompt_tokens + prompt_tokens,
'ai_completion_tokens': ai_use.ai_completion_tokens + completion_tokens,
'tokens_total': ai_use.tokens_total + total_tokens,
'used_number': ai_use.used_number + 1,
'latest_ask_time': ask_date
}
if not ai_use.first_ask_time:
vals.update({
'first_ask_time': ask_date
})
ai_use.write(vals)
return data, usage, True
else:
# a plain error string means the reply is not from the AI
return res, False, False
def get_ai_system(self, content=None):
# get the base AI persona, sent with the system role
sys_content = content or self.sys_content
if sys_content:
return {"role": "system", "content": sys_content}
return {}
def get_ai_model_info(self):
self.ensure_one()
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.openapi_api_key}"}
R_TIMEOUT = self.ai_timeout or 120
o_url = "https://api.openai.com/v1/models/%s" % self.ai_model
if self.endpoint:
o_url = self.endpoint.replace("/chat/completions", "") + "/models/%s" % self.ai_model
response = requests.get(o_url, headers=headers, timeout=R_TIMEOUT)
response.close()
if response:
res = response.json()
r_text = json.dumps(res, indent=2)
else:
r_text = 'No response.'
raise UserError(r_text)
def get_ai_list_model(self):
self.ensure_one()
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.openapi_api_key}"}
R_TIMEOUT = self.ai_timeout or 120
o_url = "https://api.openai.com/v1/models"
if self.endpoint:
o_url = self.endpoint.replace("/chat/completions", "") + "/models"
response = requests.get(o_url, headers=headers, timeout=R_TIMEOUT)
response.close()
if response:
res = response.json()
r_text = json.dumps(res, indent=2)
else:
r_text = 'No response.'
raise UserError(r_text)
def get_openai(self, data, author_id, answer_id, param={}):
self.ensure_one()
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.openapi_api_key}"}
R_TIMEOUT = self.ai_timeout or 120
o_url = self.endpoint or "https://api.openai.com/v1/chat/completions"
# merge parameters: values passed in take priority over the robot defaults
max_tokens = param.get('max_tokens') if param.get('max_tokens') else self.max_tokens
temperature = param.get('temperature') if param.get('temperature') else self.temperature
top_p = param.get('top_p') if param.get('top_p') else self.top_p
frequency_penalty = param.get('frequency_penalty') if param.get('frequency_penalty') else self.frequency_penalty
presence_penalty = param.get('presence_penalty') if param.get('presence_penalty') else self.presence_penalty
request_timeout = param.get('request_timeout') if param.get('request_timeout') else self.ai_timeout
if self.stop:
stop = self.stop.split(',')
else:
stop = ["Human:", "AI:"]
# OpenAI handling below
if self.ai_model in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
# mostly the same as Azure; api_base needs to be adjusted
openai.api_key = self.openapi_api_key
openai.api_base = o_url.replace('/chat/completions', '')
if isinstance(data, list):
messages = data
else:
messages = [{"role": "user", "content": data}]
# AI persona: inject it only if no system message is set yet
if messages[0].get('role') != 'system':
sys_content = self.get_ai_system(param.get('sys_content'))
if sys_content:
messages.insert(0, sys_content)
# todo: the current reverse-proxy approach does not work; switch to a remote host that accepts the request, posts it to OpenAI and relays the result back to the caller
# response = openai.ChatCompletion.create(
# model=self.ai_model,
# messages=messages,
# number of completions to return
# n=1,
# max_tokens=max_tokens,
# temperature=temperature,
# top_p=top_p,
# frequency_penalty=frequency_penalty,
# presence_penalty=presence_penalty,
# stop=stop,
# request_timeout=request_timeout,
# )
# if 'choices' in response:
# return response
# todo: both approaches behave the same; adjust the relay server's post-processing (/root/toai.py)
pdata = {
"model": self.ai_model,
"messages": messages,
"max_tokens": max_tokens,
"temperature": temperature,
"top_p": top_p,
"frequency_penalty": frequency_penalty,
"presence_penalty": presence_penalty,
"stop": stop
}
response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
try:
res = response.json()
if 'choices' in res:
return res
except Exception as e:
_logger.warning("Get Response Json failed: %s", e)
else:
_logger.warning('=====================Openai output data: %s' % response.json())
elif self.ai_model == 'dall-e2':
# todo: handle the image engine; mainly return the generated image back into the chat
# image_url = response['data'][0]['url']
# https://platform.openai.com/docs/guides/images/introduction
pdata = {
"prompt": data,
"n": 3,
"size": "1024x1024",
}
return '建设中'
else:
pdata = {
"model": self.ai_model,
"prompt": data,
"temperature": 1,
"max_tokens": max_tokens,
"top_p": 0.6,
"frequency_penalty": 0.1,
"presence_penalty": 0.1,
"stop": stop
}
response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
res = response.json()
if 'choices' in res:
res = '\n'.join([x['text'] for x in res['choices']])
return res
return _("Response Timeout, please speak again.")
def get_azure(self, data, author_id, answer_id, param={}):
self.ensure_one()
# only for azure
openai.api_type = self.provider
if not self.endpoint:
raise UserError(_("Please Set your AI robot's endpoint first."))
openai.api_base = self.endpoint
if not self.api_version:
raise UserError(_("Please Set your AI robot's API Version first."))
openai.api_version = self.api_version
openai.api_key = self.openapi_api_key
if self.stop:
stop = self.stop.split(',')
else:
stop = ["Human:", "AI:"]
if isinstance(data, list):
messages = data
else:
messages = [{"role": "user", "content": data}]
# merge parameters: values passed in take priority over the robot defaults
max_tokens = param.get('max_tokens') if param.get('max_tokens') else self.max_tokens
temperature = param.get('temperature') if param.get('temperature') else self.temperature
top_p = param.get('top_p') if param.get('top_p') else self.top_p
frequency_penalty = param.get('frequency_penalty') if param.get('frequency_penalty') else self.frequency_penalty
presence_penalty = param.get('presence_penalty') if param.get('presence_penalty') else self.presence_penalty
request_timeout= param.get('request_timeout') if param.get('request_timeout') else self.ai_timeout
# AI persona: inject it only if no system message is set yet
if messages[0].get('role') != 'system':
sys_content = self.get_ai_system(param.get('sys_content'))
if sys_content:
messages.insert(0, sys_content)
# unchanged for now
response = openai.ChatCompletion.create(
engine=self.engine,
messages=messages,
# number of completions to return
n=1,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
stop=None,
request_timeout=request_timeout,
)
if 'choices' in response:
return response
else:
_logger.warning('=====================azure output data: %s' % response.json())
return _("Response Timeout, please speak again.")
@api.onchange('provider')
def _onchange_provider(self):
if self.provider == 'openai':
self.endpoint = 'https://api.openai.com/v1/chat/completions'
elif self.provider == 'azure':
self.endpoint = 'https://odoo.openai.azure.com'
if self.provider:
# load the provider avatar
module_path = modules.get_module_path('app_chatgpt', display_warning=False)
if module_path:
path = modules.check_resource_path(module_path, ('static/description/src/%s.png' % self.provider))
if path:
image_file = tools.file_open(path, 'rb')
self.image_avatar = base64.b64encode(image_file.read())
@api.onchange('set_ai_model')
def _onchange_set_ai_model(self):
if self.set_ai_model:
self.ai_model = self.set_ai_model
else:
self.ai_model = None
def filter_sensitive_words(self, data):
if self.is_filtering:
search = WordsSearch()
s = self.sensitive_words
if s:
search.SetKeywords(s.split('\n'))
else:
search.SetKeywords([])
data = search.Replace(text=data)
return data
else:
return data
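
A minimal usage sketch for the ai.robot model above, assuming an Odoo shell, a robot configured with a model and a valid API key, and the demo partners from the data files; the prompt is illustrative:

# Call the robot directly; get_ai returns (content, usage, is_ai).
robot = env.ref('app_chatgpt.chatgpt_robot')
author = env.user.partner_id                        # the human asking
answerer = env.ref('app_chatgpt.partner_chatgpt')   # the robot's partner record
reply, usage, is_ai = robot.get_ai("Hello, who are you?", author_id=author, answer_id=answerer)
print(is_ai, usage and usage.get('total_tokens'), reply)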

View File

@@ -0,0 +1,296 @@
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# ToolGood.Words.WordsSearch.py
# 2020, Lin Zhijun, https://github.com/toolgood/ToolGood.Words
# Licensed under the Apache License 2.0
# Changelog
# 2020.04.06 first commit
# 2020.05.16 support characters above 0xffff
import os
__all__ = ['WordsSearch']
__author__ = 'Lin Zhijun'
__date__ = '2020.05.16'
class TrieNode():
def __init__(self):
self.Index = 0
self.Index = 0
self.Layer = 0
self.End = False
self.Char = ''
self.Results = []
self.m_values = {}
self.Failure = None
self.Parent = None
def Add(self, c):
if c in self.m_values:
return self.m_values[c]
node = TrieNode()
node.Parent = self
node.Char = c
self.m_values[c] = node
return node
def SetResults(self, index):
if (self.End == False):
self.End = True
self.Results.append(index)
class TrieNode2():
def __init__(self):
self.End = False
self.Results = []
self.m_values = {}
self.minflag = 0xffff
self.maxflag = 0
def Add(self, c, node3):
if (self.minflag > c):
self.minflag = c
if (self.maxflag < c):
self.maxflag = c
self.m_values[c] = node3
def SetResults(self, index):
if (self.End == False):
self.End = True
if (index in self.Results) == False:
self.Results.append(index)
def HasKey(self, c):
return c in self.m_values
def TryGetValue(self, c):
if (self.minflag <= c and self.maxflag >= c):
if c in self.m_values:
return self.m_values[c]
return None
class WordsSearch():
def __init__(self):
self._first = {}
self._keywords = []
self._indexs = []
def SetKeywords(self, keywords):
keyword_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'sensi_words.txt')
s = open(keyword_path, 'r+', encoding='utf-8').read().split('\n')
keywords += s
self._keywords = keywords
self._indexs = []
for i in range(len(keywords)):
self._indexs.append(i)
root = TrieNode()
allNodeLayer = {}
for i in range(len(self._keywords)): # for (i = 0; i < _keywords.length; i++)
p = self._keywords[i]
nd = root
for j in range(len(p)): # for (j = 0; j < p.length; j++)
nd = nd.Add(ord(p[j]))
if (nd.Layer == 0):
nd.Layer = j + 1
if nd.Layer in allNodeLayer:
allNodeLayer[nd.Layer].append(nd)
else:
allNodeLayer[nd.Layer] = []
allNodeLayer[nd.Layer].append(nd)
nd.SetResults(i)
allNode = []
allNode.append(root)
for key in allNodeLayer.keys():
for nd in allNodeLayer[key]:
allNode.append(nd)
allNodeLayer = None
for i in range(len(allNode)): # for (i = 0; i < allNode.length; i++)
if i == 0:
continue
nd = allNode[i]
nd.Index = i
r = nd.Parent.Failure
c = nd.Char
while (r != None and (c in r.m_values) == False):
r = r.Failure
if (r == None):
nd.Failure = root
else:
nd.Failure = r.m_values[c]
for key2 in nd.Failure.Results:
nd.SetResults(key2)
root.Failure = root
allNode2 = []
for i in range(len(allNode)): # for (i = 0; i < allNode.length; i++)
allNode2.append(TrieNode2())
for i in range(len(allNode2)): # for (i = 0; i < allNode2.length; i++)
oldNode = allNode[i]
newNode = allNode2[i]
for key in oldNode.m_values:
index = oldNode.m_values[key].Index
newNode.Add(key, allNode2[index])
for index in range(len(oldNode.Results)): # for (index = 0; index < oldNode.Results.length; index++)
item = oldNode.Results[index]
newNode.SetResults(item)
oldNode = oldNode.Failure
while oldNode != root:
for key in oldNode.m_values:
if (newNode.HasKey(key) == False):
index = oldNode.m_values[key].Index
newNode.Add(key, allNode2[index])
for index in range(len(oldNode.Results)):
item = oldNode.Results[index]
newNode.SetResults(item)
oldNode = oldNode.Failure
allNode = None
root = None
# first = []
# for index in range(65535):# for (index = 0; index < 0xffff; index++)
# first.append(None)
# for key in allNode2[0].m_values :
# first[key] = allNode2[0].m_values[key]
self._first = allNode2[0]
def FindFirst(self, text):
ptr = None
for index in range(len(text)): # for (index = 0; index < text.length; index++)
t = ord(text[index]) # text.charCodeAt(index)
tn = None
if (ptr == None):
tn = self._first.TryGetValue(t)
else:
tn = ptr.TryGetValue(t)
if (tn == None):
tn = self._first.TryGetValue(t)
if (tn != None):
if (tn.End):
item = tn.Results[0]
keyword = self._keywords[item]
return {"Keyword": keyword, "Success": True, "End": index, "Start": index + 1 - len(keyword), "Index": self._indexs[item]}
ptr = tn
return None
def FindAll(self, text):
ptr = None
list = []
for index in range(len(text)): # for (index = 0; index < text.length; index++)
t = ord(text[index]) # text.charCodeAt(index)
tn = None
if (ptr == None):
tn = self._first.TryGetValue(t)
else:
tn = ptr.TryGetValue(t)
if (tn == None):
tn = self._first.TryGetValue(t)
if (tn != None):
if (tn.End):
for j in range(len(tn.Results)): # for (j = 0; j < tn.Results.length; j++)
item = tn.Results[j]
keyword = self._keywords[item]
list.append({"Keyword": keyword, "Success": True, "End": index, "Start": index + 1 - len(keyword), "Index": self._indexs[item]})
ptr = tn
return list
def ContainsAny(self, text):
ptr = None
for index in range(len(text)): # for (index = 0; index < text.length; index++)
t = ord(text[index]) # text.charCodeAt(index)
tn = None
if (ptr == None):
tn = self._first.TryGetValue(t)
else:
tn = ptr.TryGetValue(t)
if (tn == None):
tn = self._first.TryGetValue(t)
if (tn != None):
if (tn.End):
return True
ptr = tn
return False
def Replace(self, text, replaceChar='*'):
result = list(text)
ptr = None
for i in range(len(text)): # for (i = 0; i < text.length; i++)
t = ord(text[i]) # text.charCodeAt(index)
tn = None
if (ptr == None):
tn = self._first.TryGetValue(t)
else:
tn = ptr.TryGetValue(t)
if (tn == None):
tn = self._first.TryGetValue(t)
if (tn != None):
if (tn.End):
maxLength = len(self._keywords[tn.Results[0]])
start = i + 1 - maxLength
for j in range(start, i + 1): # for (j = start; j <= i; j++)
result[j] = replaceChar
ptr = tn
return ''.join(result)
if __name__ == "__main__":
s = "中国|国人|zg人|乾清宫"
test = "我是中国人"
search = WordsSearch()
search.SetKeywords(s.split('|'))
print("----------------------------------- WordsSearch -----------------------------------")
print("WordsSearch FindFirst is run.")
f = search.FindFirst(test)
if f["Keyword"] != "中国":
print("WordsSearch FindFirst is error.............................")
print("WordsSearch FindFirst is run.")
all = search.FindAll("乾清宫")
if all[0]["Keyword"] != "乾清宫":
print("WordsSearch FindFirst is error.............................")
print("WordsSearch FindAll is run.")
all = search.FindAll(test)
if all[0]["Keyword"] != "中国":
print("WordsSearch FindAll is error.............................")
if all[1]["Keyword"] != "国人":
print("WordsSearch FindAll is error.............................")
if all[0]["Start"] != 2:
print("WordsSearch FindAll is error.............................")
if all[0]["End"] != 3:
print("WordsSearch FindAll is error.............................")
if len(all) != 2:
print("WordsSearch FindAll is error.............................")
print("WordsSearch ContainsAny is run.")
b = search.ContainsAny(test)
if b == False:
print("WordsSearch ContainsAny is error.............................")
print("WordsSearch Replace is run.")
txt = search.Replace(test)
if (txt != "我是***"):
print("WordsSearch Replace is error.............................")
print("----------------------------------- Test End -----------------------------------")

File diff suppressed because it is too large

View File

@@ -1,12 +1,14 @@
# -*- coding: utf-8 -*-
import openai
import requests,json
import requests, json
import datetime
# from transformers import TextDavinciTokenizer, TextDavinciModel
from odoo import api, fields, models, _
from odoo import api, fields, models, tools, _
from odoo.exceptions import UserError
from odoo.osv import expression
from odoo.addons.app_common.models.base import get_ua_type
import logging
_logger = logging.getLogger(__name__)
@@ -14,207 +16,345 @@ _logger = logging.getLogger(__name__)
class Channel(models.Model):
_inherit = 'mail.channel'
@api.model
def get_openai(self, api_key, ai_model, data, user="Odoo"):
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
R_TIMEOUT = 5
is_private = fields.Boolean(string="Private", default=False, help="Check to make the channel private; only its members can use it, it will not be public")
# channel_member_ids is awkward to work with, so add this field here
# primary AI
ai_partner_id = fields.Many2one(comodel_name="res.partner", string="Main Ai", required=False,
domain=[('gpt_id', '!=', None), ('is_chat_private', '=', True)],
default=lambda self: self._app_get_m2o_default('ai_partner_id'),
help="Main Ai is the robot help you default.")
ext_ai_partner_id = fields.Many2one(comodel_name="res.partner", string="Secondary Ai",
domain=[('gpt_id', '!=', None), ('is_chat_private', '=', True)])
description = fields.Char('Ai Character', help="The AI will act according to the character set here.")
set_max_tokens = fields.Selection([
('300', 'Short'),
('600', 'Standard'),
('1000', 'Medium'),
('2000', 'Long'),
('3000', 'Overlength'),
('32000', '32K'),
], string='Max Response', default='600', help="越大返回内容越多,计费也越多")
set_chat_count = fields.Selection([
('none', 'Ai Auto'),
('1', '1标准'),
('3', '3强关联'),
('5', '5超强关联'),
], string="History Count", default='1', help="0-5设定后会将最近n次对话发给Ai有助于他更好的回答但太大费用也高")
set_temperature = fields.Selection([
('2', '天马行空'),
('1.5', '创造性'),
('1', '标准'),
('0.6', '理性'),
('0.1', '保守'),
], string="Set Temperature", default='1', help="0-21值越大越富有想像力越小则越保守")
set_top_p = fields.Selection([
('0.9', '严谨惯性思维'),
('0.6', '标准推理'),
('0.4', '跳跃性'),
('0.1', '随便'),
], string="Top Probabilities", default='0.6', help="0-1值越大越倾向大众化的连贯思维")
# discourage overused common words
set_frequency_penalty = fields.Selection([
('2', '老学究-晦涩难懂'),
('1.5', '学院派-较多高级词'),
('1', '标准'),
('0.1', '少常用词'),
('-1', '通俗易懂'),
('-2', '大白话'),
], string='Frequency Penalty', default='1', help="-2~2值越大越少使用常用词")
set_presence_penalty = fields.Selection([
('2', '多样强迫症'),
('1.5', '新颖化'),
('1', '标准'),
('0.1', '允许常规重复'),
('-1', '允许较多重复'),
('-2', '更多强调重复'),
], string='Presence penalty', default='1', help="-2~2值越大越少重复词")
if ai_model == 'dall-e2':
# todo: 处理 图像引擎,主要是返回参数到聊天中
# image_url = response['data'][0]['url']
# https://platform.openai.com/docs/guides/images/introduction
pdata = {
"prompt": data,
"n": 3,
"size": "1024x1024",
}
return '建设中'
elif ai_model in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
pdata = {
"model": ai_model,
"messages": [{"role": "user", "content": data}],
"temperature": 0.9,
"max_tokens": 2000,
"top_p": 1,
"frequency_penalty": 0.0,
"presence_penalty": 0.6,
"user": user,
"stop": ["Human:", "AI:"]
}
response = requests.post("https://api.openai.com/v1/chat/completions", data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
res = response.json()
if 'choices' in res:
# for rec in res:
# res = rec['message']['content']
res = '\n'.join([x['message']['content'] for x in res['choices']])
return res
else:
pdata = {
"model": ai_model,
"prompt": data,
"temperature": 0.9,
"max_tokens": 2000,
"top_p": 1,
"frequency_penalty": 0.0,
"presence_penalty": 0.6,
"user": user,
"stop": ["Human:", "AI:"]
}
response = requests.post("https://api.openai.com/v1/completions", data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
res = response.json()
if 'choices' in res:
res = '\n'.join([x['text'] for x in res['choices']])
return res
# fetch model info
# list_model = requests.get("https://api.openai.com/v1/models", headers=headers)
# model_info = requests.get("https://api.openai.com/v1/models/%s" % ai_model, headers=headers)
# todo: use a compute here?
max_tokens = fields.Integer('最长响应Token', default=600, help="越大返回内容越多,计费也越多")
chat_count = fields.Integer(string="上下文数量", default=0, help="0~3设定后会将最近n次对话发给Ai有助于他更好的回答")
temperature = fields.Float(string="创造性值", default=1, help="0~2值越大越富有想像力越小则越保守")
top_p = fields.Float(string="连贯性值", default=0.6, help="0~1值越大越富有想像力越小则越保守")
frequency_penalty = fields.Float('避免常用词值', default=1, help="-2~2值越大越少使用常用词")
presence_penalty = fields.Float('避免重复词值', default=1, help="-2~2值越大越少重复词")
return "获取结果超时,请重新跟我聊聊。"
is_current_channel = fields.Boolean('是否当前用户默认频道', compute='_compute_is_current_channel', help='是否当前用户默认微信对话频道')
@api.model
def get_openai_context(self, channel_id, partner_chatgpt, current_prompt, seconds=600):
afterTime = fields.Datetime.now() - datetime.timedelta(seconds=seconds)
message_model = self.env['mail.message'].sudo()
prompt = [f"Human:{current_prompt}\nAI:", ]
domain = [('res_id', '=', channel_id),
('model', '=', 'mail.channel'),
('message_type', '!=', 'user_notification'),
('parent_id', '=', False),
('date', '>=', afterTime),
('author_id', '=', self.env.user.partner_id.id)]
messages = message_model.with_context(tz='UTC').search(domain, order="id desc", limit=15)
# print('domain:',domain)
# print('messages:',messages)
for msg in messages:
ai_msg = message_model.search([("res_id", "=", channel_id),
('model', '=', msg.model),
('parent_id', '=', msg.id),
('author_id', '=', partner_chatgpt),
('body', '!=', '<p>获取结果超时,请重新跟我聊聊。</p>')])
if ai_msg:
prompt.append("Human:%s\nAI:%s" % (
msg.body.replace("<p>", "").replace("</p>", ""), ai_msg.body.replace("<p>", "").replace("</p>", "")))
# print(msg.body.replace("<p>", "").replace("</p>", ""))
# print(ai_msg.body.replace("<p>", "").replace("</p>", ""))
def name_get(self):
result = []
for c in self:
if c.channel_type == 'channel' and c.is_private:
pre = '[私]'
else:
_logger.error(f"not find for id:{str(msg.id)}")
pre = ''
result.append((c.id, "%s%s" % (pre, c.name or '')))
return result
return '\n'.join(prompt[::-1])
def get_openai_context(self, channel_id, author_id, answer_id, minutes=60, chat_count=0):
# context handling: cover both group chats and one-to-one chats
# handling for the new Azure-style chat API
context_history = []
afterTime = fields.Datetime.now() - datetime.timedelta(minutes=minutes)
message_model = self.env['mail.message'].sudo()
# message handling: take the latest question plus the previous chat_count interactions, concatenated in chronological order
# note: every AI reply carries a parent_id to keep the thread continuous
# one-to-one chat handling
def get_chatgpt_answer(self, prompt, partner_name):
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0.6,
max_tokens=3000,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
user=partner_name,
)
res = response['choices'][0]['text']
return res
# todo: find a better approach
domain = [('res_id', '=', channel_id),
('model', '=', 'mail.channel'),
('message_type', '!=', 'user_notification'),
('parent_id', '!=', False),
('is_ai', '=', True),
('body', '!=', '<p>%s</p>' % _('Response Timeout, please speak again.')),
('body', '!=', _('温馨提示:您发送的内容含有敏感词,请修改内容后再向我发送。'))]
if self.channel_type in ['group', 'channel']:
# group chats add a time limit; currently all participants are considered, with no author_id restriction
domain = expression.AND([domain, [('date', '>=', afterTime)]])
else:
domain = expression.AND([domain, [('author_id', '=', answer_id.id)]])
if chat_count == 0:
ai_msg_list = []
else:
ai_msg_list = message_model.with_context(tz='UTC').search(domain, order="id desc", limit=chat_count)
for ai_msg in ai_msg_list:
# insert ai_msg only if it was sent by the AI; insert user_msg only if it was sent by the user
user_msg = ai_msg.parent_id.sudo()
if ai_msg.author_id.sudo().gpt_id and answer_id.sudo().gpt_id and ai_msg.author_id.sudo().gpt_id == answer_id.sudo().gpt_id:
ai_content = str(ai_msg.body).replace("<p>", "").replace("</p>", "").replace("<p>", "")
context_history.insert(0, {
'role': 'assistant',
'content': ai_content,
})
if not user_msg.author_id.gpt_id:
user_content = user_msg.description.replace("<p>", "").replace("</p>", "").replace('@%s' % answer_id.name, '').lstrip()
context_history.insert(0, {
'role': 'user',
'content': user_content,
})
return context_history
def get_ai_config(self, ai):
# hook for fetching the AI configuration
return {}
def get_ai_response(self, ai, messages, channel, user_id, message):
author_id = message.create_uid.partner_id
answer_id = user_id.partner_id
# todo: only apply this configuration to personally configured group chats
param = self.get_ai_config(ai)
res, usage, is_ai = ai.get_ai(messages, author_id, answer_id, param)
if res:
if get_ua_type() != 'wxweb':
# when replying to a WeChat voice client, return plain text and do not convert newlines
res = res.replace('\n', '<br/>')
new_msg = channel.with_user(user_id).message_post(body=res, message_type='comment', subtype_xmlid='mail.mt_comment', parent_id=message.id)
if usage:
if ai.provider == 'ali':
prompt_tokens = usage['input_tokens']
completion_tokens = usage['output_tokens']
total_tokens = usage['input_tokens'] + usage['output_tokens']
else:
prompt_tokens = usage['prompt_tokens']
completion_tokens = usage['completion_tokens']
total_tokens = usage['total_tokens']
new_msg.write({
'human_prompt_tokens': prompt_tokens,
'ai_completion_tokens': completion_tokens,
'cost_tokens': total_tokens,
})
def _notify_thread(self, message, msg_vals=False, **kwargs):
rdata = super(Channel, self)._notify_thread(message, msg_vals=msg_vals, **kwargs)
# print(f'rdata:{rdata}')
to_partner_id = self.env['res.partner']
answer_id = self.env['res.partner']
user_id = self.env['res.users']
author_id = msg_vals.get('author_id')
gpt_id = self.env['ai.robot']
ai = self.env['ai.robot'].sudo()
channel = self.env['mail.channel']
channel_type = self.channel_type
messages = []
# skip ordinary notifications, but handle the welcome message
if '<div class="o_mail_notification' in message.body and message.body != _('<div class="o_mail_notification">joined the channel</div>'):
return rdata
if 'o_odoobot_command' in message.body:
return rdata
if channel_type == 'chat':
channel_partner_ids = self.channel_partner_ids
to_partner_id = channel_partner_ids - message.author_id
user_id = to_partner_id.mapped('user_ids').filtered(lambda r: r.gpt_id)[:1]
if user_id:
answer_id = channel_partner_ids - message.author_id
user_id = answer_id.mapped('user_ids').sudo().filtered(lambda r: r.gpt_id)[:1]
if user_id and answer_id.gpt_id:
gpt_policy = user_id.gpt_policy
gpt_wl_users = user_id.gpt_wl_users
is_allow = message.create_uid.id in gpt_wl_users.ids
gpt_wl_partners = user_id.gpt_wl_partners
is_allow = message.author_id.id in gpt_wl_partners.ids
if gpt_policy == 'all' or (gpt_policy == 'limit' and is_allow):
gpt_id = user_id.gpt_id
ai = answer_id.sudo().gpt_id
elif channel_type in ['group', 'channel']:
# partner_ids = @ ids
partner_ids = list(msg_vals.get('partner_ids'))
if hasattr(self, 'ai_partner_id') and self.ai_partner_id:
# when a primary AI partner is set, use it
if self.ai_partner_id.id in partner_ids:
partner_ids = [self.ai_partner_id.id]
if partner_ids:
# regular group chat with an @mention
partners = self.env['res.partner'].search([('id', 'in', partner_ids)])
# user_id = a user that has a gpt robot bound
user_id = partners.mapped('user_ids').filtered(lambda r: r.gpt_id)[:1]
if user_id:
gpt_policy = user_id.gpt_policy
gpt_wl_users = user_id.gpt_wl_users
is_allow = message.create_uid.id in gpt_wl_users.ids
to_partner_id = user_id.partner_id
if gpt_policy == 'all' or (gpt_policy == 'limit' and is_allow):
gpt_id = user_id.gpt_id
# user_id = a user that has a gpt robot bound
user_id = partners.mapped('user_ids').sudo().filtered(lambda r: r.gpt_id)[:1]
elif message.body == _('<div class="o_mail_notification">joined the channel</div>'):
# welcome-message case
partners = self.channel_partner_ids.sudo().filtered(lambda r: r.gpt_id)[:1]
user_id = partners.mapped('user_ids')[:1]
elif self.member_count == 2:
# handle two-member private channels
if hasattr(self, 'is_private') and not self.is_private:
# skip two-member channels that are not private
pass
else:
partners = self.channel_partner_ids.sudo().filtered(lambda r: r.gpt_id and r != message.author_id)[:1]
user_id = partners.mapped('user_ids')[:1]
elif not message.author_id.gpt_id:
# no @mention: default to the first robot
# robot = self.env.ref('app_chatgpt.chatgpt_robot')
# temporarily use azure
if hasattr(self, 'ai_partner_id') and self.ai_partner_id:
# when a primary AI partner is set, use it
user_id = self.ai_partner_id.mapped('user_ids')[:1]
else:
# use the first robot in the channel
partners = self.channel_partner_ids.sudo().filtered(lambda r: r.gpt_id)[:1]
user_id = partners.mapped('user_ids')[:1]
if user_id:
ai = user_id.sudo().gpt_id
# no check here: this logic moved to get_ai_pre; direct non-AI replies must be flagged with is_ai=False
# gpt_policy = user_id.gpt_policy
# gpt_wl_partners = user_id.gpt_wl_partners
# is_allow = message.author_id.id in gpt_wl_partners.ids
# answer_id = user_id.partner_id
# if gpt_policy == 'all' or (gpt_policy == 'limit' and is_allow):
# ai = user_id.sudo().gpt_id
# elif user_id.gpt_id and not is_allow:
# # 暂时有限用户的Ai
# raise UserError(_('此Ai暂时未开放请联系管理员。'))
if hasattr(ai, 'is_translator') and ai.is_translator:
return rdata
chatgpt_channel_id = self.env.ref('app_chatgpt.channel_chatgpt')
# print('author_id:',author_id)
if message.body == _('<div class="o_mail_notification">joined the channel</div>'):
msg = _("Please warmly welcome our new partner %s and send him the best wishes.") % message.author_id.name
else:
# do not use the message preview; prompt content would be lost from it
plaintext_ct = tools.html_to_inner_content(message.body)
msg = plaintext_ct.replace('@%s' % answer_id.name, '').lstrip()
# print('partner_chatgpt.id:',partner_chatgpt.id)
prompt = msg_vals.get('body')
# print('prompt:', prompt)
# print('-----')
if not prompt:
if not msg:
return rdata
if self._context.get('app_ai_sync_config') and self._context.get('app_ai_sync_config') in ['sync', 'async']:
sync_config = self._context.get('app_ai_sync_config')
else:
sync_config = self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openai_sync_config')
# api_key = self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openapi_api_key')
api_key = ''
if gpt_id:
api_key = gpt_id.openapi_api_key
# AI handling: avoid the AI answering its own messages
if ai and answer_id != message.author_id:
api_key = ai.openapi_api_key
if not api_key:
_logger.warning(_("ChatGPT Robot【%s】have not set open api key."))
return rdata
try:
openapi_context_timeout = int(self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openapi_context_timeout')) or 600
except:
openapi_context_timeout = 600
try:
openapi_context_timeout = int(self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openapi_context_timeout')) or 60
except:
openapi_context_timeout = 60
openai.api_key = api_key
# default to 3 rounds of history; GPT-4 models take only 1
chat_count = 3
if '4' in ai.ai_model or '4' in ai.name:
chat_count = 1
if hasattr(self, 'chat_count'):
if self.chat_count > 0:
chat_count = 1
else:
chat_count = chat_count
openai.api_key = api_key
partner_name = ''
# print(msg_vals)
# print(msg_vals.get('record_name', ''))
# print('self.channel_type :',self.channel_type)
if gpt_id:
ai_model = gpt_id.ai_model or 'text-davinci-003'
# print('chatgpt_name:', chatgpt_name)
# if author_id != to_partner_id.id and (chatgpt_name in msg_vals.get('record_name', '') or 'ChatGPT' in msg_vals.get('record_name', '') ) and self.channel_type == 'chat':
if author_id != to_partner_id.id and self.channel_type == 'chat':
_logger.info(f'私聊:author_id:{author_id},partner_chatgpt.id:{to_partner_id.id}')
try:
channel = self.env[msg_vals.get('model')].browse(msg_vals.get('res_id'))
if ai_model not in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
prompt = self.get_openai_context(channel.id, to_partner_id.id, prompt, openapi_context_timeout)
print(prompt)
# res = self.get_chatgpt_answer(prompt,partner_name)
res = self.get_openai(api_key, ai_model, prompt, partner_name)
res = res.replace('\n', '<br/>')
# print('res:',res)
# print('channel:',channel)
channel.with_user(user_id).message_post(body=res, message_type='comment',subtype_xmlid='mail.mt_comment', parent_id=message.id)
# channel.with_user(user_chatgpt).message_post(body=res, message_type='notification', subtype_xmlid='mail.mt_comment')
# channel.sudo().message_post(
# body=res,
# author_id=partner_chatgpt.id,
# message_type="comment",
# subtype_xmlid="mail.mt_comment",
# )
# self.with_user(user_chatgpt).message_post(body=res, message_type='comment', subtype_xmlid='mail.mt_comment')
except Exception as e:
raise UserError(_(e))
if author_id != answer_id.id and self.channel_type == 'chat':
# private chat
_logger.info(f'私聊:author_id:{author_id},partner_chatgpt.id:{answer_id.id}')
channel = self.env[msg_vals.get('model')].browse(msg_vals.get('res_id'))
elif author_id != answer_id.id and msg_vals.get('model', '') == 'mail.channel' and msg_vals.get('res_id', 0) == chatgpt_channel_id.id:
# todo: public group chats; only one is enabled for now, more later
_logger.info(f'频道群聊:author_id:{author_id},partner_chatgpt.id:{answer_id.id}')
channel = chatgpt_channel_id
elif author_id != answer_id.id and msg_vals.get('model', '') == 'mail.channel' and self.channel_type in ['group', 'channel']:
# channels created by advanced users themselves
channel = self.env[msg_vals.get('model')].browse(msg_vals.get('res_id'))
if hasattr(channel, 'is_private') and channel.description:
messages.append({"role": "system", "content": channel.description})
elif author_id != to_partner_id.id and msg_vals.get('model', '') == 'mail.channel' and msg_vals.get('res_id', 0) == chatgpt_channel_id.id:
_logger.info(f'频道群聊:author_id:{author_id},partner_chatgpt.id:{to_partner_id.id}')
try:
prompt = self.get_openai_context(chatgpt_channel_id.id, to_partner_id.id, prompt, openapi_context_timeout)
# print(prompt)
# res = self.get_chatgpt_answer(prompt, partner_name)
res = self.get_openai(api_key, ai_model, prompt, partner_name)
res = res.replace('\n', '<br/>')
chatgpt_channel_id.with_user(user_id).message_post(body=res, message_type='comment', subtype_xmlid='mail.mt_comment',parent_id=message.id)
except Exception as e:
raise UserError(_(e))
try:
c_history = self.get_openai_context(channel.id, author_id, answer_id, openapi_context_timeout, chat_count)
if c_history:
messages += c_history
messages.append({"role": "user", "content": msg})
msg_len = sum(len(str(m)) for m in messages)
# the API accepts at most 8430 tokens
if msg_len * 2 > ai.max_send_char:
messages = []
if hasattr(channel, 'is_private') and channel.description:
messages.append({"role": "system", "content": channel.description})
messages.append({"role": "user", "content": msg})
msg_len = sum(len(str(m)) for m in messages)
if msg_len * 2 > ai.max_send_char:
new_msg = channel.with_user(user_id).message_post(body=_('您所发送的提示词已超长。'), message_type='comment',
subtype_xmlid='mail.mt_comment',
parent_id=message.id)
# if msg_len * 2 >= 8000:
# messages = [{"role": "user", "content": msg}]
if sync_config == 'sync':
self.get_ai_response(ai, messages, channel, user_id, message)
else:
self.with_delay().get_ai_response(ai, messages, channel, user_id, message)
except Exception as e:
raise UserError(_(e))
return rdata
def _message_post_after_hook(self, message, msg_vals):
if message.author_id.gpt_id:
if msg_vals['body'] not in [_('Response Timeout, please speak again.'), _('温馨提示:您发送的内容含有敏感词,请修改内容后再向我发送。'),
_('此Ai暂时未开放请联系管理员。'), _('您所发送的提示词已超长。')]:
message.is_ai = True
return super(Channel, self)._message_post_after_hook(message, msg_vals)
@api.model
def _get_my_last_cid(self):
# get the channel the current user last used and return its id
# todo: optimize by writing it whenever the user enters a chat
user = self.env.user
msgs = self.env['mail.message'].sudo().search([
('model', '=', 'mail.channel'),
('author_id', '=', user.partner_id.id),
], limit=3, order='id desc')
c_id = 0
c = self
for m in msgs:
c = self.browse(m.res_id)
if c.is_member:
c_id = c.id
break
if not c_id:
c = self.env.ref('app_chatgpt.channel_chatgpt', raise_if_not_found=False)
c_id = c.id or False
if c and not c.is_member:
c.sudo().add_members([user.partner_id.id])
return c_id
@api.onchange('ai_partner_id')
def _onchange_ai_partner_id(self):
if self.ai_partner_id and self.ai_partner_id.image_1920:
self.image_128 = self.ai_partner_id.avatar_128
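
For reference, the `messages` list that _notify_thread builds and hands to ai.robot.get_ai has the plain chat-completions shape; the values below are illustrative:

# Illustrative shape of the messages list sent to the robot:
messages = [
    {"role": "system", "content": "You are a finance assistant."},   # channel.description, when set
    {"role": "user", "content": "What is working capital?"},          # from get_openai_context
    {"role": "assistant", "content": "Working capital is ..."},       # previous AI reply
    {"role": "user", "content": "Give me a short example."},          # the new prompt (msg)
]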

View File

@@ -6,8 +6,25 @@ from odoo import fields, models
class Message(models.Model):
_inherit = "mail.message"
human_prompt_tokens = fields.Integer('Human Prompt Tokens')
ai_completion_tokens = fields.Integer('AI Completion Tokens')
cost_tokens = fields.Integer('Cost Tokens')
# whether the message is an AI reply
is_ai = fields.Boolean('Is Ai', default=False)
def _message_add_reaction(self, content):
super(Message, self)._message_add_reaction(content)
if self.create_uid.gpt_id:
# handle feedback (reactions)
pass
def message_format(self, format_reply=True):
message_values = super(Message, self).message_format(format_reply=format_reply)
for message in message_values:
message_sudo = self.browse(message['id']).sudo().with_prefetch(self.ids)
message['human_prompt_tokens'] = message_sudo.human_prompt_tokens
message['ai_completion_tokens'] = message_sudo.ai_completion_tokens
message['cost_tokens'] = message_sudo.cost_tokens
message['is_ai'] = message_sudo.is_ai
return message_values

View File

@@ -0,0 +1,8 @@
# -*- coding: utf-8 -*-
from odoo import fields, models, api, _
class MailThread(models.AbstractModel):
_inherit = "mail.thread"

View File

@@ -0,0 +1,21 @@
{
'id': 'chatcmpl-747IRWr2Ij3HA6NVTWp4ZTnEA2grW',
'object': 'chat.completion',
'created': 1681215715,
'model': 'gpt-3.5-turbo-0301',
'usage': {
'prompt_tokens': 17,
'completion_tokens': 38,
'total_tokens': 55
},
'choices': [
{
'message': {
'role': 'assistant',
'content': ' '
},
'finish_reason': 'stop',
'index': 0
}
]
}
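
This sample response maps onto ai.robot.get_ai_post as follows (a sketch; `response` stands for the dict above):

# How get_ai_post reads an OpenAI-style response like the one above:
content = response['choices'][0]['message']['content']
usage = response['usage']
total = usage['total_tokens']          # 55 in this sample
reply = content.replace(' .', '.').strip()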

View File

@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2020-Present InTechual Solutions. (<https://intechualsolutions.com/>)
from odoo import fields, models
@@ -7,4 +6,11 @@ from odoo import fields, models
class ResConfigSettings(models.TransientModel):
_inherit = "res.config.settings"
openapi_context_timeout = fields.Integer(string="上下文连接超时", help="多少秒以内的聊天信息作为上下文继续", config_parameter="app_chatgpt.openapi_context_timeout")
openapi_context_timeout = fields.Integer(string="Connect Timout", help="群聊中多少分钟以内的聊天信息作为上下文继续", config_parameter="app_chatgpt.openapi_context_timeout")
openai_sync_config = fields.Selection([
('sync', 'Synchronous'),
('async', 'Asynchronous')
], string='Sync Config', default='sync', config_parameter="app_chatgpt.openai_sync_config")
module_app_ai_bard = fields.Boolean("Google Bard Ai")
module_app_ai_baidu = fields.Boolean("Baidu Ai China", help='百度文心一格')
module_app_ai_ali = fields.Boolean("Ali Ai China", help='阿里通义千问')
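A hedged sketch of how the new openai_sync_config parameter could drive the sync/async branch shown in the mail.channel hunk earlier; only the parameter key, get_ai_response and with_delay come from the module, the helper name is an assumption:
def _dispatch_ai_call(self, ai, messages, channel, user_id, message):
    # Read the Sync Config setting; default to synchronous.
    mode = self.env['ir.config_parameter'].sudo().get_param(
        'app_chatgpt.openai_sync_config', 'sync')
    if mode == 'async':
        # queue_job is a declared dependency, so with_delay() enqueues the call
        self.with_delay().get_ai_response(ai, messages, channel, user_id, message)
    else:
        self.get_ai_response(ai, messages, channel, user_id, message)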

View File

@@ -0,0 +1,58 @@
# -*- coding: utf-8 -*-
from odoo import fields, models, api
class ResPartner(models.Model):
_inherit = "res.partner"
gpt_id = fields.Many2one('ai.robot', string='Bind to Ai', ondelete='set null')
is_chat_private = fields.Boolean('Allow Chat Private', default=False)
@api.model
def im_search(self, name, limit=20):
users = self.env['res.users'].search([
('id', '!=', self.env.user.id),
('name', 'ilike', name),
('active', '=', True),
('share', '=', False),
('is_chat_private', '=', True)
], order='gpt_id, name, id', limit=limit)
return list(users.partner_id.mail_partner_format().values())
def mail_partner_format(self, fields=None):
# Override the core method directly to add the gpt_id field
partners_format = dict()
if not fields:
fields = {'id': True, 'name': True, 'email': True, 'active': True, 'im_status': True, 'gpt_id': 0, 'user': {}}
for partner in self:
data = {}
if 'id' in fields:
data['id'] = partner.id
if 'name' in fields:
name = partner.name
# English names are hard to split, so do not mask the name for now
# if not partner.related_user_id.gpt_id:
# name = partner.name[0] + '*' * (len(partner.name) - 1)
data['name'] = name
if 'email' in fields:
data['email'] = partner.email
if 'active' in fields:
data['active'] = partner.active
if 'im_status' in fields:
data['im_status'] = partner.im_status
if 'gpt_id' in fields:
data['gpt_id'] = partner.gpt_id.id if partner.gpt_id else 0
if 'user' in fields:
internal_users = partner.user_ids - partner.user_ids.filtered('share')
main_user = internal_users[0] if len(internal_users) > 0 else partner.user_ids[0] if len(partner.user_ids) > 0 else self.env['res.users']
data['user'] = {
"id": main_user.id,
"isInternalUser": not main_user.share,
} if main_user else [('clear',)]
# if 'guest' in self.env.context or not self.env.user.has_group('base.group_erp_manager'):
# Never expose the email address
data.pop('email', None)
partners_format[partner] = data
return partners_format

View File

@@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-
from odoo import fields, models
class ResPartnerAiUse(models.Model):
_name = "res.partner.ai.use"
_description = 'Consumer Ai Usage'
name = fields.Many2one('res.partner', 'Partner')
ai_user_id = fields.Many2one('res.users', 'Ai User', domain=[('gpt_id', '!=', False)])
first_ask_time = fields.Datetime('First Ask Time')
latest_ask_time = fields.Datetime('Latest Ask Time')
service_start_date = fields.Datetime('Service Start Date')
service_end_date = fields.Datetime('Service End Date')
used_number = fields.Integer('Number of Used')
max_number = fields.Integer('Max Number of Call')
human_prompt_tokens = fields.Integer('Human Prompt Tokens')
ai_completion_tokens = fields.Integer('AI Completion Tokens')
tokens_total = fields.Integer('Total Tokens')
token_balance = fields.Integer('Token Balance')
# balance = allow - total
token_allow = fields.Integer('Token Allow')
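As a reading aid only (no such helper ships in this hunk), usage could be accumulated onto a res.partner.ai.use record after each completion, keeping the balance = allow - total relation from the comment above; the use record and usage dict are illustrative names:
def _add_usage(use, usage):
    # use: a res.partner.ai.use record; usage: an OpenAI usage dict
    prompt = usage.get('prompt_tokens', 0)
    completion = usage.get('completion_tokens', 0)
    total = use.tokens_total + prompt + completion
    use.write({
        'used_number': use.used_number + 1,
        'latest_ask_time': fields.Datetime.now(),
        'human_prompt_tokens': use.human_prompt_tokens + prompt,
        'ai_completion_tokens': use.ai_completion_tokens + completion,
        'tokens_total': total,
        'token_balance': use.token_allow - total,
    })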

View File

@@ -6,10 +6,12 @@ from odoo import fields, models
class ResUsers(models.Model):
_inherit = "res.users"
gpt_id = fields.Many2one('ai.robot', string='Bind to ChatGpt')
# Changed: the binding is now stored on the partner and related here on the user
gpt_id = fields.Many2one('ai.robot', string='Bind to Ai', related='partner_id.gpt_id', inherited=True, readonly=False)
gpt_policy = fields.Selection([
('all', 'All Users'),
('limit', 'Selected Users')
], string='Allowed Conversation Mode', default='all', ondelete='set default')
gpt_wl_users = fields.Many2many('res.users', 'res_users_res_users_rel', 'robot_id', 'user_id', string='Allowed Users', domain="[('id', '!=', id)]")
gpt_wl_partners = fields.Many2many('res.partner', 'res_partner_ai_use', 'ai_user_id', 'name', string='Allowed Partners')
gpt_demo_time = fields.Integer('Default Demo Time', default=0)
is_chat_private = fields.Boolean('Allow Chat Private', default=False, related='partner_id.is_chat_private', inherited=True, readonly=False)
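A hedged illustration (not code from the diff) of how the white-list fields above might gate a conversation; only the field names gpt_policy and gpt_wl_partners come from the module, the helper itself is an assumption:
def _gpt_allowed(robot_user, partner):
    # robot_user: a res.users record bound to an ai.robot (gpt_id set)
    # partner: the res.partner asking the question
    if robot_user.gpt_policy == 'all':
        return True
    # 'limit' policy: only partners on the white list may chat
    return partner in robot_user.gpt_wl_partners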

File diff suppressed because it is too large.

View File

@@ -5,7 +5,6 @@
"owned_by": "openai",
"permission": [
{
"id": "modelperm-ZErASyl63fhYUeMMk7QKOHAB",
"object": "model_permission",
"created": 1677691854,
"allow_create_engine": false,

View File

@@ -1,2 +1,4 @@
id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
access_gpt_robt_user,AiRobotUser,model_ai_robot,base.group_user,1,1,1,1
access_gpt_robt_manager,AiRobotManager,model_ai_robot,base.group_erp_manager,1,1,1,1
access_res_partner_ai_use_user,ResPartnerAiUseUser,model_res_partner_ai_use,base.group_user,1,0,0,0
access_res_partner_ai_use_manager,ResPartnerAiUseManager,model_res_partner_ai_use,base.group_erp_manager,1,1,1,1

View File

@@ -0,0 +1,16 @@
<?xml version="1.0" encoding="UTF-8" ?>
<odoo noupdate="1">
<record id="res_partner_ai_use_personal_rule" model="ir.rule">
<field name="name">Personal AI Use</field>
<field ref="model_res_partner_ai_use" name="model_id"/>
<field name="domain_force">[('name','=',user.partner_id.id)]</field>
<field name="groups" eval="[(4, ref('base.group_user'))]"/>
</record>
<record id="res_partner_ai_use_see_all" model="ir.rule">
<field name="name">All AI Use</field>
<field ref="model_res_partner_ai_use" name="model_id"/>
<field name="domain_force">[(1,'=',1)]</field>
<field name="groups" eval="[(4, ref('base.group_erp_manager'))]"/>
</record>
</odoo>

Binary image files changed (contents not shown).

View File

@@ -1,18 +1,18 @@
<section class="oe_container container">
<div class="oe_row oe_spaced" >
<div class="oe_span12">
<h2 class="oe_slogan"> Latest ChatGPT AI Center. GPT 3.5, Dall-E Image.Multi Robot Support. Chat and Training </h2>
<h3 class="oe_slogan"> Support chatgpt 4 image. 3.5 turbo, text-davinci, DALL·E, Integration All ChatGpt Api. </h3>
<div class="row">
<h2 class="oe_slogan"> Latest ChatGPT AI Center. GPT 3.5, Ali Ai, Baidu Ai, Multi Robot Support. Chat and Training </h2>
<h3 class="oe_slogan"> Support chatgpt 4 32k. 3.5 turbo, text-davinci, Integration All ChatGpt Api. </h3>
<div class="oe_row">
<h3>Latest update: v16.23.07.29</h3>
<div class="oe_span12">
<h3>Latest update: v16.23.09.22</h3>
<div class="row">
<div class="row">
<img class="" src="bard.gif">
Add google bard support, update chatgpt api</div>
Add Alibaba Qwen support (search 'app_ai_ali'), update chatgpt api
</div>
<img class="oe_demo oe_screenshot img img-fluid" src="demo02.jpg">
</div>
<h3>Latest update: v16.23.03.16</h3>
<div class="oe_span12">
<div class="row">
<img class="oe_demo oe_screenshot img img-fluid" style="max-height: 100%;" src="banner.png">
</div>
<div class="oe_span12 oe_spaced">
@@ -25,7 +25,7 @@
</li>
<li>
<i class="fa fa-check-square-o text-primary"></i>
2. Multi Ai support including Google Bard Ai, Azure Ai, Chatgpt 4, Chatgpt 3.5 Turbo, Chatgpt 3 Davinci, Chatgpt 2 Code Optimized, 'Dall-E Image.
2. Multi Ai support including Azure Ai, Alibaba Ai, Baidu Ai, Chatgpt 4, Chatgpt 3.5 Turbo, Chatgpt 3 Davinci
</li>
<li>
<i class="fa fa-check-square-o text-primary"></i>
@@ -81,25 +81,21 @@
<section class="oe_container container">
<div class="oe_row oe_spaced">
<h1 class="text-danger text-center">Add more Ai support like google bard, chatgpt 4, baidu china</h1>
<h2 class="bg-warning text-center pt8 pb8">Add more Ai support like Alibaba qwen, chatgpt 4, baidu china</h2>
<h4 class="oe_slogan"> Need to navigate to odoo app store to install addons</h4>
<div class="oe_demo oe_screenshot img img-fluid">
<img src="demo01.jpg"/>
</div>
<h4 class="oe_slogan">Please apply for the bard api first from google</h4>
<h4 class="oe_slogan">Please apply for the Qwen Api first from Alibaba</h4>
<div class="oe_demo oe_screenshot img img-fluid">
<img src="demo03.jpg"/>
</div>
<h4 class="oe_slogan">Setup for your own key</h4>
<div class="oe_demo oe_screenshot img img-fluid">
<img src="demo04.jpg"/>
</div>
</div>
</section>
<section class="oe_container container">
<div class="oe_row oe_spaced">
<h1 class="text-danger text-center">Easy to use Ai Robot with multi Provider. Easy chat, easy help</h1>
<h2 class="bg-warning text-center pt8 pb8">Easy to use Ai Robot with multi Provider. Easy chat, easy help</h2>
<h4 class="oe_slogan"> Open Ai for more smart. Microsoft Azure chatgpt for china user.</h4>
<div class="oe_demo oe_screenshot img img-fluid">
<img src="demob.jpg"/>
@@ -109,8 +105,8 @@
<section class="oe_container container">
<div class="oe_row oe_spaced">
<h1 class="text-danger text-center">1. Multi ChatGpt openAI robot Connector. Chat and train.</h1>
<h4 class="oe_slogan"> Goto Setting--> GPT Robot to setup your robot api. </h4>
<h2 class="bg-warning text-center pt8 pb8">1. Multi ChatGpt openAI robot Connector. Chat and train.</h2>
<h4 class="oe_slogan"> Goto Setting--> Ai Robot to setup your robot api. </h4>
<p> Input your api key, And Select the api model you need to use.</p>
<div class="oe_demo oe_screenshot img img-fluid">
<img src="demo1.jpg"/>
@@ -125,7 +121,7 @@
<section class="oe_container container">
<div class="oe_row oe_spaced">
<h1 class="text-danger text-center">2. Multi Api support, Chatgpt 3.5 Turbo, Chatgpt 3 Davinci, Chatgpt 2 Code Optimized, 'Dall-E Image.</h1>
<h2 class="bg-warning text-center pt8 pb8">2. Multi Api support, Chatgpt 3.5 Turbo, Chatgpt 3 Davinci, Chatgpt 2 Code Optimized</h2>
<h4 class="oe_slogan"> Choose the model you want to use</h4>
<div class="oe_demo oe_screenshot img img-fluid">
<img src="demo2.jpg"/>
@@ -139,7 +135,7 @@
<section class="oe_container container">
<div class="oe_row oe_spaced">
<h1 class="text-danger text-center">3. Bind ChatGpt Api to user. So we can chat to robot user or use ChatGpt Channel for Group Chat.</h1>
<h2 class="bg-warning text-center pt8 pb8">3. Bind ChatGpt Api to user. So we can chat to robot user or use ChatGpt Channel for Group Chat.</h2>
<h4 class="oe_slogan"> Go Settings ->users, bind chatgpt to some user.</h4>
<img src="demo4.jpg"/>
</div>
@@ -156,8 +152,8 @@
<section class="oe_container container">
<div class="oe_row oe_spaced">
<h1 class="text-danger text-center">4. White and black List for ChatGpt.</h1>
<h1 class="text-danger text-center">5. Setup Demo Chat time for every new user.</h1>
<h2 class="bg-warning text-center pt8 pb8">4. White and black List for ChatGpt.</h2>
<h2 class="bg-warning text-center pt8 pb8">5. Setup Demo Chat time for every new user.</h2>
<h4 class="oe_slogan"> You can set the blacklist to this chatgpt robot to limit request. Also you can setup Demo time for every normal user..</h4>
<div class="oe_demo oe_screenshot img img-fluid">
<img src="demo6.jpg"/>
@@ -167,7 +163,7 @@
<section class="oe_container container">
<div class="oe_row oe_spaced">
<h1 class="text-danger text-center">6. Easy Start and Stop ChatGpt..</h1>
<h2 class="bg-warning text-center pt8 pb8">6. Easy Start and Stop ChatGpt..</h2>
<h4 class="oe_slogan"> You can easy chat with the apt robot with odoo IM</h4>
<div class="oe_demo oe_screenshot img img-fluid">
<img src="demo7.jpg"/>
@@ -186,7 +182,7 @@
<section class="oe_container container">
<div class="oe_row oe_spaced">
<h1 class="text-danger text-center">7. Evaluation the ai robot to make better response. This training.</h1>
<h2 class="bg-warning text-center pt8 pb8">7. Evaluation the ai robot to make better response. This training.</h2>
<h4 class="oe_slogan"> You can Evaluation chatgpt's answer. Mark as good for good answer. Mark as back for bad answer.</h4>
<p> With Evaluation, you can make your ai robot more smart.
<div class="oe_demo oe_screenshot img img-fluid">
@@ -197,7 +193,7 @@
<section class="oe_container container">
<div class="oe_row oe_spaced">
<h1 class="text-danger text-center">8. Add api support Connect the Microsoft Azure OpenAI Service.</h1>
<h2 class="bg-warning text-center pt8 pb8">8. Add api support Connect the Microsoft Azure OpenAI Service.</h2>
<h4 class="oe_slogan"> Azure openai add. It is for china and other country which no chatgpt service.</h4>
<div class="oe_demo oe_screenshot img img-fluid">
<img src="demo81.jpg"/>
@@ -207,7 +203,7 @@
<section class="oe_container container">
<div class="oe_row oe_spaced">
<h1 class="text-danger text-center">9. Can set Synchronous or Asynchronous mode for Ai response.</h1>
<h2 class="bg-warning text-center pt8 pb8">9. Can set Synchronous or Asynchronous mode for Ai response.</h2>
<h4 class="oe_slogan"> Synchronous(default) mode can get response then ask question again. Asynchronous mode would make you do other thing when waiting for response.</h4>
<div class="oe_demo oe_screenshot img img-fluid">
<img src="demo91.jpg"/>
@@ -217,7 +213,7 @@
<section class="oe_container container">
<div class="oe_row oe_spaced">
<h1 class="text-danger text-center">Multi-language Support..</h1>
<h2 class="bg-warning text-center pt8 pb8">Multi-language Support..</h2>
<h4 class="oe_slogan"> </h4>
<div class="oe_demo oe_screenshot img img-fluid">
<img src="cnreadme.jpg"/>
@@ -225,9 +221,92 @@
</div>
</section>
<section class="oe_container oe_dark">
<!-- begin howto-->
<section class="oe_container container s_text_block o_colored_level pt16 pb16">
<h2 class="text-center bg-info text-white pt16 pb16">- How to setup and use -</h2>
</section>
<section class="oe_container container">
<div class="oe_row oe_spaced">
<h4 class="pt16">
1. Get a ChatGPT Api key from OpenAI or Azure.
</h4>
<p>For the Azure Api, please read </p>
<p>https://www.odooai.cn/blog/odoo-install-deploy-6/chatgpt4-china-application-chatgpt3-5-free-one-year-microsoft-azure-openai-api-registration-tutorial-odoo-aicenter-integration-28 </p>
<p>For the Alibaba Api, please read </p>
<p>https://www.odooai.cn/blog/customer-success-10/odoo-ai-ali-tongyi-qianwen-281</p>
<h4 class="pt16">2. Setup your Api information from Settings -- Users --Ai Robot</h4>
<div class="row">
<div class="oe_demo oe_screenshot img img-fluid">
<img src="setup2.jpg">
</div>
</div>
<h4 class="pt16">3. Setup your Api Provider(openai) , api key, End point</h4>
<div class="row">
<div class="oe_demo oe_screenshot img img-fluid">
<img src="setup3.jpg">
</div>
<p> As OpenAI changes the api often, sometimes you need to check the docs below; a minimal request sketch follows after this step.</p>
<p> https://platform.openai.com/docs/introduction</p>
</div>
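A minimal request sketch (not part of the module) for checking a key and endpoint outside Odoo; the api key and model name are placeholders, and the URL matches the default endpoint used on ai.robot:
import requests

resp = requests.post(
    'https://api.openai.com/v1/chat/completions',
    headers={'Authorization': 'Bearer YOUR_API_KEY'},
    json={
        'model': 'gpt-3.5-turbo',
        'messages': [{'role': 'user', 'content': 'ping'}],
    },
    timeout=30,
)
# A valid key returns a chat.completion payload with a usage block
print(resp.json().get('usage'))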
<h4 class="pt16">4.Bind your Ai Robot (GPT) to a User. We already setup a sample.</h4>
<div class="row">
<div class="oe_demo oe_screenshot img img-fluid">
<img src="setup4.jpg">
</div>
</div>
<h4 class="pt16">5. Bind your Ai User to Channel from Discuss. We already setup a sample.</h4>
<div class="row">
<div class="oe_demo oe_screenshot img img-fluid">
<img src="setup5.jpg">
</div>
</div>
<h5 class="pt16">6. You can setup the Ai system context and Character.</h5>
<div class="row">
<div class="oe_demo oe_screenshot img img-fluid">
<img src="setup6.jpg">
</div>
</div>
<h5 class="pt16">7. You can setup Lots of channel for subjects and projects..</h5>
<div class="row">
<div class="oe_demo oe_screenshot img img-fluid">
<img src="setup7.jpg">
</div>
</div>
</div>
</section>
<!-- end howto-->
<!-- begin video-->
<section class="s_text_block o_colored_level pt16 pb16">
<div class="container s_allow_columns">
<h2 class="text-center bg-info text-white pt16 pb16">- You can Buy our extra apps for website builder, seo builder or multi languages' translator. -</h2>
</div>
</section>
<section class="oe_container container s_text_block pt24 pb24 o_colored_level" data-snippet="s_text_block" data-name="文本">
<div class="s_allow_columns container">
<div data-oe-expression="//www.youtube.com/embed/ntKpCi_Lics?rel=0&amp;autoplay=1" class="media_iframe_video">
<div class="css_editable_mode_display"></div>
<div class="media_iframe_video_size"></div>
<iframe src="//www.youtube.com/embed/ntKpCi_Lics?rel=0&amp;autoplay=1" frameborder="0" allowfullscreen="allowfullscreen"></iframe>
</div>
<p class="o_default_snippet_text"><br></p>
</div>
<div class="oe_row text-center">
<span>Click Play above, or go to </span>
<a href="https://www.youtube.com/watch?v=ntKpCi_Lics" target="_blank" role="button" class="btn btn-link btn-lg">Youtube Video of odoo Advance Search Superbar</a>
</div>
<div class="oe_row text-center">
<p class="o_default_snippet_text">Demo Gif Animation if you can not get video</p>
<img class="oe_demo oe_screenshot img img-fluid" src="app_ai_seo.gif">
</div>
</section>
<!-- end video-->
<section class="container oe_dark">
<div class="oe_row oe_spaced text-center">
<div class="oe_span12">
<div class="row">
<h2 class="oe_slogan">Technical Help & Support</h2>
</div>
<div class="col-md-12 pad0">
@@ -256,9 +335,9 @@
</div>
</div>
<div class="oe_row oe_spaced text-center">
<h1>More Powerful addons, Make your odoo very easy to use, easy customize:
<h2>More powerful addons to make your odoo easy to use and easy to customize:
<a class="btn btn-primary mb16" href="http://www.odoo.com/apps/modules/browse?author=odooai.cn">odooai.cn Odoo Addons</a>
</h1>
</h2>
</div>
</section>

Binary image files added (contents not shown).

View File

@@ -0,0 +1,22 @@
<?xml version="1.0" encoding="UTF-8" ?>
<templates xml:space="preserve">
<t t-name="app_chatgpt.Message" t-inherit="mail.Message" t-inherit-mode="extension">
<xpath expr="//div[hasclass('o_Message_prettyBody')]//.." position="replace">
<t t-if="!messageView.composerViewInEditing">
<div class="o_Message_prettyBody" t-ref="prettyBody"/><!-- messageView.message.prettyBody is inserted here from _update() -->
<div name="bottom_operation" class="position-relative mt-8">
<div t-if="messageView.message.human_prompt_tokens &gt; 0 or messageView.message.ai_completion_tokens &gt;0"
class="o_Message_token text-muted" style="float:left;display:inline;font-size: 13px;">
<br/>
------------------
<br/>
<span title="提问/答复 消耗Token">
<t t-esc="messageView.message.human_prompt_tokens"/> / <t t-esc="messageView.message.ai_completion_tokens"/>
</span>
</div>
</div>
</t>
</xpath>
</t>
</templates>

View File

@@ -0,0 +1,30 @@
/** @odoo-module **/
import { insert } from '@mail/model/model_field_command';
import { attr, many, one } from '@mail/model/model_field';
import { registerPatch } from '@mail/model/model_core';
registerPatch({
name: 'Message',
modelMethods: {
convertData(data) {
const data2 = this._super(data);
if ('human_prompt_tokens' in data) {
data2.human_prompt_tokens = data.human_prompt_tokens;
}
if ('ai_completion_tokens' in data) {
data2.ai_completion_tokens = data.ai_completion_tokens;
}
if ('is_ai' in data) {
data2.is_ai = data.is_ai;
}
return data2;
},
},
fields: {
human_prompt_tokens: attr(),
ai_completion_tokens: attr(),
is_ai: attr(),
}
})

View File

@@ -21857,55 +21857,6 @@ const emojisData0 = `{
"shortcodes": [
":thumbs_down:"
]
},
{
"category": "ChatGpt Training",
"codepoints": "👐",
"emoticons": [],
"keywords": [
"` + _lt("mark") + `",
"` + _lt("neutral") + `",
"` + _lt("hand") + `",
"` + _lt("open") + `",
"` + _lt("open hands") + `"
],
"name": "` + _lt("mark_as_neutral. open hands") + `",
"shortcodes": [
":open_hands:"
]
},
{
"category": "ChatGpt Training",
"codepoints": "👋",
"emoticons": [],
"keywords": [
"` + _lt("mark") + `",
"` + _lt("redundant") + `",
"` + _lt("hand") + `",
"` + _lt("wave") + `",
"` + _lt("waving") + `"
],
"name": "` + _lt("mark_as_redundant. waving hand") + `",
"shortcodes": [
":waving_hand:"
]
},
{
"category": "ChatGpt Training",
"codepoints": "🈵",
"emoticons": [],
"keywords": [
"` + _lt("mark") + `",
"` + _lt("unhelpful") + `",
"` + _lt("“no vacancy”") + `",
"` + _lt("ideograph") + `",
"` + _lt("Japanese") + `",
"` + _lt("満") + `"
],
"name": "` + _lt("mark_as_unhelpful. Need more answer") + `",
"shortcodes": [
":Japanese_“no_vacancy”_button:"
]
},`;
export const emojisData = JSON.parse(`[

View File

@@ -10,33 +10,112 @@
<field name="provider" optional="hide"/>
<field name="ai_model" optional="show"/>
<field name="openapi_api_key" password="True"/>
<field name="max_tokens" optional="show"/>
<field name="temperature"/>
<field name="max_send_char"/>
</tree>
</field>
</record>
<record id="ai_robot_kanban_view" model="ir.ui.view">
<field name="name">ai.robot.kanban</field>
<field name="model">ai.robot</field>
<field name="arch" type="xml">
<kanban class="o_ai_robot_kanban">
<field name="id"/>
<field name="name"/>
<field name="provider"/>
<field name="set_ai_model"/>
<field name="ai_model"/>
<field name="partner_count"/>
<field name="image_avatar"/>
<field name="active"/>
<templates>
<t t-name="kanban-box">
<div class="oe_kanban_card oe_kanban_global_click">
<div class="o_kanban_image me-1">
<img t-att-src="kanban_image('ai.robot', 'image_avatar', record.id.raw_value)" alt="Robot Provider" class="o_image_64_contain"/>
</div>
<div class="oe_kanban_details">
<div class="o_kanban_record_top mb-0">
<div class="o_kanban_record_headings">
<strong class="o_kanban_record_title">
<field name="name"/>
</strong>
</div>
</div>
<div class="mt-3">
Model:
<field name="ai_model"/>
</div>
<div class="mt-1">
<strong>
<t t-esc="record.partner_count.value"/>
</strong>
Bind Partner
</div>
</div>
</div>
</t>
</templates>
</kanban>
</field>
</record>
<record id="ai_robot_form_view" model="ir.ui.view">
<field name="name">ai.robot.form</field>
<field name="model">ai.robot</field>
<field name="arch" type="xml">
<form>
<header>
<button string="Get List Model" type="object" name="get_ai_list_model" attrs="{'invisible': [('provider', '!=', 'openai')]}"/>
<button string="Get Model Info" type="object" name="get_ai_model_info" attrs="{'invisible': [('provider', '!=', 'openai')]}"/>
</header>
<sheet>
<div class="oe_button_box" name="button_box">
<button class="oe_stat_button" name="%(base.action_res_users)d" icon="fa-users" type="action"
context="{'search_default_gpt_id': id}">
<field name="partner_count" widget="statinfo"/>
</button>
</div>
<field name="image_avatar" widget='image' class="oe_avatar"/>
<div class="oe_title">
<label for="name"/>
<h1>
<field name="name" placeholder="Robot Name" required="1"/>
</h1>
</div>
<group>
<group>
<field name="name"/>
<field name="openapi_api_key" password="True"/>
<field name="id" invisible="1"/>
<field name="openapi_api_key" password="True" required="True"/>
<field name="temperature"/>
<field name="sequence"/>
<field name="top_p"/>
<field name="frequency_penalty"/>
<field name="presence_penalty"/>
<field name="sys_content" placeholder="Role-playing and scene setting.Give the model instructions about how it should behave and any context it should reference when generating a response."/>
<field name="max_send_char"/>
</group>
<group>
<field name="set_ai_model"/>
<field name="ai_model"/>
<label class="o_form_label" for="provider">
OpenAI Document
</label>
<div>
<field name="provider"/>
<field name="provider" class="oe_inline"/>
<a href="https://platform.openai.com/docs/introduction" title="OpenAI Document" class="o_doc_link" target="_blank"></a>
</div>
<field name="max_tokens"/>
<field name="engine"/>
<field name="endpoint"/>
<field name="api_version"/>
<field name="ai_timeout"/>
<field name="sequence"/>
</group>
<group>
<field name="is_filtering"/>
<field name="sensitive_words" attrs="{'invisible': [('is_filtering', '=', False)]}"/>
</group>
</group>
</sheet>
@@ -44,13 +123,28 @@
</field>
</record>
<record id="ai_robot_search_view" model="ir.ui.view">
<field name="name">ai.robot.search</field>
<field name="model">ai.robot</field>
<field name="arch" type="xml">
<search>
<field name="name"/>
<field name="ai_model"/>
<filter string="Archived" name="inactive" domain="[('active', '=', False)]"/>
<group expand="0" name="group_by" string="Group By">
<filter name="groupby_provider" string="Ai Provider" domain="[]" context="{'group_by' : 'provider'}"/>
</group>
</search>
</field>
</record>
<record id="action_ai_robot" model="ir.actions.act_window">
<field name="name">GPT Robot</field>
<field name="name">Ai Robot</field>
<field name="res_model">ai.robot</field>
<field name="view_mode">tree,form</field>
<field name="view_mode">kanban,tree,form</field>
<field name="help" type="html">
<p class="o_view_nocontent_smiling_face">
Let's create a GPT Robot.
Let's create an Ai Robot.
</p>
</field>
</record>
@@ -59,14 +153,14 @@
<field name="name">Disconnect</field>
<field name="model_id" ref="app_chatgpt.model_ai_robot"/>
<field name="binding_model_id" ref="app_chatgpt.model_ai_robot"/>
<field name="binding_view_types">list,form</field>
<field name="binding_view_types">kanban,list,form</field>
<field name="state">code</field>
<field name="code">action = records.action_disconnect()</field>
</record>
<menuitem
id="menu_ai_robot"
name="GPT Robot"
name="Ai Robot"
parent="base.menu_users"
sequence="2"
action="action_ai_robot"

View File

@@ -0,0 +1,122 @@
<?xml version="1.0"?>
<odoo>
<data>
<!-- list: handled by the core view -->
<record id="ai_mail_channel_view_tree" model="ir.ui.view">
<field name="model">mail.channel</field>
<field name="name">ai.mail.channel.tree</field>
<field name="inherit_id" ref="mail.mail_channel_view_tree"/>
<field name="arch" type="xml">
<xpath expr="//field[@name='name']" position="after">
<field name="ai_partner_id" optional="show"/>
<field name="description" optional="show"/>
<field name="set_max_tokens" optional="hide"/>
</xpath>
</field>
</record>
<!-- form: inherit the core view for easier management -->
<record id="ai_mail_channel_view_form" model="ir.ui.view">
<field name="name">ai.mail.channel.form</field>
<field name="model">mail.channel</field>
<field name="mode">extension</field>
<field name="inherit_id" ref="mail.mail_channel_view_form"/>
<field name="arch" type="xml">
<xpath expr="//page[@name='privacy']" position="before">
<page name="page_user" string="用户设定的角色相关,一般不要调整">
<group>
<group name="role_set" string="Ai常规设定">
<field name="channel_type" readonly="1"/>
<field name="ai_partner_id"
options="{'no_open': True, 'no_create': True}"/>
<field name="ext_ai_partner_id"
options="{'no_open': True, 'no_create': True}"/>
</group>
<group name="param_set" string="Ai Character Set">
<div class="o_td_label">
<label for="set_max_tokens"/>
</div>
<field name="set_max_tokens" nolabel="1" required="1"/>
<div class="o_td_label">
<label for="set_chat_count"/>
</div>
<field name="set_chat_count" nolabel="1" required="1"/>
<div class="o_td_label">
<label for="set_temperature"/>
</div>
<field name="set_temperature" nolabel="1" required="1"/>
<div class="o_td_label">
<label for="set_top_p"/>
</div>
<field name="set_top_p" nolabel="1" required="1"/>
<div class="o_td_label">
<label for="set_frequency_penalty"/>
</div>
<field name="set_frequency_penalty" nolabel="1" required="1"/>
<div class="o_td_label">
<label for="set_presence_penalty"/>
</div>
<field name="set_presence_penalty" nolabel="1" required="1"/>
<field name="is_private" readonly="0"/>
<field name="create_uid" readonly="1" options="{'no_open': True, 'no_create': True}"/>
</group>
</group>
</page>
</xpath>
<xpath expr="//field[@name='group_public_id']/.." position="after">
<group string="Ai智能优化设定具体参数">
<field name="chat_count"/>
<p class="ml16 my-n1 mb16" colspan="2">0-3设定后会将最近n次对话发给Ai有助于他更好的回答</p>
<field name="max_tokens"/>
<p class="ml16 my-n1 mb16" colspan="2">最大响应Token,控制返回内容长度</p>
<field name="temperature"/>
<p class="ml16 my-n1 mb16" colspan="2">0-1值越大越富有想像力越小则越保守</p>
<field name="frequency_penalty"/>
<p class="ml16 my-n1 mb16" colspan="2">0-1值越大越少使用常用词</p>
<field name="presence_penalty"/>
<p class="ml16 my-n1 mb16" colspan="2">0-1值越大越少重复词</p>
</group>
</xpath>
</field>
</record>
<!-- kanban: handled by the core view -->
<record id="ai_mail_channel_view_kanban" model="ir.ui.view">
<field name="model">mail.channel</field>
<field name="inherit_id" ref="mail.mail_channel_view_kanban"/>
<field name="mode">extension</field>
<field name="arch" type="xml">
<xpath expr="//kanban//field[1]" position="before">
<field name="is_private"/>
</xpath>
<xpath expr="//field[@name='description']" position="before">
<em>Role: </em>
</xpath>
<xpath expr="//button[@name='channel_join']" position="replace">
<button attrs="{'invisible':[('is_private','=',True), ('group_ids', '!=', [])]}"
class="btn btn-warning float-end" type="edit">
Ai Settings
</button>
<button type="object" attrs="{'invisible':['|', ('is_member','=',True), ('group_ids', '!=', [])]}" class="btn btn-primary float-end" name="channel_join">进入频道</button>
</xpath>
<xpath expr="//button[@name='action_unfollow']" position="replace">
<button type="object" attrs="{'invisible':['|', ('is_member','=',False), ('group_ids', '!=', [])]}"
class="btn btn-secondary float-end" name="action_unfollow">暂时离开</button>
</xpath>
</field>
</record>
<!-- search: handled by the core view -->
<record id="ai_mail_channel_view_search" model="ir.ui.view">
<field name="model">mail.channel</field>
<field name="inherit_id" ref="mail.mail_channel_view_search"/>
<field name="arch" type="xml">
<xpath expr="//field[@name='name']" position="after">
<field name="channel_type"/>
<group expand="0" string="Group By">
<filter string="Channel Type" name="groupby_channel_type" domain="[]" context="{'group_by': 'channel_type'}"/>
</group>
</xpath>
</field>
</record>
</data>
</odoo>

View File

@@ -6,19 +6,46 @@
<field name="inherit_id" ref="base_setup.res_config_settings_view_form"/>
<field name="arch" type="xml">
<xpath expr="//div[@name='integration']" position="after">
<h2>ChatGPT</h2>
<div class="col-xs-12 row o_settings_container" id="chatgpt_integraion">
<div class="col-xs-12 col-md-10 o_setting_box">
<div class="o_setting_right_pane border-start-0">
<div class="content-group">
<div class="row mt0">
<label class="col-lg-2" string="上下文连接超时" for="openapi_context_timeout"/>
<field name="openapi_context_timeout" title="上下文连接超时 秒数" style="width: 10% !important;"/>
</div>
</div>
</div>
</div>
<h2>Ai Center Setup</h2>
<div class="col-xs-12 col-md-6 row o_settings_container" id="chatgpt_integration">
<div class="col-12 o_web_settings_compact_subtitle">
Support by
<a target="_blank" href="https://www.odooai.cn" style="text-decoration: underline;">odooAi.cn</a>
</div>
<div class="col-12 col-lg-12 o_setting_box" id="ai_base_setting">
<div class="o_setting_right_pane border-start-0">
<div class="content-group">
<div class="o_web_settings_compact_subtitle">
<label class="col-4" string="Timout then disconnect(s)" for="openapi_context_timeout"/>
<field name="openapi_context_timeout" title="After timeout seconds then Disconnect" style="width: 200px !important;"/>Seconds
</div>
<div class="row mt0">
<label class="col-4" for="openai_sync_config"/>
<field name="openai_sync_config" style="width: 200px !important;"/>
</div>
</div>
</div>
</div>
<div class="col-lg-6 col-12 o_setting_box" id="ai_google_integraion">
<div class="o_setting_left_pane">
<field name="module_app_ai_bard"/>
</div>
<div class="o_setting_right_pane">
<label for="module_app_ai_bard"/>
<a href="https://apps.odoo.com/apps/modules/15.0/app_ai_bard/"
title="Get Google Bard Ai from https://apps.odoo.com/apps/modules/15.0/app_ai_bard" class="o_doc_link ml8" target="_blank"></a>
</div>
</div>
<div class="col-lg-6 col-12 o_setting_box" id="ai_baidu_integraion">
<div class="o_setting_left_pane">
<field name="module_app_ai_baidu"/>
</div>
<div class="o_setting_right_pane">
<label for="module_app_ai_baidu"/>
<a href="https://apps.odoo.com/apps/modules/15.0/app_ai_baidu/"
title="Get Baidu Ai from https://apps.odoo.com/apps/modules/15.0/app_ai_baidu" class="o_doc_link ml8" target="_blank"></a>
</div>
</div>
</div>
</xpath>
</field>

View File

@@ -0,0 +1,94 @@
<?xml version="1.0" encoding="UTF-8" ?>
<odoo>
<record id="res_partner_ai_use_tree_view" model="ir.ui.view">
<field name="name">res.partner.ai.use.tree</field>
<field name="model">res.partner.ai.use</field>
<field name="arch" type="xml">
<tree>
<field name="name"/>
<field name="ai_user_id" optional="show"/>
<field name="first_ask_time" optional="show"/>
<field name="latest_ask_time" optional="show"/>
<field name="service_start_date" optional="show"/>
<field name="service_end_date" optional="show"/>
<field name="used_number" sum="Total" optional="hide"/>
<field name="max_number" sum="Total" optional="hide"/>
<field name="human_prompt_tokens" sum="Total" optional="show"/>
<field name="ai_completion_tokens" sum="Total" optional="show"/>
<field name="tokens_total" sum="Total" optional="show"/>
<field name="token_balance" sum="Total" optional="show"/>
<field name="token_allow" sum="Total" optional="show"/>
</tree>
</field>
</record>
<record id="res_partner_ai_use_form_view" model="ir.ui.view">
<field name="name">res.partner.ai.use.form</field>
<field name="model">res.partner.ai.use</field>
<field name="arch" type="xml">
<form>
<sheet>
<label for="name"/>
<h1>
<field name="name"/>
</h1>
<group>
<group>
<field name="ai_user_id"/>
<field name="first_ask_time"/>
<field name="latest_ask_time"/>
<field name="service_start_date"/>
<field name="service_end_date"/>
<field name="used_number" readonly="True"/>
<field name="max_number" readonly="True"/>
<field name="token_balance" readonly="True"/>
</group>
<group>
<field name="human_prompt_tokens" readonly="True"/>
<field name="ai_completion_tokens" readonly="True"/>
<field name="tokens_total" readonly="True"/>
</group>
</group>
</sheet>
</form>
</field>
</record>
<record id="res_partner_ai_use_search_view" model="ir.ui.view">
<field name="name">res.partner.ai.use.search</field>
<field name="model">res.partner.ai.use</field>
<field name="arch" type="xml">
<search>
<field name="name"/>
<field name="ai_user_id"/>
<searchpanel>
<field name="ai_user_id"/>
</searchpanel>
</search>
</field>
</record>
<record id="action_res_partner_ai_use" model="ir.actions.act_window">
<field name="name">Partner Ai Use</field>
<field name="res_model">res.partner.ai.use</field>
<field name="view_mode">tree,form</field>
<field name="context">{'create': 0, 'delete': 0}</field>
</record>
<record id="action_res_users_2_partner_ai_use" model="ir.actions.act_window">
<field name="name">Partner Ai Use</field>
<field name="res_model">res.partner.ai.use</field>
<field name="view_mode">tree,form</field>
<field name="domain">[('ai_user_id', 'in', active_ids)]</field>
<field name="context">{'default_ai_user_id':active_id,}</field>
</record>
<menuitem
id="menu_res_partner_ai_use"
name="Partner Ai Use"
parent="base.menu_users"
sequence="3"
action="action_res_partner_ai_use"
groups="base.group_system"/>
</odoo>

View File

@@ -5,13 +5,17 @@
<field name="model">res.users</field>
<field name="inherit_id" ref="base.view_users_form"/>
<field name="arch" type="xml">
<xpath expr="//div[@name='button_box']" position="inside">
<button name="%(app_chatgpt.action_res_users_2_partner_ai_use)d" type="action" string="Partner Ai Use" icon="fa-comments">
</button>
</xpath>
<xpath expr="//page[@name='preferences']" position="after">
<page name="page_chatgpt" string="ChatGPT">
<group>
<group>
<field name="gpt_id"/>
<field name="gpt_policy"/>
<field name="gpt_wl_users" widget="many2many_tags" attrs="{'invisible': [('gpt_policy', '=', 'all')]}"/>
<field name="gpt_wl_partners" widget="many2many_tags" attrs="{'invisible': [('gpt_policy', '=', 'all')]}"/>
<field name="gpt_demo_time"/>
</group>
</group>
@@ -19,4 +23,20 @@
</xpath>
</field>
</record>
<!-- search-->
<record id="app_view_users_search" model="ir.ui.view">
<field name="name">app.res.users.search</field>
<field name="model">res.users</field>
<field name="inherit_id" ref="base.view_users_search"/>
<field name="arch" type="xml">
<xpath expr="//field[@name='name']" position="after">
<field name="gpt_id"/>
</xpath>
<xpath expr="//filter[@name='filter_no_share']" position="before">
<filter name="is_robot" string="Ai User" domain="[('gpt_id','!=',False)]"/>
<filter name="not_robot" string="Not Ai" domain="[('gpt_id','=',False)]"/>
<separator/>
</xpath>
</field>
</record>
</odoo>