mirror of
https://github.com/guohuadeng/app-odoo.git
synced 2025-02-23 04:11:36 +02:00
update to 15 gpt
This commit is contained in:
@@ -1,8 +1,10 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2020-Present InTechual Solutions. (<https://intechualsolutions.com/>)
|
||||
|
||||
from . import res_partner
|
||||
from . import mail_channel
|
||||
from . import res_config_settings
|
||||
from . import ai_robot
|
||||
from . import res_partner_ai_use
|
||||
from . import res_users
|
||||
from . import mail_message
|
||||
from . import mail_thread
|
||||
|
||||
@@ -1,25 +1,42 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import requests
|
||||
from odoo import api, fields, models, _
|
||||
import openai.openai_object
|
||||
import requests, json
|
||||
import openai
|
||||
import base64
|
||||
|
||||
from odoo import api, fields, models, modules, tools, _
|
||||
from odoo.exceptions import UserError
|
||||
from .lib.WordsSearch import WordsSearch
|
||||
|
||||
import logging
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AiRobot(models.Model):
|
||||
_name = 'ai.robot'
|
||||
_description = 'Gpt Robot'
|
||||
_description = 'Ai Robot'
|
||||
_order = 'sequence, name'
|
||||
|
||||
name = fields.Char(string='Name', translate=True)
|
||||
provider = fields.Selection(string="AI Provider", selection=[('openai', 'OpenAI')], required=True, default='openai')
|
||||
ai_model = fields.Selection(string="AI Model", selection=[
|
||||
name = fields.Char(string='Name', translate=True, required=True)
|
||||
provider = fields.Selection(string="AI Provider", selection=[('openai', 'OpenAI'), ('azure', 'Azure')],
|
||||
required=True, default='openai', change_default=True)
|
||||
# update ai_robot set ai_model=set_ai_model
|
||||
ai_model = fields.Char(string="AI Model", required=True, default='auto', help='Customize input')
|
||||
set_ai_model = fields.Selection(string="Quick Set Model", selection=[
|
||||
('gpt-3.5-turbo-0613', 'gpt-3.5-turbo-0613(Default and Latest)'),
|
||||
('gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-16k-0613(Big text)'),
|
||||
('gpt-4', 'Chatgpt 4'),
|
||||
('gpt-4-32k', 'Chatgpt 4 32k'),
|
||||
('gpt-3.5-turbo', 'Chatgpt 3.5 Turbo'),
|
||||
('gpt-3.5-turbo-0301', 'Chatgpt 3.5 Turbo on 20230301'),
|
||||
('text-davinci-003', 'Chatgpt 3 Davinci'),
|
||||
('code-davinci-002', 'Chatgpt 2 Code Optimized'),
|
||||
('text-davinci-002', 'Chatgpt 2 Davinci'),
|
||||
('dall-e2', 'Dall-E Image'),
|
||||
], required=True, default='gpt-3.5-turbo',
|
||||
help="""
|
||||
], default='gpt-3.5-turbo-0613',
|
||||
help="""
|
||||
GPT-4: Can understand Image, generate natural language or code.
|
||||
GPT-3.5: A set of models that improve on GPT-3 and can understand as well as generate natural language or code
|
||||
DALL·E: A model that can generate and edit images given a natural language prompt
|
||||
Whisper: A model that can convert audio into text
|
||||
@@ -29,11 +46,440 @@ Moderation: A fine-tuned model that can detect whether text may be sensitive or
|
||||
GPT-3 A set of models that can understand and generate natural language
|
||||
""")
|
||||
openapi_api_key = fields.Char(string="API Key", help="Provide the API key here")
|
||||
temperature = fields.Float(string='Temperature', default=0.9)
|
||||
|
||||
|
||||
# begin gpt 参数
|
||||
# 1. stop:表示聊天机器人停止生成回复的条件,可以是一段文本或者一个列表,当聊天机器人生成的回复中包含了这个条件,就会停止继续生成回复。
|
||||
# 2. temperature:0-2,控制回复的“新颖度”,值越高,聊天机器人生成的回复越不确定和随机,值越低,聊天机器人生成的回复会更加可预测和常规化。
|
||||
# 3. top_p:0-1,语言连贯性,与temperature有些类似,也是控制回复的“新颖度”。不同的是,top_p控制的是回复中概率最高的几个可能性的累计概率之和,值越小,生成的回复越保守,值越大,生成的回复越新颖。
|
||||
# 4. frequency_penalty:-2~2,用于控制聊天机器人回复中出现频率过高的词汇的惩罚程度。聊天机器人会尝试避免在回复中使用频率较高的词汇,以提高回复的多样性和新颖度。
|
||||
# 5. presence_penalty:-2~2与frequency_penalty相对,用于控制聊天机器人回复中出现频率较低的词汇的惩罚程度。聊天机器人会尝试在回复中使用频率较低的词汇,以提高回复的多样性和新颖度。
|
||||
max_tokens = fields.Integer('Max Response', default=600,
|
||||
help="""
|
||||
Set a limit on the number of tokens per model response.
|
||||
The API supports a maximum of 4000 tokens shared between the prompt
|
||||
(including system message, examples, message history, and user query) and the model's response.
|
||||
One token is roughly 4 characters for typical English text.
|
||||
""")
|
||||
temperature = fields.Float(string='Temperature', default=1,
|
||||
help="""
|
||||
Controls randomness. Lowering the temperature means that the model will produce
|
||||
more repetitive and deterministic responses.
|
||||
Increasing the temperature will result in more unexpected or creative responses.
|
||||
Try adjusting temperature or Top P but not both.
|
||||
""")
|
||||
top_p = fields.Float('Top Probabilities', default=0.6,
|
||||
help="""
|
||||
Similar to temperature, this controls randomness but uses a different method.
|
||||
Lowering Top P will narrow the model’s token selection to likelier tokens.
|
||||
Increasing Top P will let the model choose from tokens with both high and low likelihood.
|
||||
Try adjusting temperature or Top P but not both
|
||||
""")
|
||||
# 避免使用常用词
|
||||
frequency_penalty = fields.Float('Frequency Penalty', default=1,
|
||||
help="""
|
||||
Reduce the chance of repeating a token proportionally based on how often it has appeared in the text so far.
|
||||
This decreases the likelihood of repeating the exact same text in a response.
|
||||
""")
|
||||
# 越大模型就趋向于生成更新的话题,惩罚已经出现过的文本
|
||||
presence_penalty = fields.Float('Presence penalty', default=1,
|
||||
help="""
|
||||
Reduce the chance of repeating any token that has appeared in the text at all so far.
|
||||
This increases the likelihood of introducing new topics in a response.
|
||||
""")
|
||||
# 停止回复的关键词
|
||||
stop = fields.Char('Stop sequences',
|
||||
help="""
|
||||
Use , to separate the stop key word.
|
||||
Make responses stop at a desired point, such as the end of a sentence or list.
|
||||
Specify up to four sequences where the model will stop generating further tokens in a response.
|
||||
The returned text will not contain the stop sequence.
|
||||
""")
|
||||
# 角色设定
|
||||
sys_content = fields.Char('System message',
|
||||
help="""
|
||||
Give the model instructions about how it should behave and any context it should reference when generating a response.
|
||||
You can describe the assistant’s personality,
|
||||
tell it what it should and shouldn’t answer, and tell it how to format responses.
|
||||
There’s no token limit for this section, but it will be included with every API call,
|
||||
so it counts against the overall token limit.
|
||||
""")
|
||||
# end gpt 参数
|
||||
endpoint = fields.Char('End Point', default='https://api.openai.com/v1/chat/completions')
|
||||
engine = fields.Char('Engine', help='If use Azure, Please input the Model deployment name.')
|
||||
api_version = fields.Char('API Version', default='2022-12-01')
|
||||
ai_timeout = fields.Integer('Timeout(seconds)', help="Connect timeout for Ai response", default=120)
|
||||
sequence = fields.Integer('Sequence', help="Determine the display order", default=10)
|
||||
sensitive_words = fields.Text('Sensitive Words Plus', help='Sensitive word filtering. Separate keywords with a carriage return.')
|
||||
is_filtering = fields.Boolean('Filter Sensitive Words', default=False, help='Use base Filter in dir models/lib/sensi_words.txt')
|
||||
|
||||
max_send_char = fields.Integer('Max Send Char', help='Max Send Prompt Length', default=8000)
|
||||
image_avatar = fields.Image('Avatar')
|
||||
partner_ids = fields.One2many('res.partner', 'gpt_id', string='Partner')
|
||||
partner_count = fields.Integer('#Partner', compute='_compute_partner_count', store=False)
|
||||
active = fields.Boolean('Active', default=True)
|
||||
|
||||
def _compute_partner_count(self):
|
||||
for rec in self:
|
||||
rec.partner_count = len(rec.partner_ids)
|
||||
|
||||
def action_disconnect(self):
|
||||
requests.delete('https://chatgpt.com/v1/disconnect')
|
||||
|
||||
def get_ai_pre(self, data, author_id=False, answer_id=False, param=None):
    """Pre-hook run before querying the AI provider.

    Returns False when the request may proceed, or a user-facing message
    (string) when it must be blocked (sensitive word hit or access policy).

    :param data: prompt string, or list of chat-message dicts
    :param author_id: res.partner record of the requesting partner
    :param answer_id: res.partner record of the answering AI partner
    :param param: optional dict of extra parameters (unused here)
    """
    if param is None:  # BUGFIX: avoid shared mutable default argument
        param = {}
    if self.is_filtering:
        # Base sensitive-word filter; SetKeywords([]) still loads the
        # bundled models/lib/sensi_words.txt list.
        search = WordsSearch()
        search.SetKeywords([])
        if isinstance(data, list):
            # Chat format: only the latest message is checked.
            content = data[len(data) - 1]['content']
        else:
            content = data
        sensi = search.FindFirst(content)
        if sensi is not None:
            _logger.error('==========敏感词:%s' % sensi['Keyword'])
            return _('温馨提示:您发送的内容含有敏感词,请修改内容后再向我发送。')
    elif not author_id.gpt_id and answer_id.gpt_id:
        # NOTE(review): indentation was ambiguous in the source; as written
        # this policy check is skipped when is_filtering is set — confirm
        # whether the whitelist policy should also apply in that case.
        user_id = answer_id.user_ids[:1]
        gpt_policy = user_id.gpt_policy
        gpt_wl_partners = user_id.gpt_wl_partners
        is_allow = author_id.id in gpt_wl_partners.ids
        if gpt_policy != 'all' and not is_allow:
            # AI is restricted to whitelisted partners for this user.
            return _('此Ai暂时未开放,请联系管理员。')
    else:
        return False
||||
|
||||
def get_ai(self, data, author_id=False, answer_id=False, param=None):
    """Dispatch ``data`` to this robot's provider and post-process the reply.

    :param data: prompt string or list of chat-message dicts
    :param author_id: requesting res.partner record
    :param answer_id: answering res.partner record
    :param param: optional dict of per-call overrides (max_tokens, ...)
    :returns: tuple ``(content, usage, is_ai)`` — reply text (or an error
        message), token-usage dict, and whether the reply came from the AI.
    """
    param = {} if param is None else param  # BUGFIX: no shared mutable default
    self.ensure_one()
    # Pre-hook: returns False when OK, or a blocking message (sensitive
    # words, access policy, ...).
    res_pre = self.get_ai_pre(data, author_id, answer_id, param)
    if res_pre:
        # Blocked: surface the message, mark the reply as non-AI.
        return res_pre, {}, False
    handler = 'get_%s' % self.provider
    if not hasattr(self, handler):
        return _('No robot provider found'), {}, False

    res = getattr(self, handler)(data, author_id, answer_id, param)
    # Post-hook: normalizes the provider payload and records token usage.
    res_post, usage, is_ai = self.get_ai_post(res, author_id, answer_id, param)
    return res_post, usage, is_ai
|
||||
|
||||
def get_ai_origin(self, data, author_id=False, answer_id=False, param=None):
    """Like :meth:`get_ai` but return the provider's *raw* response.

    The post-hook is still executed for its side effects (token-usage
    accounting on res.partner.ai.use) — only its return value is discarded.

    :returns: raw provider payload, or ``(message, {}, False)`` when the
        pre-hook blocks the request / no provider handler exists.
    """
    param = {} if param is None else param  # BUGFIX: no shared mutable default
    self.ensure_one()
    # Pre-hook: returns False when OK, or a blocking message.
    res_pre = self.get_ai_pre(data, author_id, answer_id, param)
    if res_pre:
        return res_pre, {}, False
    handler = 'get_%s' % self.provider
    if not hasattr(self, handler):
        return _('No robot provider found'), {}, False

    res = getattr(self, handler)(data, author_id, answer_id, param)
    # Run the post-hook for its usage-recording side effects only.
    self.get_ai_post(res, author_id, answer_id, param)
    return res
|
||||
|
||||
def get_ai_post(self, res, author_id=False, answer_id=False, param=None):
    """Post-hook: extract reply text from a provider payload and record
    token usage per requesting partner (res.partner.ai.use).

    :param res: provider response (OpenAIObject / list / dict) or a plain
        error string
    :returns: tuple ``(content, usage, is_ai)``; ``is_ai`` is False when
        ``res`` was not a structured AI payload.
    """
    param = {} if param is None else param  # BUGFIX: no shared mutable default
    # BUGFIX: the original condition mixed ``and``/``or`` without
    # parentheses, so any list/dict bypassed the ``res and author_id``
    # guard and could crash below when author_id is falsy.
    is_payload = isinstance(res, (openai.openai_object.OpenAIObject, list, dict))
    if res and author_id and is_payload:
        # Structured payload: normalize per provider format.
        if self.provider == 'openai':
            usage = res['usage']
            content = res['choices'][0]['message']['content']
        elif self.provider == 'azure':
            # Round-trip through json to coerce Azure objects into plain values.
            usage = json.loads(json.dumps(res['usage']))
            content = json.loads(json.dumps(res['choices'][0]['message']['content']))
        elif self.provider == 'ali':
            usage = res['usage']
            content = res['output']['text']
        elif self.provider == 'baidu':
            usage = res['usage']
            content = res['result']
        else:
            usage = False
            content = res
        data = content.replace(' .', '.').strip()
        answer_user = answer_id.mapped('user_ids')[:1]
        if usage:
            # Provider-specific token accounting keys.
            if self.provider == 'ali':
                prompt_tokens = usage['input_tokens']
                completion_tokens = usage['output_tokens']
                total_tokens = usage['input_tokens'] + usage['output_tokens']
            else:
                prompt_tokens = usage['prompt_tokens']
                completion_tokens = usage['completion_tokens']
                total_tokens = usage['total_tokens']
            # Usage is accumulated per requesting partner in the dedicated
            # res.partner.ai.use model (not on res.users).
            ai_use = self.env['res.partner.ai.use'].search([('name', '=', author_id.id)], limit=1)
            ask_date = fields.Datetime.now()
            if not ai_use:
                ai_use.create({
                    'name': author_id.id,
                    'ai_user_id': answer_user.id,
                    'human_prompt_tokens': prompt_tokens,
                    'ai_completion_tokens': completion_tokens,
                    'tokens_total': total_tokens,
                    'used_number': 1,
                    'first_ask_time': ask_date,
                    'latest_ask_time': ask_date,
                })
            else:
                vals = {
                    'human_prompt_tokens': ai_use.human_prompt_tokens + prompt_tokens,
                    'ai_completion_tokens': ai_use.ai_completion_tokens + completion_tokens,
                    'tokens_total': ai_use.tokens_total + total_tokens,
                    'used_number': ai_use.used_number + 1,
                    'latest_ask_time': ask_date,
                }
                if not ai_use.first_ask_time:
                    vals['first_ask_time'] = ask_date
                ai_use.write(vals)
        return data, usage, True
    # Plain string: an error message, not an AI payload.
    return res, False, False
|
||||
|
||||
def get_ai_system(self, content=None):
    """Build the ``system`` chat message (AI role/persona).

    Uses ``content`` when given, otherwise the robot's configured
    ``sys_content``; returns an empty dict when neither is set.
    """
    prompt = content or self.sys_content
    return {"role": "system", "content": prompt} if prompt else {}
|
||||
|
||||
def get_ai_model_info(self):
    """Fetch metadata for the configured model and show it to the user.

    Always raises :class:`UserError` — the exception popup is the display
    mechanism for this UI action.
    """
    self.ensure_one()
    auth_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.openapi_api_key}",
    }
    timeout = self.ai_timeout or 120
    # Derive the models endpoint from the configured chat endpoint if any.
    base = "https://api.openai.com/v1"
    if self.endpoint:
        base = self.endpoint.replace("/chat/completions", "")
    url = "%s/models/%s" % (base, self.ai_model)

    response = requests.get(url, headers=auth_headers, timeout=timeout)
    response.close()
    if response:
        r_text = json.dumps(response.json(), indent=2)
    else:
        r_text = 'No response.'
    raise UserError(r_text)
|
||||
|
||||
def get_ai_list_model(self):
    """List the models available at the provider and show them to the user.

    Always raises :class:`UserError` — the exception popup is the display
    mechanism for this UI action.
    """
    self.ensure_one()
    auth_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.openapi_api_key}",
    }
    timeout = self.ai_timeout or 120
    base = "https://api.openai.com/v1"
    if self.endpoint:
        base = self.endpoint.replace("/chat/completions", "")
    url = base + "/models"
    response = requests.get(url, headers=auth_headers, timeout=timeout)
    response.close()
    if response:
        r_text = json.dumps(response.json(), indent=2)
    else:
        r_text = 'No response.'
    raise UserError(r_text)
|
||||
|
||||
def get_openai(self, data, author_id, answer_id, param=None):
    """Query OpenAI (or an OpenAI-compatible endpoint) for this robot.

    Routes by ``self.ai_model``: chat-completion models post to the chat
    endpoint, ``dall-e2`` is a placeholder, everything else uses the legacy
    completions payload.

    :param data: prompt string or list of chat-message dicts
    :param param: per-call overrides for max_tokens/temperature/etc.
    :returns: raw response dict on success, a joined text for legacy
        completions, or a translated timeout message.
    """
    param = {} if param is None else param  # BUGFIX: no shared mutable default
    self.ensure_one()
    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.openapi_api_key}"}
    R_TIMEOUT = self.ai_timeout or 120
    o_url = self.endpoint or "https://api.openai.com/v1/chat/completions"

    # Per-call parameters take precedence over the robot's defaults.
    max_tokens = param.get('max_tokens') or self.max_tokens
    temperature = param.get('temperature') or self.temperature
    top_p = param.get('top_p') or self.top_p
    frequency_penalty = param.get('frequency_penalty') or self.frequency_penalty
    presence_penalty = param.get('presence_penalty') or self.presence_penalty
    request_timeout = param.get('request_timeout') or self.ai_timeout

    if self.stop:
        stop = self.stop.split(',')
    else:
        stop = ["Human:", "AI:"]

    if self.ai_model in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
        # Chat-completion models; configure the SDK for compatible proxies.
        openai.api_key = self.openapi_api_key
        openai.api_base = o_url.replace('/chat/completions', '')
        if isinstance(data, list):
            messages = data
        else:
            messages = [{"role": "user", "content": data}]
        # Prepend the AI role/persona unless the caller already set one.
        if messages[0].get('role') != 'system':
            sys_content = self.get_ai_system(param.get('sys_content'))
            if sys_content:
                messages.insert(0, sys_content)
        # TODO: reverse-proxy path via openai.ChatCompletion is currently
        # broken; requests are posted directly to the endpoint instead.
        pdata = {
            "model": self.ai_model,
            "messages": messages,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
            "frequency_penalty": frequency_penalty,
            "presence_penalty": presence_penalty,
            "stop": stop,
        }
        response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
        try:
            res = response.json()
            if 'choices' in res:
                return res
        except Exception as e:
            _logger.warning("Get Response Json failed: %s", e)
        else:
            # Decoded fine but no 'choices': log the payload for diagnosis.
            _logger.warning('=====================Openai output data: %s' % response.json())
    elif self.ai_model == 'dall-e2':
        # TODO: image engine not wired into chat yet.
        # https://platform.openai.com/docs/guides/images/introduction
        pdata = {
            "prompt": data,
            "n": 3,
            "size": "1024x1024",
        }
        return '建设中'
    else:
        # Legacy completions models (text-davinci-003, ...).
        pdata = {
            "model": self.ai_model,
            "prompt": data,
            "temperature": 1,
            "max_tokens": max_tokens,
            "top_p": 0.6,
            "frequency_penalty": 0.1,
            "presence_penalty": 0.1,
            "stop": stop,
        }
        response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
        res = response.json()
        if 'choices' in res:
            res = '\n'.join([x['text'] for x in res['choices']])
            return res

    return _("Response Timeout, please speak again.")
|
||||
|
||||
def get_azure(self, data, author_id, answer_id, param=None):
    """Query an Azure OpenAI deployment via the openai SDK.

    :param data: prompt string or list of chat-message dicts
    :param param: per-call overrides for max_tokens/temperature/etc.
    :returns: raw response object on success, or a translated timeout message.
    :raises UserError: when endpoint or API version is not configured.
    """
    param = {} if param is None else param  # BUGFIX: no shared mutable default
    self.ensure_one()
    # Azure-specific SDK configuration.
    openai.api_type = self.provider
    if not self.endpoint:
        raise UserError(_("Please Set your AI robot's endpoint first."))
    openai.api_base = self.endpoint
    if not self.api_version:
        raise UserError(_("Please Set your AI robot's API Version first."))
    openai.api_version = self.api_version
    openai.api_key = self.openapi_api_key

    if self.stop:
        stop = self.stop.split(',')
    else:
        stop = ["Human:", "AI:"]
    # NOTE(review): ``stop`` is computed but the API call below passes
    # ``stop=None`` (as in the original) — confirm whether stop sequences
    # should apply to Azure as well.

    if isinstance(data, list):
        messages = data
    else:
        messages = [{"role": "user", "content": data}]

    # Per-call parameters take precedence over the robot's defaults.
    max_tokens = param.get('max_tokens') or self.max_tokens
    temperature = param.get('temperature') or self.temperature
    top_p = param.get('top_p') or self.top_p
    frequency_penalty = param.get('frequency_penalty') or self.frequency_penalty
    presence_penalty = param.get('presence_penalty') or self.presence_penalty
    request_timeout = param.get('request_timeout') or self.ai_timeout

    # Prepend the AI role/persona unless the caller already set one.
    if messages[0].get('role') != 'system':
        sys_content = self.get_ai_system(param.get('sys_content'))
        if sys_content:
            messages.insert(0, sys_content)

    response = openai.ChatCompletion.create(
        engine=self.engine,
        messages=messages,
        # Number of completions requested.
        n=1,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
        stop=None,
        request_timeout=request_timeout,
    )
    if 'choices' in response:
        return response
    _logger.warning('=====================azure output data: %s' % response.json())
    return _("Response Timeout, please speak again.")
|
||||
|
||||
@api.onchange('provider')
def _onchange_provider(self):
    """Set the default endpoint and the provider logo when provider changes."""
    if self.provider == 'openai':
        self.endpoint = 'https://api.openai.com/v1/chat/completions'
    elif self.provider == 'azure':
        self.endpoint = 'https://odoo.openai.azure.com'

    if self.provider:
        # Use the provider logo shipped with the module as the avatar.
        module_path = modules.get_module_path('app_chatgpt', display_warning=False)
        if module_path:
            path = modules.check_resource_path(module_path, ('static/description/src/%s.png' % self.provider))
            if path:
                # BUGFIX: close the file handle (the original leaked it).
                with tools.file_open(path, 'rb') as image_file:
                    self.image_avatar = base64.b64encode(image_file.read())
|
||||
|
||||
@api.onchange('set_ai_model')
def _onchange_set_ai_model(self):
    """Copy the quick-set selection into the free-form ``ai_model`` field,
    clearing it when the selection is emptied."""
    self.ai_model = self.set_ai_model if self.set_ai_model else None
|
||||
|
||||
def filter_sensitive_words(self, data):
    """Mask sensitive words in ``data`` when filtering is enabled.

    Keywords come from the ``sensitive_words`` field (newline-separated),
    on top of the base list bundled in models/lib/sensi_words.txt.
    Returns ``data`` unchanged when filtering is off.
    """
    if not self.is_filtering:
        return data
    search = WordsSearch()
    words = self.sensitive_words
    search.SetKeywords(words.split('\n') if words else [])
    return search.Replace(text=data)
|
||||
|
||||
296
app_chatgpt/models/lib/WordsSearch.py
Normal file
296
app_chatgpt/models/lib/WordsSearch.py
Normal file
@@ -0,0 +1,296 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding:utf-8 -*-
|
||||
# ToolGood.Words.WordsSearch.py
|
||||
# 2020, Lin Zhijun, https://github.com/toolgood/ToolGood.Words
|
||||
# Licensed under the Apache License 2.0
|
||||
# 更新日志
|
||||
# 2020.04.06 第一次提交
|
||||
# 2020.05.16 修改,支持大于0xffff的字符
|
||||
|
||||
import os
|
||||
|
||||
__all__ = ['WordsSearch']
|
||||
__author__ = 'Lin Zhijun'
|
||||
__date__ = '2020.05.16'
|
||||
|
||||
|
||||
class TrieNode():
    """Build-time trie node for the Aho-Corasick keyword automaton."""

    def __init__(self):
        # BUGFIX: the original assigned ``self.Index = 0`` twice.
        self.Index = 0          # position in the flattened BFS node list
        self.Layer = 0          # depth of this node (1-based; 0 = unset/root)
        self.End = False        # True when at least one keyword ends here
        self.Char = ''          # char code (ord) on the edge from Parent
        self.Results = []       # keyword indexes terminating at this node
        self.m_values = {}      # children keyed by char code
        self.Failure = None     # Aho-Corasick failure link
        self.Parent = None      # parent node

    def Add(self, c):
        """Return the child for char code ``c``, creating it if absent."""
        if c in self.m_values:
            return self.m_values[c]
        node = TrieNode()
        node.Parent = self
        node.Char = c
        self.m_values[c] = node
        return node

    def SetResults(self, index):
        """Mark this node terminal and record keyword ``index``."""
        if self.End == False:
            self.End = True
        self.Results.append(index)


class TrieNode2():
    """Compact search-time node: failure chains are pre-merged so lookup
    never follows failure links at runtime."""

    def __init__(self):
        self.End = False
        self.Results = []
        self.m_values = {}
        self.minflag = 0xffff   # smallest child char code (range pre-test)
        self.maxflag = 0        # largest child char code

    def Add(self, c, node3):
        """Register transition ``c`` -> ``node3`` and widen the code range."""
        if self.minflag > c:
            self.minflag = c
        if self.maxflag < c:
            self.maxflag = c
        self.m_values[c] = node3

    def SetResults(self, index):
        """Mark terminal; record keyword ``index`` once (deduplicated)."""
        if self.End == False:
            self.End = True
        if index not in self.Results:
            self.Results.append(index)

    def HasKey(self, c):
        return c in self.m_values

    def TryGetValue(self, c):
        """Return the child for ``c`` or None (cheap range check first)."""
        if self.minflag <= c <= self.maxflag:
            return self.m_values.get(c)
        return None


class WordsSearch():
    """Aho-Corasick multi-keyword matcher (from ToolGood.Words), extended to
    always merge in the bundled ``sensi_words.txt`` base list."""

    def __init__(self):
        self._first = {}        # root TrieNode2 once SetKeywords has run
        self._keywords = []     # full keyword list (caller's + base file)
        self._indexs = []       # per-keyword index, reported in match dicts

    def SetKeywords(self, keywords):
        """Compile ``keywords`` plus the bundled base list into the automaton.

        Must be called before any Find*/Contains*/Replace call.
        """
        base_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'sensi_words.txt')
        words = list(keywords)  # BUGFIX: don't mutate the caller's list
        # BUGFIX: tolerate a missing base file (original crashed) and close
        # the handle (original leaked it).
        if os.path.exists(base_file):
            with open(base_file, 'r', encoding='utf-8') as fh:
                words += fh.read().split('\n')
        self._keywords = words
        self._indexs = list(range(len(words)))

        # Pass 1: build the plain trie, tracking nodes per layer for BFS.
        root = TrieNode()
        nodes_by_layer = {}
        for i, word in enumerate(words):
            nd = root
            for depth, ch in enumerate(word):
                nd = nd.Add(ord(ch))
                if nd.Layer == 0:
                    nd.Layer = depth + 1
                    nodes_by_layer.setdefault(nd.Layer, []).append(nd)
            nd.SetResults(i)

        # Flatten breadth-first (layer keys were created in increasing order).
        all_nodes = [root]
        for layer in nodes_by_layer.keys():
            all_nodes.extend(nodes_by_layer[layer])
        nodes_by_layer = None

        # Pass 2: compute failure links (parents precede children in BFS order).
        for idx, nd in enumerate(all_nodes):
            if idx == 0:
                continue
            nd.Index = idx
            r = nd.Parent.Failure
            c = nd.Char
            while r is not None and c not in r.m_values:
                r = r.Failure
            if r is None:
                nd.Failure = root
            else:
                nd.Failure = r.m_values[c]
                for res_idx in nd.Failure.Results:
                    nd.SetResults(res_idx)
        root.Failure = root

        # Pass 3: convert to TrieNode2, merging each node's failure chain so
        # the search loop never walks failure links.
        compact = [TrieNode2() for _ in all_nodes]
        for idx, old in enumerate(all_nodes):
            new = compact[idx]
            for ch_code in old.m_values:
                new.Add(ch_code, compact[old.m_values[ch_code].Index])
            for res_idx in old.Results:
                new.SetResults(res_idx)
            fail = old.Failure
            while fail is not root:
                for ch_code in fail.m_values:
                    if not new.HasKey(ch_code):
                        new.Add(ch_code, compact[fail.m_values[ch_code].Index])
                for res_idx in fail.Results:
                    new.SetResults(res_idx)
                fail = fail.Failure
        self._first = compact[0]

    def FindFirst(self, text):
        """Return a match dict for the first keyword in ``text``, else None.

        Match dict keys: Keyword, Success, Start, End (inclusive), Index.
        """
        ptr = None
        for pos, ch in enumerate(text):
            code = ord(ch)
            if ptr is None:
                tn = self._first.TryGetValue(code)
            else:
                tn = ptr.TryGetValue(code)
                if tn is None:
                    tn = self._first.TryGetValue(code)
            if tn is not None and tn.End:
                item = tn.Results[0]
                keyword = self._keywords[item]
                return {"Keyword": keyword, "Success": True, "End": pos,
                        "Start": pos + 1 - len(keyword), "Index": self._indexs[item]}
            ptr = tn
        return None

    def FindAll(self, text):
        """Return match dicts for every keyword occurrence in ``text``."""
        matches = []
        ptr = None
        for pos, ch in enumerate(text):
            code = ord(ch)
            if ptr is None:
                tn = self._first.TryGetValue(code)
            else:
                tn = ptr.TryGetValue(code)
                if tn is None:
                    tn = self._first.TryGetValue(code)
            if tn is not None and tn.End:
                for item in tn.Results:
                    keyword = self._keywords[item]
                    matches.append({"Keyword": keyword, "Success": True, "End": pos,
                                    "Start": pos + 1 - len(keyword), "Index": self._indexs[item]})
            ptr = tn
        return matches

    def ContainsAny(self, text):
        """True when ``text`` contains at least one keyword."""
        ptr = None
        for ch in text:
            code = ord(ch)
            if ptr is None:
                tn = self._first.TryGetValue(code)
            else:
                tn = ptr.TryGetValue(code)
                if tn is None:
                    tn = self._first.TryGetValue(code)
            if tn is not None and tn.End:
                return True
            ptr = tn
        return False

    def Replace(self, text, replaceChar='*'):
        """Return ``text`` with every keyword occurrence masked by
        ``replaceChar`` (one replacement char per original char)."""
        out = list(text)
        ptr = None
        for pos, ch in enumerate(text):
            code = ord(ch)
            if ptr is None:
                tn = self._first.TryGetValue(code)
            else:
                tn = ptr.TryGetValue(code)
                if tn is None:
                    tn = self._first.TryGetValue(code)
            if tn is not None and tn.End:
                # Mask back from the end of the first (longest-known) match.
                masked_len = len(self._keywords[tn.Results[0]])
                for j in range(pos + 1 - masked_len, pos + 1):
                    out[j] = replaceChar
            ptr = tn
        return ''.join(out)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Self-test: build the matcher from a small keyword set and verify
    # each public method against a sample sentence.
    keyword_spec = "中国|国人|zg人|乾清宫"
    sample = "我是中国人"

    search = WordsSearch()
    search.SetKeywords(keyword_spec.split('|'))

    print("----------------------------------- WordsSearch -----------------------------------")

    print("WordsSearch FindFirst is run.")
    first_hit = search.FindFirst(sample)
    if first_hit["Keyword"] != "中国":
        print("WordsSearch FindFirst is error.............................")

    print("WordsSearch FindFirst is run.")
    hits = search.FindAll("乾清宫")
    if hits[0]["Keyword"] != "乾清宫":
        print("WordsSearch FindFirst is error.............................")

    print("WordsSearch FindAll is run.")
    hits = search.FindAll(sample)
    if hits[0]["Keyword"] != "中国":
        print("WordsSearch FindAll is error.............................")
    if hits[1]["Keyword"] != "国人":
        print("WordsSearch FindAll is error.............................")
    if hits[0]["Start"] != 2:
        print("WordsSearch FindAll is error.............................")
    if hits[0]["End"] != 3:
        print("WordsSearch FindAll is error.............................")
    if len(hits) != 2:
        print("WordsSearch FindAll is error.............................")

    print("WordsSearch ContainsAny is run.")
    found = search.ContainsAny(sample)
    if found == False:
        print("WordsSearch ContainsAny is error.............................")

    print("WordsSearch Replace is run.")
    cleaned = search.Replace(sample)
    if cleaned != "我是***":
        print("WordsSearch Replace is error.............................")

    print("----------------------------------- Test End -----------------------------------")
|
||||
14093
app_chatgpt/models/lib/sensi_words.txt
Normal file
14093
app_chatgpt/models/lib/sensi_words.txt
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,12 +1,14 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
|
||||
import openai
|
||||
import requests,json
|
||||
import requests, json
|
||||
import datetime
|
||||
# from transformers import TextDavinciTokenizer, TextDavinciModel
|
||||
from odoo import api, fields, models, _
|
||||
from odoo import api, fields, models, tools, _
|
||||
from odoo.exceptions import UserError
|
||||
from odoo.osv import expression
|
||||
from odoo.addons.app_common.models.base import get_ua_type
|
||||
|
||||
import logging
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -14,207 +16,345 @@ _logger = logging.getLogger(__name__)
|
||||
class Channel(models.Model):
|
||||
_inherit = 'mail.channel'
|
||||
|
||||
@api.model
def get_openai(self, api_key, ai_model, data, user="Odoo"):
    """One-shot OpenAI call used by the channel (no robot record needed).

    Routes by ``ai_model``: ``dall-e2`` is a placeholder, chat models post
    to the chat-completions endpoint, everything else uses the legacy
    completions endpoint. Returns the reply text, or the Chinese timeout
    message when no choices come back.
    """
    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
    R_TIMEOUT = 5

    if ai_model == 'dall-e2':
        # TODO: image engine not wired into chat yet; payload kept for reference.
        # image_url = response['data'][0]['url']
        # https://platform.openai.com/docs/guides/images/introduction
        pdata = {
            "prompt": data,
            "n": 3,
            "size": "1024x1024",
        }
        return '建设中'
    elif ai_model in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
        # Chat-completion payload.
        pdata = {
            "model": ai_model,
            "messages": [{"role": "user", "content": data}],
            "temperature": 0.9,
            "max_tokens": 2000,
            "top_p": 1,
            "frequency_penalty": 0.0,
            "presence_penalty": 0.6,
            "user": user,
            "stop": ["Human:", "AI:"]
        }
        reply = requests.post("https://api.openai.com/v1/chat/completions",
                              data=json.dumps(pdata), headers=headers,
                              timeout=R_TIMEOUT).json()
        if 'choices' in reply:
            return '\n'.join(c['message']['content'] for c in reply['choices'])
    else:
        # Legacy completions payload (text-davinci-003, ...).
        pdata = {
            "model": ai_model,
            "prompt": data,
            "temperature": 0.9,
            "max_tokens": 2000,
            "top_p": 1,
            "frequency_penalty": 0.0,
            "presence_penalty": 0.6,
            "user": user,
            "stop": ["Human:", "AI:"]
        }
        reply = requests.post("https://api.openai.com/v1/completions",
                              data=json.dumps(pdata), headers=headers,
                              timeout=R_TIMEOUT).json()
        if 'choices' in reply:
            return '\n'.join(c['text'] for c in reply['choices'])

    return "获取结果超时,请重新跟我聊聊。"
|
||||
is_private = fields.Boolean(string="Private", default=False, help="Check to set Private, Can only use by user, not Public")
|
||||
# 因为 channel_member_ids 不好处理,在此增加此字段
|
||||
# 主Ai
|
||||
ai_partner_id = fields.Many2one(comodel_name="res.partner", string="Main Ai", required=False,
|
||||
domain=[('gpt_id', '!=', None), ('is_chat_private', '=', True)],
|
||||
default=lambda self: self._app_get_m2o_default('ai_partner_id'),
|
||||
help="Main Ai is the robot help you default.")
|
||||
ext_ai_partner_id = fields.Many2one(comodel_name="res.partner", string="Secondary Ai",
|
||||
domain=[('gpt_id', '!=', None), ('is_chat_private', '=', True)])
|
||||
description = fields.Char('Ai Character', help="Ai would help you act as the Character set.")
|
||||
set_max_tokens = fields.Selection([
|
||||
('300', 'Short'),
|
||||
('600', 'Standard'),
|
||||
('1000', 'Medium'),
|
||||
('2000', 'Long'),
|
||||
('3000', 'Overlength'),
|
||||
('32000', '32K'),
|
||||
], string='Max Response', default='600', help="越大返回内容越多,计费也越多")
|
||||
set_chat_count = fields.Selection([
|
||||
('none', 'Ai Auto'),
|
||||
('1', '1标准'),
|
||||
('3', '3强关联'),
|
||||
('5', '5超强关联'),
|
||||
], string="History Count", default='1', help="0-5,设定后,会将最近n次对话发给Ai,有助于他更好的回答,但太大费用也高")
|
||||
set_temperature = fields.Selection([
|
||||
('2', '天马行空'),
|
||||
('1.5', '创造性'),
|
||||
('1', '标准'),
|
||||
('0.6', '理性'),
|
||||
('0.1', '保守'),
|
||||
], string="Set Temperature", default='1', help="0-21,值越大越富有想像力,越小则越保守")
|
||||
set_top_p = fields.Selection([
|
||||
('0.9', '严谨惯性思维'),
|
||||
('0.6', '标准推理'),
|
||||
('0.4', '跳跃性'),
|
||||
('0.1', '随便'),
|
||||
], string="Top Probabilities", default='0.6', help="0-1,值越大越倾向大众化的连贯思维")
|
||||
# 避免使用常用词
|
||||
set_frequency_penalty = fields.Selection([
|
||||
('2', '老学究-晦涩难懂'),
|
||||
('1.5', '学院派-较多高级词'),
|
||||
('1', '标准'),
|
||||
('0.1', '少常用词'),
|
||||
('-1', '通俗易懂'),
|
||||
('-2', '大白话'),
|
||||
], string='Frequency Penalty', default='1', help="-2~2,值越大越少使用常用词")
|
||||
set_presence_penalty = fields.Selection([
|
||||
('2', '多样强迫症'),
|
||||
('1.5', '新颖化'),
|
||||
('1', '标准'),
|
||||
('0.1', '允许常规重复'),
|
||||
('-1', '允许较多重复'),
|
||||
('-2', '更多强调重复'),
|
||||
], string='Presence penalty', default='1', help="-2~2,值越大越少重复词")
|
||||
|
||||
@api.model
|
||||
def get_openai_context(self, channel_id, partner_chatgpt, current_prompt, seconds=600):
|
||||
afterTime = fields.Datetime.now() - datetime.timedelta(seconds=seconds)
|
||||
message_model = self.env['mail.message'].sudo()
|
||||
prompt = [f"Human:{current_prompt}\nAI:", ]
|
||||
domain = [('res_id', '=', channel_id),
|
||||
('model', '=', 'mail.channel'),
|
||||
('message_type', '!=', 'user_notification'),
|
||||
('parent_id', '=', False),
|
||||
('date', '>=', afterTime),
|
||||
('author_id', '=', self.env.user.partner_id.id)]
|
||||
messages = message_model.with_context(tz='UTC').search(domain, order="id desc", limit=15)
|
||||
# print('domain:',domain)
|
||||
# print('messages:',messages)
|
||||
for msg in messages:
|
||||
ai_msg = message_model.search([("res_id", "=", channel_id),
|
||||
('model', '=', msg.model),
|
||||
('parent_id', '=', msg.id),
|
||||
('author_id', '=', partner_chatgpt),
|
||||
('body', '!=', '<p>获取结果超时,请重新跟我聊聊。</p>')])
|
||||
if ai_msg:
|
||||
prompt.append("Human:%s\nAI:%s" % (
|
||||
msg.body.replace("<p>", "").replace("</p>", ""), ai_msg.body.replace("<p>", "").replace("</p>", "")))
|
||||
# print(msg.body.replace("<p>", "").replace("</p>", ""))
|
||||
# print(ai_msg.body.replace("<p>", "").replace("</p>", ""))
|
||||
# todo: 这里用 compute?
|
||||
max_tokens = fields.Integer('最长响应Token', default=600, help="越大返回内容越多,计费也越多")
|
||||
chat_count = fields.Integer(string="上下文数量", default=0, help="0~3,设定后,会将最近n次对话发给Ai,有助于他更好的回答")
|
||||
temperature = fields.Float(string="创造性值", default=1, help="0~2,值越大越富有想像力,越小则越保守")
|
||||
top_p = fields.Float(string="连贯性值", default=0.6, help="0~1,值越大越富有想像力,越小则越保守")
|
||||
frequency_penalty = fields.Float('避免常用词值', default=1, help="-2~2,值越大越少使用常用词")
|
||||
presence_penalty = fields.Float('避免重复词值', default=1, help="-2~2,值越大越少重复词")
|
||||
|
||||
is_current_channel = fields.Boolean('是否当前用户默认频道', compute='_compute_is_current_channel', help='是否当前用户默认微信对话频道')
|
||||
|
||||
def name_get(self):
|
||||
result = []
|
||||
for c in self:
|
||||
if c.channel_type == 'channel' and c.is_private:
|
||||
pre = '[私]'
|
||||
else:
|
||||
_logger.error(f"not find for id:{str(msg.id)}")
|
||||
pre = ''
|
||||
result.append((c.id, "%s%s" % (pre, c.name or '')))
|
||||
return result
|
||||
|
||||
return '\n'.join(prompt[::-1])
|
||||
def get_openai_context(self, channel_id, author_id, answer_id, minutes=60, chat_count=0):
|
||||
# 上下文处理,要处理群的方式,以及独聊的方式
|
||||
# azure新api 处理
|
||||
context_history = []
|
||||
afterTime = fields.Datetime.now() - datetime.timedelta(minutes=minutes)
|
||||
message_model = self.env['mail.message'].sudo()
|
||||
# 处理消息: 取最新问题 + 上 chat_count=1次的交互,将之前的交互按时间顺序拼接。
|
||||
# 注意: ai 每一次回复都有 parent_id 来处理连续性
|
||||
# 私聊处理
|
||||
|
||||
def get_chatgpt_answer(self, prompt, partner_name):
|
||||
response = openai.Completion.create(
|
||||
model="text-davinci-003",
|
||||
prompt=prompt,
|
||||
temperature=0.6,
|
||||
max_tokens=3000,
|
||||
top_p=1,
|
||||
frequency_penalty=0,
|
||||
presence_penalty=0,
|
||||
user=partner_name,
|
||||
)
|
||||
res = response['choices'][0]['text']
|
||||
return res
|
||||
# todo: 更好的处理方式
|
||||
domain = [('res_id', '=', channel_id),
|
||||
('model', '=', 'mail.channel'),
|
||||
('message_type', '!=', 'user_notification'),
|
||||
('parent_id', '!=', False),
|
||||
('is_ai', '=', True),
|
||||
('body', '!=', '<p>%s</p>' % _('Response Timeout, please speak again.')),
|
||||
('body', '!=', _('温馨提示:您发送的内容含有敏感词,请修改内容后再向我发送。'))]
|
||||
|
||||
if self.channel_type in ['group', 'channel']:
|
||||
# 群聊增加时间限制,当前找所有人,不限制 author_id
|
||||
domain = expression.AND([domain, [('date', '>=', afterTime)]])
|
||||
else:
|
||||
domain = expression.AND([domain, [('author_id', '=', answer_id.id)]])
|
||||
if chat_count == 0:
|
||||
ai_msg_list = []
|
||||
else:
|
||||
ai_msg_list = message_model.with_context(tz='UTC').search(domain, order="id desc", limit=chat_count)
|
||||
for ai_msg in ai_msg_list:
|
||||
# 判断这个 ai_msg 是不是ai发,有才 insert。 判断 user_msg 是不是 user发的,有才 insert
|
||||
user_msg = ai_msg.parent_id.sudo()
|
||||
if ai_msg.author_id.sudo().gpt_id and answer_id.sudo().gpt_id and ai_msg.author_id.sudo().gpt_id == answer_id.sudo().gpt_id:
|
||||
ai_content = str(ai_msg.body).replace("<p>", "").replace("</p>", "").replace("<p>", "")
|
||||
context_history.insert(0, {
|
||||
'role': 'assistant',
|
||||
'content': ai_content,
|
||||
})
|
||||
if not user_msg.author_id.gpt_id:
|
||||
user_content = user_msg.description.replace("<p>", "").replace("</p>", "").replace('@%s' % answer_id.name, '').lstrip()
|
||||
context_history.insert(0, {
|
||||
'role': 'user',
|
||||
'content': user_content,
|
||||
})
|
||||
return context_history
|
||||
|
||||
def get_ai_config(self, ai):
|
||||
# 勾子,用于取ai 配置
|
||||
return {}
|
||||
|
||||
def get_ai_response(self, ai, messages, channel, user_id, message):
|
||||
author_id = message.create_uid.partner_id
|
||||
answer_id = user_id.partner_id
|
||||
# todo: 只有个人配置的群聊才给配置
|
||||
param = self.get_ai_config(ai)
|
||||
res, usage, is_ai = ai.get_ai(messages, author_id, answer_id, param)
|
||||
if res:
|
||||
if get_ua_type() != 'wxweb':
|
||||
# 处理当微信语音返回时,是直接回文本信息,不需要转换回车
|
||||
res = res.replace('\n', '<br/>')
|
||||
new_msg = channel.with_user(user_id).message_post(body=res, message_type='comment', subtype_xmlid='mail.mt_comment', parent_id=message.id)
|
||||
if usage:
|
||||
if ai.provider == 'ali':
|
||||
prompt_tokens = usage['input_tokens']
|
||||
completion_tokens = usage['output_tokens']
|
||||
total_tokens = usage['input_tokens'] + usage['output_tokens']
|
||||
else:
|
||||
prompt_tokens = usage['prompt_tokens']
|
||||
completion_tokens = usage['completion_tokens']
|
||||
total_tokens = usage['total_tokens']
|
||||
new_msg.write({
|
||||
'human_prompt_tokens': prompt_tokens,
|
||||
'ai_completion_tokens': completion_tokens,
|
||||
'cost_tokens': total_tokens,
|
||||
})
|
||||
|
||||
def _notify_thread(self, message, msg_vals=False, **kwargs):
|
||||
rdata = super(Channel, self)._notify_thread(message, msg_vals=msg_vals, **kwargs)
|
||||
# print(f'rdata:{rdata}')
|
||||
to_partner_id = self.env['res.partner']
|
||||
answer_id = self.env['res.partner']
|
||||
user_id = self.env['res.users']
|
||||
author_id = msg_vals.get('author_id')
|
||||
gpt_id = self.env['ai.robot']
|
||||
ai = self.env['ai.robot'].sudo()
|
||||
channel = self.env['mail.channel']
|
||||
channel_type = self.channel_type
|
||||
messages = []
|
||||
|
||||
# 不处理 一般notify,但处理欢迎
|
||||
if '<div class="o_mail_notification' in message.body and message.body != _('<div class="o_mail_notification">joined the channel</div>'):
|
||||
return rdata
|
||||
if 'o_odoobot_command' in message.body:
|
||||
return rdata
|
||||
|
||||
if channel_type == 'chat':
|
||||
channel_partner_ids = self.channel_partner_ids
|
||||
to_partner_id = channel_partner_ids - message.author_id
|
||||
user_id = to_partner_id.mapped('user_ids').filtered(lambda r: r.gpt_id)[:1]
|
||||
if user_id:
|
||||
answer_id = channel_partner_ids - message.author_id
|
||||
user_id = answer_id.mapped('user_ids').sudo().filtered(lambda r: r.gpt_id)[:1]
|
||||
if user_id and answer_id.gpt_id:
|
||||
gpt_policy = user_id.gpt_policy
|
||||
gpt_wl_users = user_id.gpt_wl_users
|
||||
is_allow = message.create_uid.id in gpt_wl_users.ids
|
||||
gpt_wl_partners = user_id.gpt_wl_partners
|
||||
is_allow = message.author_id.id in gpt_wl_partners.ids
|
||||
if gpt_policy == 'all' or (gpt_policy == 'limit' and is_allow):
|
||||
gpt_id = user_id.gpt_id
|
||||
ai = answer_id.sudo().gpt_id
|
||||
|
||||
elif channel_type in ['group', 'channel']:
|
||||
# partner_ids = @ ids
|
||||
partner_ids = list(msg_vals.get('partner_ids'))
|
||||
if hasattr(self, 'ai_partner_id') and self.ai_partner_id:
|
||||
# 当有主id时,使用主id
|
||||
if self.ai_partner_id.id in partner_ids:
|
||||
partner_ids = [self.ai_partner_id.id]
|
||||
if partner_ids:
|
||||
# 常规群聊 @
|
||||
partners = self.env['res.partner'].search([('id', 'in', partner_ids)])
|
||||
# user_id = user has binded gpt robot
|
||||
user_id = partners.mapped('user_ids').filtered(lambda r: r.gpt_id)[:1]
|
||||
if user_id:
|
||||
gpt_policy = user_id.gpt_policy
|
||||
gpt_wl_users = user_id.gpt_wl_users
|
||||
is_allow = message.create_uid.id in gpt_wl_users.ids
|
||||
to_partner_id = user_id.partner_id
|
||||
if gpt_policy == 'all' or (gpt_policy == 'limit' and is_allow):
|
||||
gpt_id = user_id.gpt_id
|
||||
|
||||
chatgpt_channel_id = self.env.ref('app_chatgpt.channel_chatgpt')
|
||||
|
||||
# print('author_id:',author_id)
|
||||
|
||||
# print('partner_chatgpt.id:',partner_chatgpt.id)
|
||||
|
||||
prompt = msg_vals.get('body')
|
||||
# print('prompt:', prompt)
|
||||
# print('-----')
|
||||
if not prompt:
|
||||
# user_id = user, who has binded gpt robot
|
||||
user_id = partners.mapped('user_ids').sudo().filtered(lambda r: r.gpt_id)[:1]
|
||||
elif message.body == _('<div class="o_mail_notification">joined the channel</div>'):
|
||||
# 欢迎的情况
|
||||
partners = self.channel_partner_ids.sudo().filtered(lambda r: r.gpt_id)[:1]
|
||||
user_id = partners.mapped('user_ids')[:1]
|
||||
elif self.member_count == 2:
|
||||
# 处理独聊频道
|
||||
if hasattr(self, 'is_private') and not self.is_private:
|
||||
# 2个人的非私有频道不处理
|
||||
pass
|
||||
else:
|
||||
partners = self.channel_partner_ids.sudo().filtered(lambda r: r.gpt_id and r != message.author_id)[:1]
|
||||
user_id = partners.mapped('user_ids')[:1]
|
||||
elif not message.author_id.gpt_id:
|
||||
# 没有@时,默认第一个robot
|
||||
# robot = self.env.ref('app_chatgpt.chatgpt_robot')
|
||||
# 临时用azure
|
||||
if hasattr(self, 'ai_partner_id') and self.ai_partner_id:
|
||||
# 当有主id时,使用主id
|
||||
user_id = self.ai_partner_id.mapped('user_ids')[:1]
|
||||
else:
|
||||
# 使用群里的第一个robot
|
||||
partners = self.channel_partner_ids.sudo().filtered(lambda r: r.gpt_id)[:1]
|
||||
user_id = partners.mapped('user_ids')[:1]
|
||||
if user_id:
|
||||
ai = user_id.sudo().gpt_id
|
||||
# 此处理不判断,将此处逻辑迁移至 get_ai_pre, 非ai回复的直接内容注意设置为 is_ai=false
|
||||
# gpt_policy = user_id.gpt_policy
|
||||
# gpt_wl_partners = user_id.gpt_wl_partners
|
||||
# is_allow = message.author_id.id in gpt_wl_partners.ids
|
||||
# answer_id = user_id.partner_id
|
||||
# if gpt_policy == 'all' or (gpt_policy == 'limit' and is_allow):
|
||||
# ai = user_id.sudo().gpt_id
|
||||
# elif user_id.gpt_id and not is_allow:
|
||||
# # 暂时有限用户的Ai
|
||||
# raise UserError(_('此Ai暂时未开放,请联系管理员。'))
|
||||
if hasattr(ai, 'is_translator') and ai.is_translator:
|
||||
return rdata
|
||||
chatgpt_channel_id = self.env.ref('app_chatgpt.channel_chatgpt')
|
||||
|
||||
if message.body == _('<div class="o_mail_notification">joined the channel</div>'):
|
||||
msg = _("Please warmly welcome our new partner %s and send him the best wishes.") % message.author_id.name
|
||||
else:
|
||||
# 不能用 preview, 如果用 : 提示词则 preview信息丢失
|
||||
plaintext_ct = tools.html_to_inner_content(message.body)
|
||||
msg = plaintext_ct.replace('@%s' % answer_id.name, '').lstrip()
|
||||
|
||||
if not msg:
|
||||
return rdata
|
||||
|
||||
if self._context.get('app_ai_sync_config') and self._context.get('app_ai_sync_config') in ['sync', 'async']:
|
||||
sync_config = self._context.get('app_ai_sync_config')
|
||||
else:
|
||||
sync_config = self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openai_sync_config')
|
||||
# api_key = self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openapi_api_key')
|
||||
api_key = ''
|
||||
if gpt_id:
|
||||
api_key = gpt_id.openapi_api_key
|
||||
# ai处理,不要自问自答
|
||||
if ai and answer_id != message.author_id:
|
||||
api_key = ai.openapi_api_key
|
||||
if not api_key:
|
||||
_logger.warning(_("ChatGPT Robot【%s】have not set open api key."))
|
||||
return rdata
|
||||
try:
|
||||
openapi_context_timeout = int(self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openapi_context_timeout')) or 600
|
||||
except:
|
||||
openapi_context_timeout = 600
|
||||
try:
|
||||
openapi_context_timeout = int(self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openapi_context_timeout')) or 60
|
||||
except:
|
||||
openapi_context_timeout = 60
|
||||
openai.api_key = api_key
|
||||
# 非4版本,取0次。其它取3 次历史
|
||||
chat_count = 3
|
||||
if '4' in ai.ai_model or '4' in ai.name:
|
||||
chat_count = 1
|
||||
if hasattr(self, 'chat_count'):
|
||||
if self.chat_count > 0:
|
||||
chat_count = 1
|
||||
else:
|
||||
chat_count = chat_count
|
||||
|
||||
if author_id != answer_id.id and self.channel_type == 'chat':
|
||||
# 私聊
|
||||
_logger.info(f'私聊:author_id:{author_id},partner_chatgpt.id:{answer_id.id}')
|
||||
channel = self.env[msg_vals.get('model')].browse(msg_vals.get('res_id'))
|
||||
elif author_id != answer_id.id and msg_vals.get('model', '') == 'mail.channel' and msg_vals.get('res_id', 0) == chatgpt_channel_id.id:
|
||||
# todo: 公开的群聊,当前只开1个,后续更多
|
||||
_logger.info(f'频道群聊:author_id:{author_id},partner_chatgpt.id:{answer_id.id}')
|
||||
channel = chatgpt_channel_id
|
||||
elif author_id != answer_id.id and msg_vals.get('model', '') == 'mail.channel' and self.channel_type in ['group', 'channel']:
|
||||
# 高级用户自建的话题
|
||||
channel = self.env[msg_vals.get('model')].browse(msg_vals.get('res_id'))
|
||||
if hasattr(channel, 'is_private') and channel.description:
|
||||
messages.append({"role": "system", "content": channel.description})
|
||||
|
||||
try:
|
||||
c_history = self.get_openai_context(channel.id, author_id, answer_id, openapi_context_timeout, chat_count)
|
||||
if c_history:
|
||||
messages += c_history
|
||||
messages.append({"role": "user", "content": msg})
|
||||
msg_len = sum(len(str(m)) for m in messages)
|
||||
# 接口最大接收 8430 Token
|
||||
if msg_len * 2 > ai.max_send_char:
|
||||
messages = []
|
||||
if hasattr(channel, 'is_private') and channel.description:
|
||||
messages.append({"role": "system", "content": channel.description})
|
||||
messages.append({"role": "user", "content": msg})
|
||||
msg_len = sum(len(str(m)) for m in messages)
|
||||
if msg_len * 2 > ai.max_send_char:
|
||||
new_msg = channel.with_user(user_id).message_post(body=_('您所发送的提示词已超长。'), message_type='comment',
|
||||
subtype_xmlid='mail.mt_comment',
|
||||
parent_id=message.id)
|
||||
|
||||
openai.api_key = api_key
|
||||
partner_name = ''
|
||||
# print(msg_vals)
|
||||
# print(msg_vals.get('record_name', ''))
|
||||
# print('self.channel_type :',self.channel_type)
|
||||
if gpt_id:
|
||||
ai_model = gpt_id.ai_model or 'text-davinci-003'
|
||||
# print('chatgpt_name:', chatgpt_name)
|
||||
# if author_id != to_partner_id.id and (chatgpt_name in msg_vals.get('record_name', '') or 'ChatGPT' in msg_vals.get('record_name', '') ) and self.channel_type == 'chat':
|
||||
if author_id != to_partner_id.id and self.channel_type == 'chat':
|
||||
_logger.info(f'私聊:author_id:{author_id},partner_chatgpt.id:{to_partner_id.id}')
|
||||
try:
|
||||
channel = self.env[msg_vals.get('model')].browse(msg_vals.get('res_id'))
|
||||
if ai_model not in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
|
||||
prompt = self.get_openai_context(channel.id, to_partner_id.id, prompt, openapi_context_timeout)
|
||||
print(prompt)
|
||||
# res = self.get_chatgpt_answer(prompt,partner_name)
|
||||
res = self.get_openai(api_key, ai_model, prompt, partner_name)
|
||||
res = res.replace('\n', '<br/>')
|
||||
# print('res:',res)
|
||||
# print('channel:',channel)
|
||||
channel.with_user(user_id).message_post(body=res, message_type='comment',subtype_xmlid='mail.mt_comment', parent_id=message.id)
|
||||
# channel.with_user(user_chatgpt).message_post(body=res, message_type='notification', subtype_xmlid='mail.mt_comment')
|
||||
# channel.sudo().message_post(
|
||||
# body=res,
|
||||
# author_id=partner_chatgpt.id,
|
||||
# message_type="comment",
|
||||
# subtype_xmlid="mail.mt_comment",
|
||||
# )
|
||||
# self.with_user(user_chatgpt).message_post(body=res, message_type='comment', subtype_xmlid='mail.mt_comment')
|
||||
except Exception as e:
|
||||
raise UserError(_(e))
|
||||
|
||||
elif author_id != to_partner_id.id and msg_vals.get('model', '') == 'mail.channel' and msg_vals.get('res_id', 0) == chatgpt_channel_id.id:
|
||||
_logger.info(f'频道群聊:author_id:{author_id},partner_chatgpt.id:{to_partner_id.id}')
|
||||
try:
|
||||
prompt = self.get_openai_context(chatgpt_channel_id.id, to_partner_id.id, prompt, openapi_context_timeout)
|
||||
# print(prompt)
|
||||
# res = self.get_chatgpt_answer(prompt, partner_name)
|
||||
res = self.get_openai(api_key, ai_model, prompt, partner_name)
|
||||
res = res.replace('\n', '<br/>')
|
||||
chatgpt_channel_id.with_user(user_id).message_post(body=res, message_type='comment', subtype_xmlid='mail.mt_comment',parent_id=message.id)
|
||||
except Exception as e:
|
||||
raise UserError(_(e))
|
||||
# if msg_len * 2 >= 8000:
|
||||
# messages = [{"role": "user", "content": msg}]
|
||||
if sync_config == 'sync':
|
||||
self.get_ai_response(ai, messages, channel, user_id, message)
|
||||
else:
|
||||
self.with_delay().get_ai_response(ai, messages, channel, user_id, message)
|
||||
except Exception as e:
|
||||
raise UserError(_(e))
|
||||
|
||||
return rdata
|
||||
|
||||
def _message_post_after_hook(self, message, msg_vals):
|
||||
if message.author_id.gpt_id:
|
||||
if msg_vals['body'] not in [_('Response Timeout, please speak again.'), _('温馨提示:您发送的内容含有敏感词,请修改内容后再向我发送。'),
|
||||
_('此Ai暂时未开放,请联系管理员。'), _('您所发送的提示词已超长。')]:
|
||||
message.is_ai = True
|
||||
return super(Channel, self)._message_post_after_hook(message, msg_vals)
|
||||
|
||||
@api.model
|
||||
def _get_my_last_cid(self):
|
||||
# 获取当前用户最后一次进入的channel,返回该channel的id
|
||||
# todo: 优化,每次聊天进入时就 write
|
||||
user = self.env.user
|
||||
msgs = self.env['mail.message'].sudo().search([
|
||||
('model', '=', 'mail.channel'),
|
||||
('author_id', '=', user.partner_id.id),
|
||||
], limit=3, order='id desc')
|
||||
c_id = 0
|
||||
c = self
|
||||
for m in msgs:
|
||||
c = self.browse(m.res_id)
|
||||
if c.is_member:
|
||||
c_id = c.id
|
||||
break
|
||||
if not c_id:
|
||||
c = self.env.ref('app_chatgpt.channel_chatgpt', raise_if_not_found=False)
|
||||
c_id = c.id or False
|
||||
if c and not c.is_member:
|
||||
c.sudo().add_members([user.partner_id.id])
|
||||
return c_id
|
||||
|
||||
@api.onchange('ai_partner_id')
|
||||
def _onchange_ai_partner_id(self):
|
||||
if self.ai_partner_id and self.ai_partner_id.image_1920:
|
||||
self.image_128 = self.ai_partner_id.avatar_128
|
||||
|
||||
@@ -6,8 +6,25 @@ from odoo import fields, models
|
||||
class Message(models.Model):
|
||||
_inherit = "mail.message"
|
||||
|
||||
human_prompt_tokens = fields.Integer('Human Prompt Tokens')
|
||||
ai_completion_tokens = fields.Integer('AI Completion Tokens')
|
||||
cost_tokens = fields.Integer('Cost Tokens')
|
||||
# 是否ai回复
|
||||
is_ai = fields.Boolean('Is Ai', default=False)
|
||||
|
||||
def _message_add_reaction(self, content):
|
||||
super(Message, self)._message_add_reaction(content)
|
||||
if self.create_uid.gpt_id:
|
||||
# 处理反馈
|
||||
pass
|
||||
|
||||
def message_format(self, format_reply=True):
|
||||
message_values = super(Message, self).message_format(format_reply=format_reply)
|
||||
|
||||
for message in message_values:
|
||||
message_sudo = self.browse(message['id']).sudo().with_prefetch(self.ids)
|
||||
message['human_prompt_tokens'] = message_sudo.human_prompt_tokens
|
||||
message['ai_completion_tokens'] = message_sudo.ai_completion_tokens
|
||||
message['cost_tokens'] = message_sudo.cost_tokens
|
||||
message['is_ai'] = message_sudo.is_ai
|
||||
return message_values
|
||||
|
||||
8
app_chatgpt/models/mail_thread.py
Normal file
8
app_chatgpt/models/mail_thread.py
Normal file
@@ -0,0 +1,8 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from odoo import fields, models, api, _
|
||||
|
||||
|
||||
class MailThread(models.AbstractModel):
|
||||
_inherit = "mail.thread"
|
||||
|
||||
21
app_chatgpt/models/res.json
Normal file
21
app_chatgpt/models/res.json
Normal file
@@ -0,0 +1,21 @@
|
||||
{
|
||||
'id': 'chatcmpl-747IRWr2Ij3HA6NVTWp4ZTnEA2grW',
|
||||
'object': 'chat.completion',
|
||||
'created': 1681215715,
|
||||
'model': 'gpt-3.5-turbo-0301',
|
||||
'usage': {
|
||||
'prompt_tokens': 17,
|
||||
'completion_tokens': 38,
|
||||
'total_tokens': 55
|
||||
},
|
||||
'choices': [
|
||||
{
|
||||
'message': {
|
||||
'role': 'assistant',
|
||||
'content': '非常抱歉,我不太理解您在说什么。 可以提供更多背景信息或上下文吗?'
|
||||
},
|
||||
'finish_reason': 'stop',
|
||||
'index': 0
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,5 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2020-Present InTechual Solutions. (<https://intechualsolutions.com/>)
|
||||
|
||||
from odoo import fields, models
|
||||
|
||||
@@ -7,4 +6,11 @@ from odoo import fields, models
|
||||
class ResConfigSettings(models.TransientModel):
|
||||
_inherit = "res.config.settings"
|
||||
|
||||
openapi_context_timeout = fields.Integer(string="上下文连接超时", help="多少秒以内的聊天信息作为上下文继续", config_parameter="app_chatgpt.openapi_context_timeout")
|
||||
openapi_context_timeout = fields.Integer(string="Connect Timout", help="群聊中多少分钟以内的聊天信息作为上下文继续", config_parameter="app_chatgpt.openapi_context_timeout")
|
||||
openai_sync_config = fields.Selection([
|
||||
('sync', 'Synchronous'),
|
||||
('async', 'Asynchronous')
|
||||
], string='Sync Config', default='sync', config_parameter="app_chatgpt.openai_sync_config")
|
||||
module_app_ai_bard = fields.Boolean("Google Bard Ai")
|
||||
module_app_ai_baidu = fields.Boolean("Baidu Ai China", help='百度文心一格')
|
||||
module_app_ai_ali = fields.Boolean("Ali Ai China", help='阿里通义千问')
|
||||
|
||||
58
app_chatgpt/models/res_partner.py
Normal file
58
app_chatgpt/models/res_partner.py
Normal file
@@ -0,0 +1,58 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from odoo import fields, models, api
|
||||
|
||||
|
||||
class ResPartner(models.Model):
|
||||
_inherit = "res.partner"
|
||||
|
||||
gpt_id = fields.Many2one('ai.robot', string='Bind to Ai', ondelete='set null')
|
||||
|
||||
is_chat_private = fields.Boolean('Allow Chat Private', default=False)
|
||||
|
||||
@api.model
|
||||
def im_search(self, name, limit=20):
|
||||
users = self.env['res.users'].search([
|
||||
('id', '!=', self.env.user.id),
|
||||
('name', 'ilike', name),
|
||||
('active', '=', True),
|
||||
('share', '=', False),
|
||||
('is_chat_private', '=', True)
|
||||
], order='gpt_id, name, id', limit=limit)
|
||||
return list(users.partner_id.mail_partner_format().values())
|
||||
|
||||
def mail_partner_format(self, fields=None):
|
||||
# 直接覆盖原生,增加 gpt_id 字段
|
||||
partners_format = dict()
|
||||
if not fields:
|
||||
fields = {'id': True, 'name': True, 'email': True, 'active': True, 'im_status': True, 'gpt_id': 0, 'user': {}}
|
||||
for partner in self:
|
||||
data = {}
|
||||
if 'id' in fields:
|
||||
data['id'] = partner.id
|
||||
if 'name' in fields:
|
||||
name = partner.name
|
||||
# 英文不好分,暂时不隐名
|
||||
# if not partner.related_user_id.gpt_id:
|
||||
# name = partner.name[0] + '*' * (len(partner.name) - 1)
|
||||
data['name'] = name
|
||||
if 'email' in fields:
|
||||
data['email'] = partner.email
|
||||
if 'active' in fields:
|
||||
data['active'] = partner.active
|
||||
if 'im_status' in fields:
|
||||
data['im_status'] = partner.im_status
|
||||
if 'gpt_id' in fields:
|
||||
data['gpt_id'] = partner.gpt_id.id if partner.gpt_id else 0
|
||||
if 'user' in fields:
|
||||
internal_users = partner.user_ids - partner.user_ids.filtered('share')
|
||||
main_user = internal_users[0] if len(internal_users) > 0 else partner.user_ids[0] if len(partner.user_ids) > 0 else self.env['res.users']
|
||||
data['user'] = {
|
||||
"id": main_user.id,
|
||||
"isInternalUser": not main_user.share,
|
||||
} if main_user else [('clear',)]
|
||||
# if 'guest' in self.env.context or not self.env.user.has_group('base.group_erp_manager'):
|
||||
# 完全不显示 邮箱
|
||||
data.pop('email', None)
|
||||
partners_format[partner] = data
|
||||
return partners_format
|
||||
24
app_chatgpt/models/res_partner_ai_use.py
Normal file
24
app_chatgpt/models/res_partner_ai_use.py
Normal file
@@ -0,0 +1,24 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from odoo import fields, models
|
||||
|
||||
|
||||
class ResPartnerAiUse(models.Model):
|
||||
_name = "res.partner.ai.use"
|
||||
_description = '消费者Ai使用情况'
|
||||
|
||||
name = fields.Many2one('res.partner', 'Partner')
|
||||
ai_user_id = fields.Many2one('res.users', 'Ai User', domain=[('gpt_id', '!=', False)])
|
||||
first_ask_time = fields.Datetime('First Ask Time')
|
||||
latest_ask_time = fields.Datetime('Latest Ask Time')
|
||||
service_start_date = fields.Datetime('Service Start Date')
|
||||
service_end_date = fields.Datetime('Service End Date')
|
||||
used_number = fields.Integer('Number of Used')
|
||||
max_number = fields.Integer('Max Number of Call')
|
||||
human_prompt_tokens = fields.Integer('Human Prompt Tokens')
|
||||
ai_completion_tokens = fields.Integer('AI Completion Tokens')
|
||||
tokens_total = fields.Integer('Total Tokens')
|
||||
token_balance = fields.Integer('Token Balance')
|
||||
# balance = allow - total
|
||||
token_allow = fields.Integer('Token Allow')
|
||||
|
||||
@@ -6,10 +6,12 @@ from odoo import fields, models
|
||||
class ResUsers(models.Model):
|
||||
_inherit = "res.users"
|
||||
|
||||
gpt_id = fields.Many2one('ai.robot', string='Bind to ChatGpt')
|
||||
# 改为在 partner中设置,用户处绑定
|
||||
gpt_id = fields.Many2one('ai.robot', string='Bind to Ai', related='partner_id.gpt_id', inherited=True, readonly=False)
|
||||
gpt_policy = fields.Selection([
|
||||
('all', 'All Users'),
|
||||
('limit', 'Selected Users')
|
||||
], string='Allowed Conversation Mode', default='all', ondelete='set default')
|
||||
gpt_wl_users = fields.Many2many('res.users', 'res_users_res_users_rel', 'robot_id', 'user_id', string='Allowed Users', domain="[('id', '!=', id)]")
|
||||
gpt_wl_partners = fields.Many2many('res.partner', 'res_partner_ai_use', 'ai_user_id', 'name', string='Allowed Partners')
|
||||
gpt_demo_time = fields.Integer('Default Demo Time', default=0)
|
||||
is_chat_private = fields.Boolean('Allow Chat Private', default=False, related='partner_id.is_chat_private', inherited=True, readonly=False)
|
||||
File diff suppressed because it is too large
Load Diff
@@ -5,7 +5,6 @@
|
||||
"owned_by": "openai",
|
||||
"permission": [
|
||||
{
|
||||
"id": "modelperm-ZErASyl63fhYUeMMk7QKOHAB",
|
||||
"object": "model_permission",
|
||||
"created": 1677691854,
|
||||
"allow_create_engine": false,
|
||||
|
||||
Reference in New Issue
Block a user