Mirror of https://github.com/guohuadeng/app-odoo.git, synced 2025-02-23 04:11:36 +02:00
prepare #I6SC9C handle Azure private chats and group chats; optimize app_chatgpt; add usage tracking for the designated user
@@ -128,8 +128,29 @@ GPT-3 A set of models that can understand and generate natural language
             return False

     def get_ai_post(self, res, author_id=False, answer_id=False, **kwargs):
-        # res = self.filter_sensitive_words(res)
-        return res
+        if res and isinstance(res, dict):
+            data = res['content'].replace(' .', '.').strip()
+            if 'usage' in res:
+                usage = res['usage']
+                prompt_tokens = usage['prompt_tokens']
+                completion_tokens = usage['completion_tokens']
+                total_tokens = usage['total_tokens']
+                vals = {
+                    'human_prompt_tokens': author_id.human_prompt_tokens + prompt_tokens,
+                    'ai_completion_tokens': author_id.ai_completion_tokens + completion_tokens,
+                    'tokens_total': author_id.tokens_total + total_tokens,
+                    'used_number': author_id.used_number + 1,
+                }
+                if not author_id.first_ask_time:
+                    ask_date = fields.Datetime.now()
+                    vals.update({
+                        'first_ask_time': ask_date
+                    })
+                author_id.write(vals)
+            # res = self.filter_sensitive_words(data)
+        else:
+            data = res
+        return data

     def get_ai_system(self, content=None):
         # Get the base AI role setting, role: system
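A note on the hunk above: get_ai_post now does the token bookkeeping. It expects a dict with at least a 'content' key and, optionally, an OpenAI-style 'usage' block. A standalone sketch of that accounting in plain Python, with made-up numbers and counters assumed to start at zero:

    # Hypothetical input in the shape get_ai_post now handles.
    res = {
        'content': 'Hello there .',
        'usage': {'prompt_tokens': 12, 'completion_tokens': 7, 'total_tokens': 19},
    }
    data = res['content'].replace(' .', '.').strip()   # -> 'Hello there.'
    usage = res['usage']
    # Counters accumulate on the asking partner record (assumed to start at 0 here).
    vals = {
        'human_prompt_tokens': 0 + usage['prompt_tokens'],
        'ai_completion_tokens': 0 + usage['completion_tokens'],
        'tokens_total': 0 + usage['total_tokens'],
        'used_number': 0 + 1,
    }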
@@ -187,7 +208,7 @@ GPT-3 A set of models that can understand and generate natural language
         # AI role setting
         sys_content = self.get_ai_system(kwargs.get('sys_content'))
         if sys_content:
-            messages.insert(sys_content)
+            messages.insert(0, sys_content)
         pdata = {
             "model": self.ai_model,
             "messages": messages,
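The small change above fixes a crash: list.insert() needs an index, so the old messages.insert(sys_content) raised a TypeError. Illustration with hypothetical message contents:

    messages = [{"role": "user", "content": "Hi"}]
    sys_content = {"role": "system", "content": "You are a helpful assistant."}
    # messages.insert(sys_content) raises TypeError: insert expected 2 arguments, got 1
    messages.insert(0, sys_content)   # the system prompt goes first, before the user turn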
@@ -201,25 +222,7 @@ GPT-3 A set of models that can understand and generate natural language
         _logger.warning('=====================open input pdata: %s' % pdata)
         response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
         try:
-            # todo: pass the full res to self.get_ai_post, return the actual content from there, and move the token accounting into the post method
             res = response.json()
-            if 'usage' in res:
-                usage = res['usage']
-                prompt_tokens = usage['prompt_tokens']
-                completion_tokens = usage['completion_tokens']
-                total_tokens = usage['total_tokens']
-                vals = {
-                    'human_prompt_tokens': author_id.human_prompt_tokens + prompt_tokens,
-                    'ai_completion_tokens': author_id.ai_completion_tokens + completion_tokens,
-                    'tokens_total': author_id.tokens_total + total_tokens,
-                    'used_number': author_id.used_number + 1,
-                }
-                if not author_id.first_ask_time:
-                    ask_date = response.headers.get("Date")
-                    vals.update({
-                        'first_ask_time': ask_date
-                    })
-                author_id.write(vals)
             if 'choices' in res:
                 # for rec in res:
                 #     res = rec['message']['content']
@@ -278,7 +281,7 @@ GPT-3 A set of models that can understand and generate natural language
         # AI role setting
         sys_content = self.get_ai_system(kwargs.get('sys_content'))
         if sys_content:
-            messages.insert(sys_content)
+            messages.insert(0, sys_content)
         response = openai.ChatCompletion.create(
             engine=self.engine,
             messages=messages,
@@ -287,9 +290,11 @@ GPT-3 A set of models that can understand and generate natural language
             top_p=self.top_p or 0.6,
             frequency_penalty=self.frequency_penalty or 0.5,
             presence_penalty=self.presence_penalty or 0.2,
-            stop=stop)
+            stop=stop,
+            request_timeout=self.ai_timeout or 120,
+        )
         if 'choices' in response:
-            res = response['choices'][0]['message']['content'].replace(' .', '.').strip()
+            res = response['choices'][0]['message']
             return res
         else:
             _logger.warning('=====================azure output data: %s' % response)
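Two changes in the Azure call above: request_timeout is passed (a request-level timeout accepted by the pre-1.0 openai Python client, here assumed to be what self.ai_timeout configures), and the method now returns the whole message dict rather than a cleaned string, leaving post-processing to get_ai_post. Sketch of the new return value, assuming the usual ChatCompletion response layout:

    # Hypothetical response in the standard ChatCompletion layout.
    response = {
        'choices': [{'message': {'role': 'assistant', 'content': 'Hello there .'}}],
        'usage': {'prompt_tokens': 12, 'completion_tokens': 7, 'total_tokens': 19},
    }
    res = response['choices'][0]['message']            # new behaviour: return the dict itself
    text = res['content'].replace(' .', '.').strip()   # the cleanup now happens downstream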
@@ -87,7 +87,6 @@ fuck温总理
 插阿姨的洞洞舔女朋友的下身
 驶你爸
 轮奸做爱高潮录象
-测试
 赛你老母
 內射
 李岚清
@@ -14,13 +14,13 @@ _logger = logging.getLogger(__name__)
 class Channel(models.Model):
     _inherit = 'mail.channel'

-    def get_openai_context(self, channel_id, author_id, answer_id, minutes=30):
+    def get_openai_context(self, channel_id, author_id, answer_id, minutes=30, chat_count=1):
         # Context handling: cover both group chats and one-on-one chats
         # Handling for the new Azure API
         context_history = []
         afterTime = fields.Datetime.now() - datetime.timedelta(minutes=minutes)
         message_model = self.env['mail.message'].sudo()
-        # Message handling: take the latest question + the previous 2 interactions; concatenate earlier interactions in chronological order
+        # Message handling: take the latest question + the previous chat_count=1 interactions; concatenate earlier interactions in chronological order.
         # Note: every AI reply has a parent_id to keep the thread continuous
         # Private chat handling
         domain = [('res_id', '=', channel_id),
@@ -32,17 +32,18 @@ class Channel(models.Model):
         if self.channel_type in ['group', 'channel']:
             # Group chats add a time limit; currently search messages from everyone, without restricting author_id
             domain += [('date', '>=', afterTime)]
-        ai_msg_list = message_model.with_context(tz='UTC').search(domain, order="id desc", limit=2)
-        for ai_msg in ai_msg_list.sorted(key='id'):
-            user_content = ai_msg.parent_id.body.replace("<p>", "").replace("</p>", "")
-            ai_content = ai_msg.body.replace("<p>", "").replace("</p>", "")
-            context_history.append({
-                'role': 'user',
-                'content': user_content,
-            }, {
+        ai_msg_list = message_model.with_context(tz='UTC').search(domain, order="id desc", limit=chat_count)
+        for ai_msg in ai_msg_list:
+            user_content = ai_msg.parent_id.description.replace("<p>", "").replace("</p>", "").replace('@%s' % answer_id.name, '').lstrip()
+            ai_content = str(ai_msg.body).replace("<p>", "").replace("</p>", "").replace("<p>", "")
+            context_history.insert(0, {
                 'role': 'assistant',
                 'content': ai_content,
             })
+            context_history.insert(0, {
+                'role': 'user',
+                'content': user_content,
+            })
         return context_history

     def get_ai_response(self, ai, messages, channel, user_id, message):
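Besides switching the user turn to parent_id.description and stripping the @mention, the loop above fixes two problems: the old context_history.append({...}, {...}) passed two arguments to append(), which only takes one, and the messages come back newest-first (order="id desc"). Inserting each question/answer pair at the front yields an oldest-first history. Standalone sketch with hypothetical turns:

    context_history = []
    for user_content, ai_content in [('Q2', 'A2'), ('Q1', 'A1')]:   # newest first
        context_history.insert(0, {'role': 'assistant', 'content': ai_content})
        context_history.insert(0, {'role': 'user', 'content': user_content})
    # -> [{'role': 'user', 'content': 'Q1'}, {'role': 'assistant', 'content': 'A1'},
    #     {'role': 'user', 'content': 'Q2'}, {'role': 'assistant', 'content': 'A2'}]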
@@ -88,8 +89,8 @@ class Channel(models.Model):
         ai = user_id.gpt_id

         chatgpt_channel_id = self.env.ref('app_chatgpt.channel_chatgpt')
+        msg = message.description.replace('@%s' % answer_id.name, '').lstrip()

-        msg = msg_vals.get('body')
         # print('prompt:', prompt)
         # print('-----')
         if not msg:
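The added msg line takes the prompt from message.description and strips the bot's @mention before anything is sent to the AI. With a hypothetical bot name:

    description = '@ChatGPT what is Odoo?'
    answer_name = 'ChatGPT'                     # stands in for answer_id.name
    msg = description.replace('@%s' % answer_name, '').lstrip()
    # -> 'what is Odoo?'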
@@ -102,41 +103,38 @@ class Channel(models.Model):
             _logger.warning(_("ChatGPT Robot【%s】have not set open api key."))
             return rdata
         try:
-            openapi_context_timeout = int(self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openapi_context_timeout')) or 600
+            openapi_context_timeout = int(self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openapi_context_timeout')) or 60
         except:
-            openapi_context_timeout = 600
+            openapi_context_timeout = 60
         sync_config = self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openai_sync_config')
         openai.api_key = api_key
         # print(msg_vals)
         # print(msg_vals.get('record_name', ''))
         # print('self.channel_type :',self.channel_type)
         if ai:
+            # Non-4 models: take 0 previous turns; others take 3 turns of history
+            chat_count = 0 if '4' in ai.ai_model else 3
             if author_id != answer_id.id and self.channel_type == 'chat':
+                # Private chat
                 _logger.info(f'私聊:author_id:{author_id},partner_chatgpt.id:{answer_id.id}')
-                try:
                 channel = self.env[msg_vals.get('model')].browse(msg_vals.get('res_id'))
-                # if ai_model not in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
+            elif author_id != answer_id.id and msg_vals.get('model', '') == 'mail.channel' and msg_vals.get('res_id', 0) == chatgpt_channel_id.id:
+                # Public group chat
+                _logger.info(f'频道群聊:author_id:{author_id},partner_chatgpt.id:{answer_id.id}')
+                channel = chatgpt_channel_id
+            elif author_id != answer_id.id and msg_vals.get('model', '') == 'mail.channel' and self.channel_type in ['group', 'channel']:
+                # Other group chats
+                channel = self
+
+            try:
                 messages = [{"role": "user", "content": msg}]
-                c_history = self.get_openai_context(channel.id, author_id, answer_id, openapi_context_timeout)
+                c_history = self.get_openai_context(channel.id, author_id, answer_id, openapi_context_timeout, chat_count)
                 if c_history:
-                    messages.insert(0, c_history)
+                    messages += c_history
                 if sync_config == 'sync':
                     self.get_ai_response(ai, messages, channel, user_id, message)
                 else:
-                    self.with_delay().get_ai_response(ai, messages, channel, user_id, message)
-                except Exception as e:
-                    raise UserError(_(e))
-            elif author_id != answer_id.id and msg_vals.get('model', '') == 'mail.channel' and msg_vals.get('res_id', 0) == chatgpt_channel_id.id:
-                _logger.info(f'频道群聊:author_id:{author_id},partner_chatgpt.id:{answer_id.id}')
-                try:
-                    messages = [{"role": "user", "content": msg}]
-                    c_history = self.get_openai_context(chatgpt_channel_id.id, author_id, answer_id, openapi_context_timeout)
-                    if c_history:
-                        messages.insert(0, c_history)
-                    if sync_config == 'sync':
-                        self.get_ai_response(ai, messages, chatgpt_channel_id, user_id, message)
-                    else:
-                        self.with_delay().get_ai(ai, messages, chatgpt_channel_id, user_id, message)
+                    self.with_delay().get_ai(ai, messages, channel, user_id, message)
             except Exception as e:
                 raise UserError(_(e))

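On the history concatenation above: the old messages.insert(0, c_history) nested the whole history list as one element, which is not the flat list of role/content dicts the chat API expects; messages += c_history keeps it flat (placing the history after the latest question). Sketch with placeholder content:

    msg = 'latest question'
    c_history = [{'role': 'user', 'content': 'Q1'}, {'role': 'assistant', 'content': 'A1'}]
    messages = [{'role': 'user', 'content': msg}]
    # old: messages.insert(0, c_history) ->
    #   [[{'role': 'user', ...}, {'role': 'assistant', ...}], {'role': 'user', 'content': 'latest question'}]
    messages += c_history   # new: a flat list of role/content dicts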
@@ -7,12 +7,14 @@ from .lib.WordsSearch import WordsSearch
 class MailThread(models.AbstractModel):
     _inherit = "mail.thread"

-    @api.returns('mail.message', lambda value: value.id)
-    def message_post(self, **kwargs):
-        self.ensure_one()
-        search = WordsSearch()
-        search.SetKeywords([])
-        body = kwargs.get('body', False)
-        body = search.Replace(text=body)
-        kwargs.update({'body': body})
-        return super(MailThread, self).message_post(**kwargs)
+    # todo: do not filter here; instead, when a user sends a sensitive word, show a prompt and do not forward the message to the AI
+    # @api.returns('mail.message', lambda value: value.id)
+    # def message_post(self, **kwargs):
+    #     self.ensure_one()
+    #     search = WordsSearch()
+    #     search.SetKeywords([])
+    #     body = kwargs.get('body', False)
+    #     body = search.Replace(text=body)
+    #     kwargs.update({'body': body})
+    #     return super(MailThread, self).message_post(**kwargs)
@@ -6,7 +6,7 @@ from odoo import fields, models
 class ResConfigSettings(models.TransientModel):
     _inherit = "res.config.settings"

-    openapi_context_timeout = fields.Integer(string="Connect Timout", help="多少秒以内的聊天信息作为上下文继续", config_parameter="app_chatgpt.openapi_context_timeout")
+    openapi_context_timeout = fields.Integer(string="Connect Timout", help="群聊中多少分钟以内的聊天信息作为上下文继续", config_parameter="app_chatgpt.openapi_context_timeout")
     openai_sync_config = fields.Selection([
         ('sync', 'Synchronous'),
         ('async', 'Asynchronous')
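The help text now describes the parameter as minutes within group chats; mail_channel reads it with int(...) or 60 plus an except fallback (see the @@ -102,41 +103,38 @@ hunk). A plain-Python sketch of how that fallback behaves, assuming ir.config_parameter returns the raw string, or False when the parameter is unset:

    for raw in (False, '15', '0', 'abc'):
        try:
            timeout = int(raw) or 60
        except (TypeError, ValueError):
            timeout = 60
        print(raw, '->', timeout)
    # False -> 60, '15' -> 15, '0' -> 60, 'abc' -> 60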