mirror of https://github.com/guohuadeng/app-odoo.git
synced 2025-02-23 04:11:36 +02:00

prepare #I6SC9C: handle Azure private chat, optimize app_chatgpt, and track usage per designated user (pinned)

Handle Azure private chat.
@@ -23,6 +23,7 @@
         <field name="provider">azure</field>
         <field name="endpoint">https://my.openai.azure.com</field>
         <field name="engine">gpt35</field>
+        <field name="api_version">2023-03-15-preview</field>
         <field name="sequence">8</field>
     </record>
     <record id="chatgpt4_azure" model="ai.robot">
@@ -30,6 +31,7 @@
         <field name="provider">azure</field>
         <field name="endpoint">https://my.openai.azure.com</field>
         <field name="engine">gpt4</field>
+        <field name="api_version">2023-03-15-preview</field>
         <field name="sequence">9</field>
     </record>
 </odoo>
@@ -1399,7 +1399,7 @@ msgid "Mahjong red dragon"
 msgstr ""
 
 #. module: app_chatgpt
-#: model:ir.model.fields,field_description:app_chatgpt.field_ai_robot__max_length
+#: model:ir.model.fields,field_description:app_chatgpt.field_ai_robot__max_tokens
 msgid "Max Length"
 msgstr ""
 
@@ -37,8 +37,63 @@ Moderation: A fine-tuned model that can detect whether text may be sensitive or
 GPT-3 A set of models that can understand and generate natural language
 """)
     openapi_api_key = fields.Char(string="API Key", help="Provide the API key here")
-    temperature = fields.Float(string='Temperature', default=0.9)
-    max_length = fields.Integer('Max Length', default=300)
+    # begin GPT parameters
+    # 1. stop: the condition on which the bot stops generating its reply; either a piece of text or a list. Once the generated reply contains it, generation stops.
+    # 2. temperature: controls the "novelty" of replies. Higher values make the reply more uncertain and random; lower values make it more predictable and conventional.
+    # 3. top_p: similar to temperature, also controls novelty, but by capping the cumulative probability of the most likely candidates; smaller values give more conservative replies, larger values more novel ones.
+    # 4. frequency_penalty: how strongly words that already appear frequently in the reply are penalized; the bot avoids overused words to improve diversity and novelty.
+    # 5. presence_penalty: the counterpart of frequency_penalty; it weights lower-frequency words, so the bot tries to use less common words, again improving diversity and novelty.
+    max_tokens = fields.Integer('Max response', default=600,
+                                help="""
+Set a limit on the number of tokens per model response.
+The API supports a maximum of 4000 tokens shared between the prompt
+(including system message, examples, message history, and user query) and the model's response.
+One token is roughly 4 characters for typical English text.
+""")
+    temperature = fields.Float(string='Temperature', default=0.9,
+                               help="""
+Controls randomness. Lowering the temperature means that the model will produce
+more repetitive and deterministic responses.
+Increasing the temperature will result in more unexpected or creative responses.
+Try adjusting temperature or Top P but not both.
+""")
+    top_p = fields.Float('Top probabilities', default=0.6,
+                         help="""
+Similar to temperature, this controls randomness but uses a different method.
+Lowering Top P will narrow the model's token selection to likelier tokens.
+Increasing Top P will let the model choose from tokens with both high and low likelihood.
+Try adjusting temperature or Top P but not both.
+""")
+    # avoid overused words
+    frequency_penalty = fields.Float('Frequency penalty', default=0.5,
+                                     help="""
+Reduce the chance of repeating a token proportionally based on how often it has appeared in the text so far.
+This decreases the likelihood of repeating the exact same text in a response.
+""")
+    # avoid rare words
+    presence_penalty = fields.Float('Presence penalty', default=0.2,
+                                    help="""
+Reduce the chance of repeating any token that has appeared in the text at all so far.
+This increases the likelihood of introducing new topics in a response.
+""")
+    # keywords that stop the reply
+    stop = fields.Char('Stop sequences',
+                       help="""
+Use ',' to separate stop keywords.
+Make responses stop at a desired point, such as the end of a sentence or list.
+Specify up to four sequences where the model will stop generating further tokens in a response.
+The returned text will not contain the stop sequence.
+""")
+    # role setup
+    sys_content = fields.Char('System message',
+                              help="""
+Give the model instructions about how it should behave and any context it should reference when generating a response.
+You can describe the assistant's personality,
+tell it what it should and shouldn't answer, and tell it how to format responses.
+There's no token limit for this section, but it will be included with every API call,
+so it counts against the overall token limit.
+""")
+    # end GPT parameters
     endpoint = fields.Char('End Point', default='https://api.openai.com/v1/chat/completions')
     engine = fields.Char('Engine', help='If use Azure, Please input the Model deployment name.')
     api_version = fields.Char('API Version', default='2022-12-01')
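
The new fields map one-to-one onto the chat-completions request body. A minimal sketch of how such a payload could be assembled outside Odoo, assuming an OpenAI-compatible endpoint; API_KEY is a placeholder, and the parameter values mirror the field defaults above:

    import json
    import requests

    API_KEY = "sk-..."  # placeholder

    payload = {
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hello"}],
        "max_tokens": 600,          # max_tokens field
        "temperature": 0.9,         # temperature field
        "top_p": 0.6,               # top_p field
        "frequency_penalty": 0.5,   # frequency_penalty field
        "presence_penalty": 0.2,    # presence_penalty field
        "stop": ["Human:", "AI:"],  # parsed from the comma-separated stop field
    }
    response = requests.post(
        "https://api.openai.com/v1/chat/completions",
        headers={"Content-Type": "application/json",
                 "Authorization": f"Bearer {API_KEY}"},
        data=json.dumps(payload),
        timeout=120,
    )
    print(response.json()["choices"][0]["message"]["content"])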
@@ -50,32 +105,39 @@ GPT-3 A set of models that can understand and generate natural language
     def action_disconnect(self):
         requests.delete('https://chatgpt.com/v1/disconnect')
 
-    def get_ai(self, data, sender_id=False, answer_id=False, **kwargs):
+    def get_ai(self, data, author_id=False, answer_id=False, **kwargs):
         # generic method
-        # sender_id: the requesting partner_id record
+        # author_id: the requesting partner_id record
         # answer_id: the answering partner_id record
         # kwargs: variable keyword arguments passed as a dict
         self.ensure_one()
         # pre hook: normally returns False; if something is wrong it returns the response content
-        res_pre = self.get_ai_pre(data, sender_id, answer_id, **kwargs)
+        res_pre = self.get_ai_pre(data, author_id, answer_id, **kwargs)
         if res_pre:
             return res_pre
         if hasattr(self, 'get_%s' % self.provider):
-            res = getattr(self, 'get_%s' % self.provider)(data, sender_id, answer_id, **kwargs)
+            res = getattr(self, 'get_%s' % self.provider)(data, author_id, answer_id, **kwargs)
         else:
             res = _('No robot provider found')
 
         # post hook: returns the processed content; used for sensitive-word filtering, etc.
-        res_post = self.get_ai_post(res, sender_id, answer_id, **kwargs)
+        res_post = self.get_ai_post(res, author_id, answer_id, **kwargs)
         return res_post
 
-    def get_ai_pre(self, data, sender_id=False, answer_id=False, **kwargs):
+    def get_ai_pre(self, data, author_id=False, answer_id=False, **kwargs):
         return False
 
-    def get_ai_post(self, res, sender_id=False, answer_id=False, **kwargs):
-        res = self.filter_sensitive_words(res)
+    def get_ai_post(self, res, author_id=False, answer_id=False, **kwargs):
+        # res = self.filter_sensitive_words(res)
         return res
 
+    def get_ai_system(self, content=None):
+        # get the base AI role setting (role: system)
+        sys_content = content or self.sys_content
+        if sys_content:
+            return {"role": "system", "content": sys_content}
+        return {}
+
     def get_ai_model_info(self):
         self.ensure_one()
         headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.openapi_api_key}"}
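
get_ai is a template method: a pre hook can short-circuit the call, the provider-specific handler is resolved by name (get_openai, get_azure, ...), and a post hook filters the result. A standalone sketch of the same pattern; the class and stub bodies here are illustrative, not module code:

    class Robot:
        """Illustrative stand-in for ai.robot; 'provider' selects the handler."""
        provider = "openai"

        def get_ai(self, data, author_id=False, answer_id=False, **kwargs):
            pre = self.get_ai_pre(data, author_id, answer_id, **kwargs)
            if pre:
                return pre  # pre hook short-circuits, e.g. quota exceeded
            handler = getattr(self, "get_%s" % self.provider, None)
            res = handler(data, author_id, answer_id, **kwargs) if handler \
                else "No robot provider found"
            return self.get_ai_post(res, author_id, answer_id, **kwargs)

        def get_ai_pre(self, data, author_id=False, answer_id=False, **kwargs):
            return False

        def get_ai_post(self, res, author_id=False, answer_id=False, **kwargs):
            return res  # hook point for sensitive-word filtering

        def get_openai(self, data, author_id, answer_id, **kwargs):
            return "stub reply"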
@@ -109,19 +171,63 @@ GPT-3 A set of models that can understand and generate natural language
             r_text = 'No response.'
         raise UserError(r_text)
 
-    def get_openai(self, data, sender_id, answer_id, *args):
+    def get_openai(self, data, author_id, answer_id, **kwargs):
         self.ensure_one()
         headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.openapi_api_key}"}
         R_TIMEOUT = self.ai_timeout or 120
         o_url = self.endpoint or "https://api.openai.com/v1/chat/completions"
-        partner_name = 'odoo'
-        # if sender_id:
-        #     partner_name = sender_id.name
+        if self.stop:
+            stop = self.stop.split(',')
+        else:
+            stop = ["Human:", "AI:"]
         # handle OpenAI below
-        # fetch model info
-        # list_model = requests.get("https://api.openai.com/v1/models", headers=headers)
-        # model_info = requests.get("https://api.openai.com/v1/models/%s" % ai_model, headers=headers)
-        if self.ai_model == 'dall-e2':
+        if self.ai_model in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
+            messages = [{"role": "user", "content": data}]
+            # AI role setup
+            sys_content = self.get_ai_system(kwargs.get('sys_content'))
+            if sys_content:
+                messages.insert(0, sys_content)
+            pdata = {
+                "model": self.ai_model,
+                "messages": messages,
+                "temperature": self.temperature or 0.9,
+                "max_tokens": self.max_tokens or 1000,
+                "top_p": self.top_p or 0.6,
+                "frequency_penalty": self.frequency_penalty or 0.5,
+                "presence_penalty": self.presence_penalty or 0.2,
+                "stop": stop
+            }
+            _logger.warning('=====================open input pdata: %s' % pdata)
+            response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
+            try:
+                # todo: hand the full res to self.get_ai_post, return only the actual content, and move the token accounting into the post method
+                res = response.json()
+                if 'usage' in res:
+                    usage = res['usage']
+                    prompt_tokens = usage['prompt_tokens']
+                    completion_tokens = usage['completion_tokens']
+                    total_tokens = usage['total_tokens']
+                    vals = {
+                        'human_prompt_tokens': author_id.human_prompt_tokens + prompt_tokens,
+                        'ai_completion_tokens': author_id.ai_completion_tokens + completion_tokens,
+                        'tokens_total': author_id.tokens_total + total_tokens,
+                        'used_number': author_id.used_number + 1,
+                    }
+                    if not author_id.first_ask_time:
+                        ask_date = response.headers.get("Date")
+                        vals.update({
+                            'first_ask_time': ask_date
+                        })
+                    author_id.write(vals)
+                if 'choices' in res:
+                    # for rec in res:
+                    #     res = rec['message']['content']
+                    res = '\n'.join([x['message']['content'] for x in res['choices']])
+                    return res
+            except Exception as e:
+                _logger.warning("Get Response Json failed: %s", e)
+        elif self.ai_model == 'dall-e2':
             # todo: handle the image engine; mainly return its parameters into the chat
             # image_url = response['data'][0]['url']
             # https://platform.openai.com/docs/guides/images/introduction
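
The usage-accounting block above assumes the standard chat-completions response shape; roughly, with illustrative values:

    # The code reads res['usage'] to accumulate the partner's human_prompt_tokens /
    # ai_completion_tokens / tokens_total counters, and joins
    # res['choices'][i]['message']['content'] into the reply text.
    res = {
        "choices": [
            {"message": {"role": "assistant", "content": "Hi, how can I help?"}}
        ],
        "usage": {
            "prompt_tokens": 12,
            "completion_tokens": 8,
            "total_tokens": 20,
        },
    }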
@@ -131,56 +237,15 @@ GPT-3 A set of models that can understand and generate natural language
                 "size": "1024x1024",
             }
             return '建设中'
-        elif self.ai_model in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
-            pdata = {
-                "model": self.ai_model,
-                "messages": [{"role": "user", "content": data}],
-                "temperature": 0.9,
-                "max_tokens": self.max_length or 1000,
-                "top_p": 1,
-                "frequency_penalty": 0.0,
-                "presence_penalty": 0.6,
-                "user": partner_name,
-                "stop": ["Human:", "AI:"]
-            }
-            _logger.warning('=====================open input pdata: %s' % pdata)
-            response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
-            try:
-                res = response.json()
-                if 'usage' in res:
-                    usage = res['usage']
-                    prompt_tokens = usage['prompt_tokens']
-                    completion_tokens = usage['completion_tokens']
-                    total_tokens = usage['total_tokens']
-                    vals = {
-                        'human_prompt_tokens': sender_id.human_prompt_tokens + prompt_tokens,
-                        'ai_completion_tokens': sender_id.ai_completion_tokens + completion_tokens,
-                        'tokens_total': sender_id.tokens_total + total_tokens,
-                        'used_number': sender_id.used_number + 1,
-                    }
-                    if not sender_id.first_ask_time:
-                        ask_date = response.headers.get("Date")
-                        vals.update({
-                            'first_ask_time': ask_date
-                        })
-                    sender_id.write(vals)
-                if 'choices' in res:
-                    # for rec in res:
-                    #     res = rec['message']['content']
-                    res = '\n'.join([x['message']['content'] for x in res['choices']])
-                    return res
-            except Exception as e:
-                _logger.warning("Get Response Json failed: %s", e)
         else:
             pdata = {
                 "model": self.ai_model,
                 "prompt": data,
                 "temperature": 0.9,
-                "max_tokens": self.max_length or 1000,
+                "max_tokens": self.max_tokens or 1000,
                 "top_p": 1,
                 "frequency_penalty": 0.0,
                 "presence_penalty": 0.6,
-                "user": partner_name,
                 "stop": ["Human:", "AI:"]
             }
             response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
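
With the gpt-3.5 branch hoisted above, the remaining else branch still targets the legacy completions API: it sends a flat prompt string and reads choices[i]['text'], whereas the chat branch sends a messages list and reads choices[i]['message']['content']. Side by side, as a sketch (the legacy model name is illustrative):

    # Legacy completions request vs. chat completions request.
    legacy_payload = {"model": "text-davinci-003", "prompt": "Hello"}
    chat_payload = {"model": "gpt-3.5-turbo",
                    "messages": [{"role": "user", "content": "Hello"}]}

    # Matching response parsing:
    # legacy: res["choices"][0]["text"]
    # chat:   res["choices"][0]["message"]["content"]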
@@ -189,9 +254,9 @@ GPT-3 A set of models that can understand and generate natural language
                 res = '\n'.join([x['text'] for x in res['choices']])
                 return res
 
-        return "获取结果超时,请重新跟我聊聊。"
+        return _("Response Timeout, please speak again.")
 
-    def get_azure(self, data, sender_id, answer_id, *args):
+    def get_azure(self, data, author_id, answer_id, **kwargs):
         self.ensure_one()
         # only for azure
         openai.api_type = self.provider
@@ -202,21 +267,29 @@ GPT-3 A set of models that can understand and generate natural language
             raise UserError(_("Please Set your AI robot's API Version first."))
         openai.api_version = self.api_version
         openai.api_key = self.openapi_api_key
-        pdata = {
-            "engine": self.engine,
-            "prompt": data,
-            "temperature": self.temperature or 0.9,
-            "max_tokens": self.max_length or 600,
-            "top_p": 0.5,
-            "frequency_penalty": 0,
-            "presence_penalty": 0,
-            "stop": ["Human:", "AI:"],
-        }
-        _logger.warning('=====================azure input data: %s' % pdata)
-        response = openai.Completion.create(pdata)
+        if self.stop:
+            stop = self.stop.split(',')
+        else:
+            stop = ["Human:", "AI:"]
+        if isinstance(data, list):
+            messages = data
+        else:
+            messages = [{"role": "user", "content": data}]
+        # AI role setup
+        sys_content = self.get_ai_system(kwargs.get('sys_content'))
+        if sys_content:
+            messages.insert(0, sys_content)
+        response = openai.ChatCompletion.create(
+            engine=self.engine,
+            messages=messages,
+            temperature=self.temperature or 0.9,
+            max_tokens=self.max_tokens or 600,
+            top_p=self.top_p or 0.6,
+            frequency_penalty=self.frequency_penalty or 0.5,
+            presence_penalty=self.presence_penalty or 0.2,
+            stop=stop)
         if 'choices' in response:
-            res = response['choices'][0]['text'].replace(' .', '.').strip()
+            res = response['choices'][0]['message']['content'].replace(' .', '.').strip()
             return res
         else:
             _logger.warning('=====================azure output data: %s' % response)
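
For Azure, the commit switches from openai.Completion to openai.ChatCompletion and addresses the deployment via engine=. A self-contained sketch of the same call with the pre-1.0 openai SDK; the endpoint, key, and deployment name are placeholders:

    import openai

    # Placeholders: substitute your Azure resource endpoint, key, and deployment name.
    openai.api_type = "azure"
    openai.api_base = "https://my.openai.azure.com"
    openai.api_version = "2023-03-15-preview"
    openai.api_key = "<azure-key>"

    response = openai.ChatCompletion.create(
        engine="gpt35",  # the model deployment name, per the engine field's help text
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello"},
        ],
        temperature=0.9,
        max_tokens=600,
        stop=["Human:", "AI:"],
    )
    print(response["choices"][0]["message"]["content"])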
@@ -977,7 +977,6 @@ GY
 有容奶大
 李总统
 操你媽
-GP
 你的逼……真紧
 GN
 GM
@@ -1412,7 +1411,6 @@ blowjobs
 耶和華
 奶大穴肥多条肉棒难满足
 瞿秋白
-gp
 dong fang chuan shuo
 saga
 藏春阁【全免费】
@@ -14,40 +14,41 @@ _logger = logging.getLogger(__name__)
 class Channel(models.Model):
     _inherit = 'mail.channel'
 
-    @api.model
-    def get_openai_context(self, channel_id, partner_chatgpt, current_prompt, seconds=600):
-        afterTime = fields.Datetime.now() - datetime.timedelta(seconds=seconds)
+    def get_openai_context(self, channel_id, author_id, answer_id, minutes=30):
+        # context handling: must cover both group chats and one-on-one chats
+        # handling for the new Azure API
+        context_history = []
+        afterTime = fields.Datetime.now() - datetime.timedelta(minutes=minutes)
         message_model = self.env['mail.message'].sudo()
-        prompt = [f"Human:{current_prompt}\nAI:", ]
+        # message handling: take the latest question plus the previous 2 interactions, concatenating earlier exchanges in chronological order
+        # note: every AI reply carries a parent_id, which maintains continuity
+        # private chat handling
         domain = [('res_id', '=', channel_id),
                   ('model', '=', 'mail.channel'),
                   ('message_type', '!=', 'user_notification'),
-                  ('parent_id', '=', False),
-                  ('date', '>=', afterTime),
-                  ('author_id', '=', self.env.user.partner_id.id)]
-        messages = message_model.with_context(tz='UTC').search(domain, order="id desc", limit=15)
-        # print('domain:',domain)
-        # print('messages:',messages)
-        for msg in messages:
-            ai_msg = message_model.search([("res_id", "=", channel_id),
-                                           ('model', '=', msg.model),
-                                           ('parent_id', '=', msg.id),
-                                           ('author_id', '=', partner_chatgpt),
-                                           ('body', '!=', '<p>获取结果超时,请重新跟我聊聊。</p>')])
-            if ai_msg:
-                prompt.append("Human:%s\nAI:%s" % (
-                    msg.body.replace("<p>", "").replace("</p>", ""), ai_msg.body.replace("<p>", "").replace("</p>", "")))
-            else:
-                _logger.error(f"not find for id:{str(msg.id)}")
+                  ('parent_id', '!=', False),
+                  ('author_id', '=', answer_id.id),
+                  ('body', '!=', '<p>%s</p>' % _('Response Timeout, please speak again.'))]
+        if self.channel_type in ['group', 'channel']:
+            # group chats add a time limit; for now search everyone, without restricting author_id
+            domain += [('date', '>=', afterTime)]
+        ai_msg_list = message_model.with_context(tz='UTC').search(domain, order="id desc", limit=2)
+        for ai_msg in ai_msg_list.sorted(key='id'):
+            user_content = ai_msg.parent_id.body.replace("<p>", "").replace("</p>", "")
+            ai_content = ai_msg.body.replace("<p>", "").replace("</p>", "")
+            context_history.extend([{
+                'role': 'user',
+                'content': user_content,
+            }, {
+                'role': 'assistant',
+                'content': ai_content,
+            }])
+        return context_history
 
-        return '\n'.join(prompt[::-1])
-
-    def get_ai(self, ai, prompt, partner_name, channel, user_id, message):
-        sender_id = message.create_uid.partner_id
+    def get_ai_response(self, ai, messages, channel, user_id, message):
+        author_id = message.create_uid.partner_id
         answer_id = user_id.partner_id
-        res = ai.get_ai(prompt, sender_id, answer_id)
+        res = ai.get_ai(messages, author_id, answer_id)
         if res:
             res = res.replace('\n', '<br/>')
             channel.with_user(user_id).message_post(body=res, message_type='comment', subtype_xmlid='mail.mt_comment', parent_id=message.id)
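
get_openai_context now returns role-tagged history instead of a Human:/AI: transcript string: each stored AI reply contributes a user/assistant pair, with the reply's parent_id supplying the question. The caller prepends this history to the new question, so the messages handed to the robot look roughly like this (contents illustrative):

    # Two prior exchanges followed by the new question.
    messages = [
        {"role": "user", "content": "What is Odoo?"},
        {"role": "assistant", "content": "Odoo is an open-source ERP suite."},
        {"role": "user", "content": "Does it have a chat module?"},
        {"role": "assistant", "content": "Yes, mail.channel powers Discuss."},
        {"role": "user", "content": "Can a bot answer there?"},  # the new prompt
    ]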
@@ -55,21 +56,21 @@ class Channel(models.Model):
     def _notify_thread(self, message, msg_vals=False, **kwargs):
         rdata = super(Channel, self)._notify_thread(message, msg_vals=msg_vals, **kwargs)
         # print(f'rdata:{rdata}')
-        to_partner_id = self.env['res.partner']
+        answer_id = self.env['res.partner']
         user_id = self.env['res.users']
         author_id = msg_vals.get('author_id')
         ai = self.env['ai.robot']
         channel_type = self.channel_type
         if channel_type == 'chat':
             channel_partner_ids = self.channel_partner_ids
-            to_partner_id = channel_partner_ids - message.author_id
-            user_id = to_partner_id.mapped('user_ids').filtered(lambda r: r.gpt_id)[:1]
-            if user_id and to_partner_id.gpt_id:
+            answer_id = channel_partner_ids - message.author_id
+            user_id = answer_id.mapped('user_ids').filtered(lambda r: r.gpt_id)[:1]
+            if user_id and answer_id.gpt_id:
                 gpt_policy = user_id.gpt_policy
                 gpt_wl_users = user_id.gpt_wl_users
                 is_allow = message.create_uid.id in gpt_wl_users.ids
                 if gpt_policy == 'all' or (gpt_policy == 'limit' and is_allow):
-                    ai = to_partner_id.gpt_id
+                    ai = answer_id.gpt_id
 
         elif channel_type in ['group', 'channel']:
             # partner_ids = @ ids
@@ -82,16 +83,16 @@ class Channel(models.Model):
                 gpt_policy = user_id.gpt_policy
                 gpt_wl_users = user_id.gpt_wl_users
                 is_allow = message.create_uid.id in gpt_wl_users.ids
-                to_partner_id = user_id.partner_id
+                answer_id = user_id.partner_id
                 if gpt_policy == 'all' or (gpt_policy == 'limit' and is_allow):
                     ai = user_id.gpt_id
 
         chatgpt_channel_id = self.env.ref('app_chatgpt.channel_chatgpt')
 
-        prompt = msg_vals.get('body')
+        msg = msg_vals.get('body')
         # print('prompt:', prompt)
         # print('-----')
-        if not prompt:
+        if not msg:
             return rdata
         # api_key = self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openapi_api_key')
         api_key = ''
@@ -106,48 +107,36 @@ class Channel(models.Model):
         openapi_context_timeout = 600
         sync_config = self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openai_sync_config')
         openai.api_key = api_key
-        partner_name = ''
         # print(msg_vals)
         # print(msg_vals.get('record_name', ''))
         # print('self.channel_type :',self.channel_type)
         if ai:
-            if author_id != to_partner_id.id and self.channel_type == 'chat':
-                _logger.info(f'私聊:author_id:{author_id},partner_chatgpt.id:{to_partner_id.id}')
+            if author_id != answer_id.id and self.channel_type == 'chat':
+                _logger.info(f'私聊:author_id:{author_id},partner_chatgpt.id:{answer_id.id}')
                 try:
                     channel = self.env[msg_vals.get('model')].browse(msg_vals.get('res_id'))
                     # if ai_model not in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
-                    prompt = self.get_openai_context(channel.id, to_partner_id.id, prompt, openapi_context_timeout)
-                    print(prompt)
+                    messages = [{"role": "user", "content": msg}]
+                    c_history = self.get_openai_context(channel.id, author_id, answer_id, openapi_context_timeout)
+                    if c_history:
+                        messages = c_history + messages
                     if sync_config == 'sync':
-                        self.get_ai(ai, prompt, partner_name, channel, user_id, message)
+                        self.get_ai_response(ai, messages, channel, user_id, message)
                     else:
-                        self.with_delay().get_ai(ai, prompt, partner_name, channel, user_id, message)
-                    # res = ai.get_ai(prompt, partner_name)
-                    # res = res.replace('\n', '<br/>')
-                    # print('res:',res)
-                    # print('channel:',channel)
-                    # channel.with_user(user_id).message_post(body=res, message_type='comment',subtype_xmlid='mail.mt_comment', parent_id=message.id)
-                    # channel.with_user(user_chatgpt).message_post(body=res, message_type='notification', subtype_xmlid='mail.mt_comment')
-                    # channel.sudo().message_post(
-                    #     body=res,
-                    #     author_id=partner_chatgpt.id,
-                    #     message_type="comment",
-                    #     subtype_xmlid="mail.mt_comment",
-                    # )
-                    # self.with_user(user_chatgpt).message_post(body=res, message_type='comment', subtype_xmlid='mail.mt_comment')
+                        self.with_delay().get_ai_response(ai, messages, channel, user_id, message)
                 except Exception as e:
                     raise UserError(_(e))
-            elif author_id != to_partner_id.id and msg_vals.get('model', '') == 'mail.channel' and msg_vals.get('res_id', 0) == chatgpt_channel_id.id:
-                _logger.info(f'频道群聊:author_id:{author_id},partner_chatgpt.id:{to_partner_id.id}')
+            elif author_id != answer_id.id and msg_vals.get('model', '') == 'mail.channel' and msg_vals.get('res_id', 0) == chatgpt_channel_id.id:
+                _logger.info(f'频道群聊:author_id:{author_id},partner_chatgpt.id:{answer_id.id}')
                 try:
-                    prompt = self.get_openai_context(chatgpt_channel_id.id, to_partner_id.id, prompt, openapi_context_timeout)
+                    messages = [{"role": "user", "content": msg}]
+                    c_history = self.get_openai_context(chatgpt_channel_id.id, author_id, answer_id, openapi_context_timeout)
+                    if c_history:
+                        messages = c_history + messages
                     if sync_config == 'sync':
-                        self.get_ai(ai, prompt, 'odoo', chatgpt_channel_id, user_id, message)
+                        self.get_ai_response(ai, messages, chatgpt_channel_id, user_id, message)
                     else:
-                        self.with_delay().get_ai(ai, prompt, 'odoo', chatgpt_channel_id, user_id, message)
-                    # res = ai.get_ai(prompt, 'odoo')
-                    # res = res.replace('\n', '<br/>')
-                    # chatgpt_channel_id.with_user(user_id).message_post(body=res, message_type='comment', subtype_xmlid='mail.mt_comment', parent_id=message.id)
+                        self.with_delay().get_ai_response(ai, messages, chatgpt_channel_id, user_id, message)
                 except Exception as e:
                     raise UserError(_(e))
@@ -10,7 +10,7 @@
                 <field name="provider" optional="hide"/>
                 <field name="ai_model" optional="show"/>
                 <field name="openapi_api_key" password="True"/>
-                <field name="max_length" optional="show"/>
+                <field name="max_tokens" optional="show"/>
                 <field name="temperature"/>
             </tree>
         </field>
@@ -26,12 +26,19 @@
                     <button string="Get Model Info" type="object" name="get_ai_model_info"/>
                 </header>
                 <sheet>
+                    <div class="oe_title">
+                        <label for="name"/>
+                        <h1>
+                            <field name="name" placeholder="Robot Name" required="1"/>
+                        </h1>
+                    </div>
                     <group>
                         <group>
-                            <field name="name"/>
                             <field name="openapi_api_key" password="True" required="True"/>
                             <field name="temperature"/>
-                            <field name="sequence"/>
+                            <field name="top_p"/>
+                            <field name="frequency_penalty"/>
+                            <field name="presence_penalty"/>
                         </group>
                         <group>
                             <field name="ai_model"/>
@@ -42,11 +49,12 @@
                             <field name="provider"/>
                             <a href="https://platform.openai.com/docs/introduction" title="OpenAI Document" class="o_doc_link" target="_blank"></a>
                             </div>
-                            <field name="max_length"/>
+                            <field name="max_tokens"/>
                             <field name="engine"/>
                             <field name="endpoint"/>
                             <field name="api_version"/>
                             <field name="ai_timeout"/>
+                            <field name="sequence"/>
                         </group>
                         <group>
                             <field name="is_filtering"/>