Mirror of https://github.com/guohuadeng/app-odoo.git (synced 2025-02-23 04:11:36 +02:00)
Merge branch '16.0' of https://github.com/guohuadeng/app-odoo into 16.0
@@ -1,5 +1,4 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2020-Present InTechual Solutions. (<https://intechualsolutions.com/>)

 # from . import controllers
 from . import models
@@ -10,7 +10,7 @@

 {
     'name': 'Latest ChatGPT4 AI Center. GPT 4 for image, Dall-E Image.Multi Robot Support. Chat and Training',
-    'version': '16.23.03.20',
+    'version': '16.23.03.28',
     'author': 'Sunpop.cn',
     'company': 'Sunpop.cn',
     'maintainer': 'Sunpop.cn',
@@ -1,5 +1,3 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2020-Present InTechual Solutions. (<https://intechualsolutions.com/>)
-

 from . import main
@@ -1,5 +1,4 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2020-Present InTechual Solutions. (<https://intechualsolutions.com/>)

 from odoo import http

@@ -1,5 +1,4 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2020-Present InTechual Solutions. (<https://intechualsolutions.com/>)

 from . import mail_channel
 from . import res_config_settings
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-

-import requests
+import requests,json
 import openai
 from odoo import api, fields, models, _
 from odoo.exceptions import UserError
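Note: the new json import backs the json.dumps(pdata) calls added to ai_robot below. A minimal sketch of the same request pattern, assuming a placeholder API key and prompt (not part of the commit); requests.post could equally take json=pdata and serialize the payload itself.

import json
import requests

api_key = "sk-..."  # placeholder key, for illustration only
url = "https://api.openai.com/v1/chat/completions"
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
pdata = {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hello"}]}

# Same call shape as in the diff: explicit json.dumps() plus a timeout.
response = requests.post(url, data=json.dumps(pdata), headers=headers, timeout=300)
print(response.json().get("choices", []))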
@@ -48,8 +48,76 @@ GPT-3 A set of models that can understand and generate natural language

     def action_disconnect(self):
         requests.delete('https://chatgpt.com/v1/disconnect')

-    def get_openai(self, data):
+    def get_ai(self, data, partner_name='odoo', *args):
+        # 通用方法 (generic entry point: dispatch to the provider-specific method)
+        self.ensure_one()
+        if hasattr(self, 'get_%s' % self.provider):
+            return getattr(self, 'get_%s' % self.provider)(data, partner_name, *args)
+        else:
+            return _('No robot provider found')
+
+    def get_openai(self, data, partner_name='odoo', *args):
+        self.ensure_one()
+        headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.openapi_api_key}"}
+        R_TIMEOUT = 300
+        o_url = self.endpoint or "https://api.openai.com/v1/chat/completions"
+
+        # 以下处理 open ai (OpenAI handling below)
+        # 获取模型信息 (fetch model info)
+        # list_model = requests.get("https://api.openai.com/v1/models", headers=headers)
+        # model_info = requests.get("https://api.openai.com/v1/models/%s" % ai_model, headers=headers)
+        if self.ai_model == 'dall-e2':
+            # todo: 处理 图像引擎,主要是返回参数到聊天中 (handle the image engine; return its parameters into the chat)
+            # image_url = response['data'][0]['url']
+            # https://platform.openai.com/docs/guides/images/introduction
+            pdata = {
+                "prompt": data,
+                "n": 3,
+                "size": "1024x1024",
+            }
+            return '建设中'  # "under construction"
+        elif self.ai_model in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
+            pdata = {
+                "model": self.ai_model,
+                "messages": [{"role": "user", "content": data}],
+                "temperature": 0.9,
+                "max_tokens": self.max_length or 1000,
+                "top_p": 1,
+                "frequency_penalty": 0.0,
+                "presence_penalty": 0.6,
+                "user": partner_name,
+                "stop": ["Human:", "AI:"]
+            }
+            _logger.warning('=====================open input pdata: %s' % pdata)
+            response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
+            res = response.json()
+            if 'choices' in res:
+                # for rec in res:
+                # res = rec['message']['content']
+                res = '\n'.join([x['message']['content'] for x in res['choices']])
+                return res
+        else:
+            pdata = {
+                "model": self.ai_model,
+                "prompt": data,
+                "temperature": 0.9,
+                "max_tokens": self.max_length or 1000,
+                "top_p": 1,
+                "frequency_penalty": 0.0,
+                "presence_penalty": 0.6,
+                "user": partner_name,
+                "stop": ["Human:", "AI:"]
+            }
+            response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
+            res = response.json()
+            if 'choices' in res:
+                res = '\n'.join([x['text'] for x in res['choices']])
+                return res
+
+        return "获取结果超时,请重新跟我聊聊。"  # "timed out getting a result, please chat with me again"
+
+    def get_azure(self, data, partner_name='odoo', *args):
         self.ensure_one()
         # only for azure
         openai.api_type = self.provider
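The new get_ai() resolves a provider-specific handler by name (get_openai, get_azure, ...) via hasattr/getattr, so adding a provider only means defining a matching get_<provider> method on ai.robot. A standalone sketch of that dispatch pattern, using a plain Python class rather than the actual Odoo model (the Robot class and providers here are illustrative assumptions only):

class Robot:
    def __init__(self, provider):
        self.provider = provider

    def get_ai(self, data, partner_name='odoo', *args):
        method = 'get_%s' % self.provider
        if hasattr(self, method):
            # e.g. provider 'openai' dispatches to get_openai(...)
            return getattr(self, method)(data, partner_name, *args)
        return 'No robot provider found'

    def get_openai(self, data, partner_name='odoo', *args):
        return 'openai would answer: %s' % data

print(Robot('openai').get_ai('Hello'))   # dispatches to get_openai
print(Robot('unknown').get_ai('Hello'))  # falls back to the error message

Note that in the committed get_openai(), a response without a 'choices' key falls through to the final timeout string, so API errors surface to the user as that generic message.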
@@ -14,72 +14,6 @@ _logger = logging.getLogger(__name__)
 class Channel(models.Model):
     _inherit = 'mail.channel'

-    @api.model
-    def get_openai(self, gpt_id, provider, api_key, ai_model, data, user="Odoo"):
-        if provider == 'azure':
-            res = gpt_id.get_openai(data)
-            return res
-
-        headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
-        R_TIMEOUT = 30
-        o_url = gpt_id.endpoint or "https://api.openai.com/v1/chat/completions"
-
-        # 以下处理 open ai
-        # 获取模型信息
-        # list_model = requests.get("https://api.openai.com/v1/models", headers=headers)
-        # model_info = requests.get("https://api.openai.com/v1/models/%s" % ai_model, headers=headers)
-        if ai_model == 'dall-e2':
-            # todo: 处理 图像引擎,主要是返回参数到聊天中
-            # image_url = response['data'][0]['url']
-            # https://platform.openai.com/docs/guides/images/introduction
-            pdata = {
-                "prompt": data,
-                "n": 3,
-                "size": "1024x1024",
-            }
-            return '建设中'
-        elif ai_model in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
-            pdata = {
-                "model": ai_model,
-                "messages": [{"role": "user", "content": data}],
-                "temperature": 0.9,
-                "max_tokens": gpt_id.max_length or 1000,
-                "top_p": 1,
-                "frequency_penalty": 0.0,
-                "presence_penalty": 0.6,
-                "user": user,
-                "stop": ["Human:", "AI:"]
-            }
-
-            _logger.warning('=====================open input pdata: %s' % pdata)
-
-            response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
-            res = response.json()
-            if 'choices' in res:
-                # for rec in res:
-                # res = rec['message']['content']
-                res = '\n'.join([x['message']['content'] for x in res['choices']])
-                return res
-        else:
-            pdata = {
-                "model": ai_model,
-                "prompt": data,
-                "temperature": 0.9,
-                "max_tokens": gpt_id.max_length or 1000,
-                "top_p": 1,
-                "frequency_penalty": 0.0,
-                "presence_penalty": 0.6,
-                "user": user,
-                "stop": ["Human:", "AI:"]
-            }
-            response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
-            res = response.json()
-            if 'choices' in res:
-                res = '\n'.join([x['text'] for x in res['choices']])
-                return res
-
-        return "获取结果超时,请重新跟我聊聊。"
-
     @api.model
     def get_openai_context(self, channel_id, partner_chatgpt, current_prompt, seconds=600):
         afterTime = fields.Datetime.now() - datetime.timedelta(seconds=seconds)
@@ -110,27 +44,13 @@ class Channel(models.Model):

         return '\n'.join(prompt[::-1])

-    def get_chatgpt_answer(self, prompt, partner_name):
-        response = openai.Completion.create(
-            model="text-davinci-003",
-            prompt=prompt,
-            temperature=0.6,
-            max_tokens=3000,
-            top_p=1,
-            frequency_penalty=0,
-            presence_penalty=0,
-            user=partner_name,
-        )
-        res = response['choices'][0]['text']
-        return res
-
     def _notify_thread(self, message, msg_vals=False, **kwargs):
         rdata = super(Channel, self)._notify_thread(message, msg_vals=msg_vals, **kwargs)
         # print(f'rdata:{rdata}')
         to_partner_id = self.env['res.partner']
         user_id = self.env['res.users']
         author_id = msg_vals.get('author_id')
-        gpt_id = self.env['ai.robot']
+        ai = self.env['ai.robot']
         channel_type = self.channel_type
         if channel_type == 'chat':
             channel_partner_ids = self.channel_partner_ids
@@ -141,7 +61,7 @@ class Channel(models.Model):
             gpt_wl_users = user_id.gpt_wl_users
             is_allow = message.create_uid.id in gpt_wl_users.ids
             if gpt_policy == 'all' or (gpt_policy == 'limit' and is_allow):
-                gpt_id = user_id.gpt_id
+                ai = user_id.gpt_id

         elif channel_type in ['group', 'channel']:
             # partner_ids = @ ids
@@ -156,14 +76,10 @@ class Channel(models.Model):
                 is_allow = message.create_uid.id in gpt_wl_users.ids
                 to_partner_id = user_id.partner_id
                 if gpt_policy == 'all' or (gpt_policy == 'limit' and is_allow):
-                    gpt_id = user_id.gpt_id
+                    ai = user_id.gpt_id

         chatgpt_channel_id = self.env.ref('app_chatgpt.channel_chatgpt')

-        # print('author_id:',author_id)
-
-        # print('partner_chatgpt.id:',partner_chatgpt.id)
-
         prompt = msg_vals.get('body')
         # print('prompt:', prompt)
         # print('-----')
@@ -171,8 +87,8 @@ class Channel(models.Model):
             return rdata
         # api_key = self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openapi_api_key')
         api_key = ''
-        if gpt_id:
-            api_key = gpt_id.openapi_api_key
+        if ai:
+            api_key = ai.openapi_api_key
         if not api_key:
             _logger.warning(_("ChatGPT Robot【%s】have not set open api key."))
             return rdata
@@ -186,11 +102,7 @@
         # print(msg_vals)
         # print(msg_vals.get('record_name', ''))
         # print('self.channel_type :',self.channel_type)
-        if gpt_id:
-            provider = gpt_id.provider
-            ai_model = gpt_id.ai_model or 'text-davinci-003'
-            # print('chatgpt_name:', chatgpt_name)
-            # if author_id != to_partner_id.id and (chatgpt_name in msg_vals.get('record_name', '') or 'ChatGPT' in msg_vals.get('record_name', '') ) and self.channel_type == 'chat':
+        if ai:
             if author_id != to_partner_id.id and self.channel_type == 'chat':
                 _logger.info(f'私聊:author_id:{author_id},partner_chatgpt.id:{to_partner_id.id}')
                 try:
@@ -198,8 +110,7 @@
                     # if ai_model not in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
                     prompt = self.get_openai_context(channel.id, to_partner_id.id, prompt, openapi_context_timeout)
                     print(prompt)
-                    # res = self.get_chatgpt_answer(prompt,partner_name)
-                    res = self.get_openai(gpt_id, provider, api_key, ai_model, prompt, partner_name)
+                    res = ai.get_ai(prompt, partner_name)
                     res = res.replace('\n', '<br/>')
                     # print('res:',res)
                     # print('channel:',channel)
@@ -214,16 +125,13 @@
                     # self.with_user(user_chatgpt).message_post(body=res, message_type='comment', subtype_xmlid='mail.mt_comment')
                 except Exception as e:
                     raise UserError(_(e))

             elif author_id != to_partner_id.id and msg_vals.get('model', '') == 'mail.channel' and msg_vals.get('res_id', 0) == chatgpt_channel_id.id:
                 _logger.info(f'频道群聊:author_id:{author_id},partner_chatgpt.id:{to_partner_id.id}')
                 try:
                     prompt = self.get_openai_context(chatgpt_channel_id.id, to_partner_id.id, prompt, openapi_context_timeout)
-                    # print(prompt)
-                    # res = self.get_chatgpt_answer(prompt, partner_name)
-                    res = self.get_openai(gpt_id, provider, api_key, ai_model, prompt, partner_name)
+                    res = ai.get_ai(prompt, 'odoo')
                     res = res.replace('\n', '<br/>')
-                    chatgpt_channel_id.with_user(user_id).message_post(body=res, message_type='comment', subtype_xmlid='mail.mt_comment',parent_id=message.id)
+                    chatgpt_channel_id.with_user(user_id).message_post(body=res, message_type='comment', subtype_xmlid='mail.mt_comment', parent_id=message.id)
                 except Exception as e:
                     raise UserError(_(e))
-
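Net effect on the channel model: it no longer builds OpenAI requests itself (get_openai, get_chatgpt_answer and the provider/ai_model bookkeeping are gone); _notify_thread now resolves an ai.robot record and delegates to its get_ai(). A sketch of the resulting call path, with hypothetical record names, not the actual method body:

# Sketch only: how a channel reply is produced after this commit.
# `channel`, `ai` (an ai.robot record), `user` and `message` are assumed
# to be valid records already resolved inside _notify_thread().
def reply_with_ai(channel, ai, user, message, prompt, partner_name='odoo'):
    res = ai.get_ai(prompt, partner_name)   # provider-agnostic entry point
    res = res.replace('\n', '<br/>')        # channel messages are HTML
    channel.with_user(user).message_post(
        body=res,
        message_type='comment',
        subtype_xmlid='mail.mt_comment',
        parent_id=message.id,
    )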
@@ -1,5 +1,4 @@
 # -*- coding: utf-8 -*-
-# Copyright (c) 2020-Present InTechual Solutions. (<https://intechualsolutions.com/>)

 from odoo import fields, models

@@ -21,6 +21,8 @@
         <field name="model">ai.robot</field>
         <field name="arch" type="xml">
             <form>
+                <header>
+                </header>
                 <sheet>
                     <group>
                         <group>