mirror of https://github.com/guohuadeng/app-odoo.git
synced 2025-02-23 04:11:36 +02:00
Merge branch '16.0' of https://github.com/guohuadeng/app-odoo into 16.0
@@ -1,3 +1,4 @@
 # -*- coding: utf-8 -*-
 
+from . import controllers
 from . import models
@@ -4,7 +4,7 @@
 
 {
     'name': "Database auto backup,数据库自动备份",
-    'version': '16.24.03.12',
+    'version': '16.24.05.22',
 
     'summary': 'Automated backups, optimized from auto_backup of Yenthe Van Ginneken',
 
@@ -31,7 +31,8 @@
 
     # any module necessary for this one to work correctly
     'depends': [
-        'base'
+        'base',
+        'app_odoo_customize'
    ],
     'external_dependencies': {
         'python': ['paramiko'],
@@ -43,5 +44,6 @@
         'security/ir.model.access.csv',
         'views/backup_view.xml',
         'data/backup_data.xml',
+        'views/db_backup_details.xml',
     ],
 }
app_auto_backup/controllers/__init__.py (new file, 3 lines)
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+
+from . import main
app_auto_backup/controllers/main.py (new file, 30 lines)
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+import logging
+import os
+
+from odoo import http, _
+from odoo.http import request, content_disposition
+from odoo.exceptions import AccessError, UserError
+
+_logger = logging.getLogger(__name__)
+
+
+class AppAutoBackup(http.Controller):
+
+    @http.route("/download/backupfile/<path:file_path>", type="http", auth="user")
+    def download_backupfile(self, file_path, **kw):
+        if os.path.exists(file_path):
+            try:
+                with open(file_path, 'rb') as file:
+                    file_content = file.read()
+                file_name = file_path.split("/")[-1]
+                headers = [
+                    ('Content-Type', 'application/octet-stream'),
+                    ('Content-Disposition', content_disposition(file_name)),
+                ]
+                return request.make_response(file_content, headers)
+            except Exception as e:
+                raise UserError(e)
+        else:
+            return 'File not found'
@@ -1,3 +1,4 @@
 # -*- coding: utf-8 -*-
 
 from . import db_backup
+from . import db_backup_details
@@ -35,7 +35,7 @@ class DbBackup(models.Model):
     port = fields.Char('Port', required=True, default=8069)
     name = fields.Char('Database', required=True, help='Database you want to schedule backups for',
                        default=_get_db_name)
-    folder = fields.Char('Backup Directory', help='Absolute path for storing the backups', required='True',
+    folder = fields.Char('Backup Directory', help='Absolute path for storing the backups', required=True,
                          default='/usr/lib/python3/dist-packages/odoo/backups')
     backup_type = fields.Selection([('zip', 'Zip'), ('dump', 'Dump')], 'Backup Type', required=True, default='zip')
     autoremove = fields.Boolean('Auto. Remove Backups',
@@ -73,6 +73,7 @@ class DbBackup(models.Model):
     email_to_notify = fields.Char('E-mail to notify',
                                   help='Fill in the e-mail where you want to be notified that the backup failed on '
                                        'the FTP.')
+    backup_details_ids = fields.One2many('db.backup.details', 'db_backup_id', 'Backup Details')
 
     def test_sftp_connection(self, context=None):
         self.ensure_one()
@@ -136,11 +137,17 @@ class DbBackup(models.Model):
                 fp = open(file_path, 'wb')
                 self._take_dump(rec.name, fp, 'db.backup', rec.backup_type)
                 fp.close()
+                rec.backup_details_ids.create({
+                    'name': bkp_file,
+                    'file_path': file_path,
+                    'url': '/download/backupfile/%s' % file_path,
+                    'db_backup_id': rec.id,
+                })
             except Exception as error:
-                _logger.debug(
+                _logger.warning(
                     "Couldn't backup database %s. Bad database administrator password for server running at "
                     "http://%s:%s" % (rec.name, rec.host, rec.port))
-                _logger.debug("Exact error from the exception: %s", str(error))
+                _logger.warning("Exact error from the exception: %s", str(error))
                 continue
 
         # Check if user wants to write to SFTP or not.
@@ -262,6 +269,10 @@ class DbBackup(models.Model):
                     # Only delete files (which are .dump and .zip), no directories.
                     if os.path.isfile(fullpath) and (".dump" in f or '.zip' in f):
                         _logger.info("Delete local out-of-date file: %s", fullpath)
+                        backup_details_id = self.env['db.backup.details'].search([('file_path', '=', fullpath)])
+                        if backup_details_id:
+                            backup_details_id.unlink()
+                        else:
                         os.remove(fullpath)
 
     # This is more or less the same as the default Odoo function at
app_auto_backup/models/db_backup_details.py (new file, 37 lines)
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+
+import os
+
+from odoo import api, fields, models, _
+from odoo.exceptions import AccessError, UserError
+
+
+class DbBackupDetails(models.Model):
+    _name = 'db.backup.details'
+    _description = 'Database Backup Details'
+
+    name = fields.Char(string='Name')
+    file_path = fields.Char(string="File Path")
+    url = fields.Char(string='URL')
+    db_backup_id = fields.Many2one('db.backup', 'Database Backup')
+
+    def action_download_file(self):
+        self.ensure_one()
+        if not self.file_path or not self.url:
+            raise UserError(_("File Path or URL not found."))
+        else:
+            return {
+                'type': 'ir.actions.act_url',
+                'url': self.url,
+                'target': 'new',
+            }
+
+    def unlink(self):
+        if self.file_path:
+            if os.path.exists(self.file_path):
+                os.remove(self.file_path)
+        return super(DbBackupDetails, self).unlink()
+
+    def action_remove_file(self):
+        self.ensure_one()
+        self.unlink()
@@ -1,3 +1,4 @@
 id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
 admin_access, db_backup admin access,model_db_backup,base.group_no_one,1,1,1,1
 admin_security_rule, Model db_backup admin access,model_db_backup,app_auto_backup.group_manager,1,1,1,1
+admin_db_backup_details, Model db_backup_details admin access,model_db_backup_details,app_auto_backup.group_manager,1,1,1,1
@@ -8,10 +8,8 @@
         <form string="Back-up view">
             <sheet>
                 <div class="oe_button_box" name="button_box">
-                    <button name="action_view_cron" string="View Cron"
-                            type="object" class="oe_stat_button" icon="fa-clock-o"/>
-                    <button name="action_run_cron" string="Run Backup"
-                            type="object" class="oe_stat_button" icon="fa-play-circle"/>
+                    <button name="action_view_cron" string="View Cron" type="object" class="oe_stat_button" icon="fa-clock-o"/>
+                    <button name="action_run_cron" string="Run Backup" type="object" class="oe_stat_button" icon="fa-play-circle"/>
                 </div>
                 <group col="4" colspan="4">
                     <separator col="2" string="Local backup configuration"/>
@@ -29,32 +27,21 @@
                     <separator col="2" string="SFTP"/>
                 </group>
                 <div style="width:50%;border-radius:10px;margin: 10px 0px;padding:15px 10px 15px 10px;
-background-repeat: no-repeat;background-position: 10px center;color: #9F6000;background-color: #FEEFB3;"
-                     attrs="{'invisible': [('sftp_write','=',False)]}">
+background-repeat: no-repeat;background-position: 10px center;color: #9F6000;background-color: #FEEFB3;" attrs="{'invisible': [('sftp_write','=',False)]}">
                     <b>Warning:</b>
                     Use SFTP with caution! This writes files to external servers under the path you specify.
                 </div>
                 <group name="sftp_configuration">
                     <field name="sftp_write"/>
-                    <field name="sftp_host"
-                           attrs="{'invisible':[('sftp_write', '=', False)],'required':[('sftp_write', '=', True)]}"/>
-                    <field name="sftp_port"
-                           attrs="{'invisible':[('sftp_write', '=', False)],'required':[('sftp_write', '=', True)]}"/>
-                    <field name="sftp_user"
-                           attrs="{'invisible':[('sftp_write', '=', False)], 'required':[('sftp_write', '=', True)]}"/>
-                    <field name="sftp_password"
-                           attrs="{'invisible':[('sftp_write', '=', False)],'required': [('sftp_write', '=', True)]}"
-                           password="True"/>
-                    <field name="sftp_path"
-                           attrs="{'invisible':[('sftp_write', '==', False)],'required':[('sftp_write', '==', True)]}"
-                           placeholder="For example: /odoo/backups/"/>
-                    <field name="days_to_keep_sftp"
-                           attrs="{'invisible': [('sftp_write', '=', False)], 'required': [('sftp_write', '=', True)]}"/>
+                    <field name="sftp_host" attrs="{'invisible':[('sftp_write', '=', False)],'required':[('sftp_write', '=', True)]}"/>
+                    <field name="sftp_port" attrs="{'invisible':[('sftp_write', '=', False)],'required':[('sftp_write', '=', True)]}"/>
+                    <field name="sftp_user" attrs="{'invisible':[('sftp_write', '=', False)], 'required':[('sftp_write', '=', True)]}"/>
+                    <field name="sftp_password" attrs="{'invisible':[('sftp_write', '=', False)],'required': [('sftp_write', '=', True)]}" password="True"/>
+                    <field name="sftp_path" attrs="{'invisible':[('sftp_write', '==', False)],'required':[('sftp_write', '==', True)]}" placeholder="For example: /odoo/backups/"/>
+                    <field name="days_to_keep_sftp" attrs="{'invisible': [('sftp_write', '=', False)], 'required': [('sftp_write', '=', True)]}"/>
                     <field name="send_mail_sftp_fail" attrs="{'invisible': [('sftp_write','=',False)]}"/>
-                    <field name="email_to_notify"
-                           attrs="{'invisible':['|',('send_mail_sftp_fail', '==', False), ('sftp_write', '=', False)], 'required': [('send_mail_sftp_fail', '=', True)]}"/>
-                    <button name="test_sftp_connection" type="object"
-                            attrs="{'invisible': [('sftp_write','=',False)]}" string="Test SFTP Connection"/>
+                    <field name="email_to_notify" attrs="{'invisible':['|',('send_mail_sftp_fail', '==', False), ('sftp_write', '=', False)], 'required': [('send_mail_sftp_fail', '=', True)]}"/>
+                    <button name="test_sftp_connection" type="object" attrs="{'invisible': [('sftp_write','=',False)]}" string="Test SFTP Connection"/>
                 </group>
                 <separator string="Help" colspan="2"/>
                 <div name="configuration_details">
@@ -78,6 +65,19 @@ background-repeat: no-repeat;background-position: 10px center;color: #9F6000;bac
                         <a href="https://www.odooai.cn">Contact odooai.cn!</a>
                     </p>
                 </div>
+                <notebook>
+                    <page name="backup_details" string="Backup records">
+                        <field name="backup_details_ids" readonly="1">
+                            <tree>
+                                <field name="name"/>
+                                <field name="file_path"/>
+                                <field name="url" invisible="0"/>
+                                <button name="action_download_file" type="object" title="Download File" class="fa fa-download"/>
+                                <button name="action_remove_file" type="object" title="Remove File" class="fa fa-trash"/>
+                            </tree>
+                        </field>
+                    </page>
+                </notebook>
             </sheet>
         </form>
     </field>
@@ -106,8 +106,7 @@ background-repeat: no-repeat;background-position: 10px center;color: #9F6000;bac
         <field name="view_id" ref="view_backup_config_tree"/>
     </record>
 
-    <menuitem id="auto_backup_menu" name="Back-ups" sequence="9"
-              parent="app_odoo_customize.menu_app_group"/>
-    <menuitem parent="auto_backup_menu" action="action_backup" id="backup_conf_menu"/>
+    <menuitem id="auto_backup_menu" name="Back-ups" sequence="9" parent="app_odoo_customize.menu_app_group"/>
+    <menuitem parent="auto_backup_menu" action="action_backup" id="backup_conf_menu" sequence="1"/>
 </data>
 </odoo>
app_auto_backup/views/db_backup_details.xml (new file, 24 lines)
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<odoo>
+    <record id="db_backup_details_tree_view" model="ir.ui.view">
+        <field name="name">db.backup.details.tree</field>
+        <field name="model">db.backup.details</field>
+        <field name="arch" type="xml">
+            <tree>
+                <field name="name"/>
+                <field name="file_path"/>
+                <field name="url" invisible="1"/>
+                <button name="action_download_file" type="object" title="Download File" class="fa fa-download"/>
+                <button name="action_remove_file" type="object" title="Remove File" class="fa fa-trash"/>
+            </tree>
+        </field>
+    </record>
+
+    <record id="action_db_backup_details" model="ir.actions.act_window">
+        <field name="name">Database backups</field>
+        <field name="res_model">db.backup.details</field>
+        <field name="view_mode">tree</field>
+    </record>
+
+    <menuitem id="menu_action_db_backup_details" action="action_db_backup_details" parent="auto_backup_menu" sequence="3"/>
+</odoo>
@@ -10,7 +10,7 @@
 
 {
     'name': 'ChatGPT4, China Ali,AiGC Center.Ai服务中心,聚合全网Ai',
-    'version': '16.24.04.19',
+    'version': '16.24.05.24',
     'author': 'odooai.cn',
     'company': 'odooai.cn',
     'maintainer': 'odooai.cn',
@@ -1,14 +1,15 @@
 # -*- coding: utf-8 -*-
 
-import openai.openai_object
-import requests, json
-import openai
+import requests
+import json
 import base64
+import logging
+from openai import OpenAI
+from openai import AzureOpenAI
 
 from odoo import api, fields, models, modules, tools, _
 from odoo.exceptions import UserError
 
-import logging
 _logger = logging.getLogger(__name__)
 
 
@@ -23,19 +24,18 @@ class AiRobot(models.Model):
     # update ai_robot set ai_model=set_ai_model
     ai_model = fields.Char(string="AI Model", required=True, default='auto', help='Customize input')
     set_ai_model = fields.Selection(string="Quick Set Model", selection=[
-        ('gpt-3.5-turbo-0125', 'gpt-3.5-turbo-0125(Default and Latest)'),
-        ('gpt-3.5-turbo-0613', 'gpt-3.5-turbo-0613'),
-        ('gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-16k-0613(Big text)'),
+        ('gpt-3.5-turbo-0125', 'GPT 3.5 Turbo(Default and Latest)'),
+        ('gpt-4o', 'GPT-4o'),
+        ('gpt-4-turbo', 'GPT-4 Turbo'),
         ('gpt-4', 'Chatgpt 4'),
         ('gpt-4-32k', 'Chatgpt 4 32k'),
-        ('gpt-3.5-turbo', 'Chatgpt 3.5 Turbo'),
-        ('gpt-3.5-turbo-0301', 'Chatgpt 3.5 Turbo on 20230301'),
         ('text-davinci-003', 'Chatgpt 3 Davinci'),
         ('code-davinci-002', 'Chatgpt 2 Code Optimized'),
         ('text-davinci-002', 'Chatgpt 2 Davinci'),
         ('dall-e2', 'Dall-E Image'),
     ], default='gpt-3.5-turbo-0125',
         help="""
+GPT-4o: It is multimodal (accepting text or image inputs and outputting text), and it has the same high intelligence as GPT-4 Turbo but is much more efficient—it generates text 2x faster and is 50% cheaper.
 GPT-4: Can understand Image, generate natural language or code.
 GPT-3.5: A set of models that improve on GPT-3 and can understand as well as generate natural language or code
 DALL·E: A model that can generate and edit images given a natural language prompt
@@ -172,11 +172,14 @@ GPT-3 A set of models that can understand and generate natural language
         res_post, usage, is_ai = self.get_ai_post(res, author_id, answer_id, param)
         return res
 
-    def get_ai_post(self, res, author_id=False, answer_id=False, param={}):
+    def get_ai_post(self, res, author_id=False, answer_id=False, param=None):
         # hook,高级版要替代
-        if res and author_id and isinstance(res, openai.openai_object.OpenAIObject) or isinstance(res, list) or isinstance(res, dict):
-            # 返回是个对象,那么就是ai
-            # if isinstance(res, dict):
+        if param is None:
+            param = {}
+        if not res or not author_id or (not isinstance(res, list) and not isinstance(res, dict)):
+            return res, False, False
+        usage = content = data = None
+        try:
             if self.provider == 'openai':
                 # openai 格式处理
                 usage = res['usage']
@@ -184,17 +187,38 @@ GPT-3 A set of models that can understand and generate natural language
                 # _logger.warning('===========Ai响应:%s' % content)
             elif self.provider == 'azure':
                 # azure 格式
-                usage = json.loads(json.dumps(res['usage']))
-                content = json.loads(json.dumps(res['choices'][0]['message']['content']))
+                usage = res['usage']
+                content = res['choices'][0]['message']['content']
             else:
                 usage = False
                 content = res
             data = content.replace(' .', '.').strip()
             return data, usage, True
-        else:
-            # 直接返回错误语句,那么就是非ai
+        except Exception as e:
+            _logger.error('==========app_chatgpt get_ai_post Error: %s' % e)
             return res, False, False
 
+        # if res and author_id and isinstance(res, openai.openai_object.OpenAIObject) or isinstance(res, list) or isinstance(res, dict):
+        # # 返回是个对象,那么就是ai
+        # # if isinstance(res, dict):
+        # if self.provider == 'openai':
+        # # openai 格式处理
+        # usage = res['usage']
+        # content = res['choices'][0]['message']['content']
+        # # _logger.warning('===========Ai响应:%s' % content)
+        # elif self.provider == 'azure':
+        # # azure 格式
+        # usage = json.loads(json.dumps(res['usage']))
+        # content = json.loads(json.dumps(res['choices'][0]['message']['content']))
+        # else:
+        # usage = False
+        # content = res
+        # data = content.replace(' .', '.').strip()
+        # return data, usage, True
+        # else:
+        # # 直接返回错误语句,那么就是非ai
+        # return res, False, False
+
     def get_ai_system(self, content=None):
         # 获取基础ai角色设定, role system
         sys_content = content or self.sys_content
@@ -254,56 +278,7 @@ GPT-3 A set of models that can understand and generate natural language
         else:
             stop = ["Human:", "AI:"]
         # 以下处理 open ai
-        if self.ai_model in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
-            # 基本与 azure 同,要处理 api_base
-            openai.api_key = self.openapi_api_key
-            openai.api_base = o_url.replace('/chat/completions', '')
-            if isinstance(data, list):
-                messages = data
-            else:
-                messages = [{"role": "user", "content": data}]
-            # Ai角色设定,如果没设定则再处理
-            if messages[0].get('role') != 'system':
-                sys_content = self.get_ai_system(param.get('sys_content'))
-                if sys_content:
-                    messages.insert(0, sys_content)
-            # todo: 当前反向代理方式不通,要调整为 远程主机中接受请求,post到openai,再将结果返回给请求者
-            # response = openai.ChatCompletion.create(
-            # model=self.ai_model,
-            # messages=messages,
-            # # 返回的回答数量
-            # n=1,
-            # max_tokens=max_tokens,
-            # temperature=temperature,
-            # top_p=top_p,
-            # frequency_penalty=frequency_penalty,
-            # presence_penalty=presence_penalty,
-            # stop=stop,
-            # request_timeout=request_timeout,
-            # )
-            # if 'choices' in response:
-            # return response
-            # todo: 两种方式一样,要调整 v 服务器的二次处理 /root/toai.py
-            pdata = {
-                "model": self.ai_model,
-                "messages": messages,
-                "max_tokens": max_tokens,
-                "temperature": temperature,
-                "top_p": top_p,
-                "frequency_penalty": frequency_penalty,
-                "presence_penalty": presence_penalty,
-                "stop": stop
-            }
-            response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
-            try:
-                res = response.json()
-                if 'choices' in res:
-                    return res
-            except Exception as e:
-                _logger.warning("Get Response Json failed: %s", e)
-            else:
-                _logger.warning('=====================Openai output data: %s' % response.json())
-        elif self.ai_model == 'dall-e2':
+        if self.ai_model == 'dall-e2':
             # todo: 处理 图像引擎,主要是返回参数到聊天中
             # image_url = response['data'][0]['url']
             # https://platform.openai.com/docs/guides/images/introduction
@@ -324,29 +299,112 @@ GPT-3 A set of models that can understand and generate natural language
                 "presence_penalty": 0.1,
                 "stop": stop
             }
-            response = openai.ChatCompletion.create(
-                model=self.ai_model,
-                messages=data
+            client = OpenAI(
+                api_key=self.openapi_api_key,
+                timeout=R_TIMEOUT
             )
-            # response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
-            if 'choices' in response:
-                return response
+            response = client.chat.completions.create(
+                messages=data,
+                model=self.ai_model,
+            )
+            res = response.model_dump()
+            if 'choices' in res:
+                return res
             else:
                 _logger.warning('=====================openai output data: %s' % response.json())
 
         return _("Response Timeout, please speak again.")
+        # if self.ai_model in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
+        # # 基本与 azure 同,要处理 api_base
+        # openai.api_key = self.openapi_api_key
+        # openai.api_base = o_url.replace('/chat/completions', '')
+        # if isinstance(data, list):
+        # messages = data
+        # else:
+        # messages = [{"role": "user", "content": data}]
+        # # Ai角色设定,如果没设定则再处理
+        # if messages[0].get('role') != 'system':
+        # sys_content = self.get_ai_system(param.get('sys_content'))
+        # if sys_content:
+        # messages.insert(0, sys_content)
+        # # todo: 当前反向代理方式不通,要调整为 远程主机中接受请求,post到openai,再将结果返回给请求者
+        # # response = openai.ChatCompletion.create(
+        # # model=self.ai_model,
+        # # messages=messages,
+        # # # 返回的回答数量
+        # # n=1,
+        # # max_tokens=max_tokens,
+        # # temperature=temperature,
+        # # top_p=top_p,
+        # # frequency_penalty=frequency_penalty,
+        # # presence_penalty=presence_penalty,
+        # # stop=stop,
+        # # request_timeout=request_timeout,
+        # # )
+        # # if 'choices' in response:
+        # # return response
+        # # todo: 两种方式一样,要调整 v 服务器的二次处理 /root/toai.py
+        # pdata = {
+        # "model": self.ai_model,
+        # "messages": messages,
+        # "max_tokens": max_tokens,
+        # "temperature": temperature,
+        # "top_p": top_p,
+        # "frequency_penalty": frequency_penalty,
+        # "presence_penalty": presence_penalty,
+        # "stop": stop
+        # }
+        # response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
+        # try:
+        # res = response.json()
+        # if 'choices' in res:
+        # return res
+        # except Exception as e:
+        # _logger.warning("Get Response Json failed: %s", e)
+        # else:
+        # _logger.warning('=====================Openai output data: %s' % response.json())
+        # elif self.ai_model == 'dall-e2':
+        # # todo: 处理 图像引擎,主要是返回参数到聊天中
+        # # image_url = response['data'][0]['url']
+        # # https://platform.openai.com/docs/guides/images/introduction
+        # pdata = {
+        # "prompt": data,
+        # "n": 3,
+        # "size": "1024x1024",
+        # }
+        # return '建设中'
+        # else:
+        # pdata = {
+        # "model": self.ai_model,
+        # "prompt": data,
+        # "temperature": 1,
+        # "max_tokens": max_tokens,
+        # "top_p": 0.6,
+        # "frequency_penalty": 0.1,
+        # "presence_penalty": 0.1,
+        # "stop": stop
+        # }
+        # response = openai.ChatCompletion.create(
+        # model=self.ai_model,
+        # messages=data
+        # )
+        # # response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
+        # if 'choices' in response:
+        # return response
+        # else:
+        # _logger.warning('=====================openai output data: %s' % response.json())
+
+        # return _("Response Timeout, please speak again.")
 
     def get_azure(self, data, author_id, answer_id, param={}):
         self.ensure_one()
         # only for azure
-        openai.api_type = self.provider
         if not self.endpoint:
             raise UserError(_("Please Set your AI robot's endpoint first."))
-        openai.api_base = self.endpoint
         if not self.api_version:
             raise UserError(_("Please Set your AI robot's API Version first."))
-        openai.api_version = self.api_version
-        openai.api_key = self.openapi_api_key
         if self.stop:
             stop = self.stop.split(',')
         else:
@@ -370,8 +428,15 @@ GPT-3 A set of models that can understand and generate natural language
             if sys_content:
                 messages.insert(0, sys_content)
         # 暂时不变
-        response = openai.ChatCompletion.create(
-            engine=self.engine,
+        client = AzureOpenAI(
+            api_version=self.api_version,
+            azure_endpoint=self.endpoint,
+            api_key=self.openapi_api_key,
+            timeout=request_timeout
+        )
+        response = client.chat.completions.create(
+            model=self.engine,
             messages=messages,
             # 返回的回答数量
             n=1,
@@ -381,10 +446,10 @@ GPT-3 A set of models that can understand and generate natural language
             frequency_penalty=frequency_penalty,
             presence_penalty=presence_penalty,
             stop=None,
-            request_timeout=request_timeout,
         )
-        if 'choices' in response:
-            return response
+        res = response.model_dump()
+        if 'choices' in res:
+            return res
         else:
             _logger.warning('=====================azure output data: %s' % response.json())
         return _("Response Timeout, please speak again.")
@@ -264,7 +264,7 @@ class Channel(models.Model):
         if ai and answer_id != message.author_id:
             api_key = ai.openapi_api_key
             if not api_key:
-                _logger.warning(_("ChatGPT Robot【%s】have not set open api key."))
+                _logger.warning(_("ChatGPT Robot【%s】have not set open api key.") % ai.name)
                 return rdata
             try:
                 openapi_context_timeout = int(self.env['ir.config_parameter'].sudo().get_param('app_chatgpt.openapi_context_timeout')) or 60
@@ -2,16 +2,15 @@
 <div class="oe_row oe_spaced" >
     <div class="row">
         <h2 class="oe_slogan"> Latest ChatGPT AI Center. GPT 3.5, Ali Ai, Baidu Ai, Multi Robot Support. Chat and Training </h2>
-        <h3 class="oe_slogan"> Support chatgpt 4 32k. 3.5 turbo, text-davinci, Integration All ChatGpt Api. </h3>
+        <h3 class="oe_slogan"> Support GPT-4o, chatgpt 4 32k. 3.5 turbo, text-davinci, Integration All ChatGpt Api. </h3>
         <div class="oe_row">
-            <h3>Latest update: v16.23.09.27</h3>
+            <h3>Latest update: v16.24.05.24</h3>
             <div class="row">
                 <div class="row">
                     Add Alibaba Qwen support(search 'app_ai_ali'), update chatgpt api
                 </div>
                 <img class="oe_demo oe_screenshot img img-fluid" src="demo02.jpg">
             </div>
-            <h3>Latest update: v16.23.03.16</h3>
             <div class="row">
                 <img class="oe_demo oe_screenshot img img-fluid" style="max-height: 100%;" src="banner.png">
             </div>
@@ -111,7 +110,7 @@
             <div class="oe_demo oe_screenshot img img-fluid">
                 <img src="demo1.jpg"/>
             </div>
-            <p> You can set the Temperature higer for more creative answer.</p>
+            <p> You can set the Temperature higher for more creative answer.</p>
             <div class="oe_demo oe_screenshot img img-fluid">
                 <img src="demo2.jpg"/>
             </div>
@@ -126,7 +125,7 @@
             <div class="oe_demo oe_screenshot img img-fluid">
                 <img src="demo2.jpg"/>
             </div>
-            <p> You can set the Temperature higer for more creative answer.</p>
+            <p> You can set the Temperature higher for more creative answer.</p>
             <div class="oe_demo oe_screenshot img img-fluid">
                 <img src="demo3.jpg"/>
             </div>
@@ -226,6 +225,18 @@
         <h2 class="text-center bg-info text-white pt16 pb16">- How to setup and use -</h2>
     </section>
     <section class="oe_container container">
+        <div class="oe_row oe_spaced">
+            <h4 class="pt16">
+                To install this addon, you need to install official OpenAI Python dependencies.
+            </h4>
+            <code>
+                pip install openai
+            </code>
+            or
+            <code>
+                pip install openai --upgrade
+            </code>
+        </div>
         <div class="oe_row oe_spaced">
             <h4 class="pt16">
                 1. Get ChatGPT Api key from openai or azure.
@@ -261,13 +272,13 @@
                 <img src="setup5.jpg">
             </div>
         </div>
-        <h5 class="pt16">6. You can setup the Ai system context and Character.</h5>
+        <h4 class="pt16">6. You can setup the Ai system context and Character.</h4>
         <div class="row">
             <div class="oe_demo oe_screenshot img img-fluid">
                 <img src="setup6.jpg">
             </div>
         </div>
-        <h5 class="pt16">7. You can setup Lots of channel for subjects and projects..</h5>
+        <h4 class="pt16">7. You can setup Lots of channel for subjects and projects..</h4>
         <div class="row">
             <div class="oe_demo oe_screenshot img img-fluid">
                 <img src="setup7.jpg">
@@ -14,7 +14,7 @@
             <attribute name="multi_edit">1</attribute>
         </xpath>
         <field name="sequence" position="after">
-            <field name="id" optional="hihde"/>
+            <field name="id" optional="hide"/>
         </field>
         <field name="complete_name" position="after">
             <field name="name" optional="show"/>