fix: add ChatGPT 3.5 support

ivan deng
2023-03-03 19:07:40 +08:00
parent 40acaa0745
commit 6a3c1ff6f5
7 changed files with 1770 additions and 25 deletions


@@ -21,7 +21,7 @@
'license': 'AGPL-3',
'images': ['static/description/banner.png'],
'summary': '''
Multi Odoo ChatGPT Robot. Integration All ChatGpt Api.
Multi Odoo ChatGPT Robot. Supports ChatGPT 3.5 Turbo, text-davinci and DALL·E; integrates all ChatGPT APIs.
Chat channel with several ChatGPT Robots.
Whitelist and blacklist for Users or IP.
Based on the is_chatgpt_integration module from InTechual Solutions.


@@ -10,8 +10,28 @@ class AiRobot(models.Model):
_order = 'sequence, name'
name = fields.Char(string='Name', translate=True)
provider = fields.Selection(string="AI Provider", selection=[('openai', 'OpenAI')], required=True, default='openai')
ai_model = fields.Selection(string="AI Model", selection=[
('gpt-3.5-turbo', 'ChatGPT 3.5 Turbo'),
('gpt-3.5-turbo-0301', 'ChatGPT 3.5 Turbo (2023-03-01 snapshot)'),
('text-davinci-003', 'GPT-3 Davinci (text-davinci-003)'),
('code-davinci-002', 'Codex (code-davinci-002)'),
('text-davinci-002', 'GPT-3 Davinci (text-davinci-002)'),
('dall-e2', 'DALL·E Image'),
], required=True, default='gpt-3.5-turbo',
help="""
GPT-3.5: A set of models that improve on GPT-3 and can understand as well as generate natural language or code
DALL·E: A model that can generate and edit images given a natural language prompt
Whisper: A model that can convert audio into text
Embeddings: A set of models that can convert text into a numerical form
Codex (limited beta): A set of models that can understand and generate code, including translating natural language to code
Moderation: A fine-tuned model that can detect whether text may be sensitive or unsafe
GPT-3: A set of models that can understand and generate natural language
""")
openapi_api_key = fields.Char(string="API Key", help="Provide the API key here")
temperature = fields.Float(string='Temperature', default=0.9)
sequence = fields.Integer('Sequence', help="Determine the display order", default=10)
def action_disconnect(self):


@@ -1,9 +1,10 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2020-Present InTechual Solutions. (<https://intechualsolutions.com/>)
import openai
import requests, json
import datetime
# from transformers import TextDavinciTokenizer, TextDavinciModel
from odoo import api, fields, models, _
from odoo.exceptions import UserError
import logging
@@ -14,25 +15,59 @@ class Channel(models.Model):
_inherit = 'mail.channel'
@api.model
def get_openai(self, api_key, data, user="Odoo"):
def get_openai(self, api_key, ai_model, data, user="Odoo"):
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
pdata = {
"model": "text-davinci-003",
"prompt": data,
"temperature": 0.9,
"max_tokens": 2000,
"top_p": 1,
"frequency_penalty": 0.0,
"presence_penalty": 0.6,
"user": user,
"stop": ["Human:", "AI:"]
}
response = requests.post("https://api.openai.com/v1/completions", data=json.dumps(pdata), headers=headers)
res = response.json()
if 'choices' in res:
return '\n'.join([x['text'] for x in res['choices']])
_logger.error(res)
if ai_model == 'dall-e2':
# TODO: handle the image engine; mainly return the generated image back into the chat (an illustrative request sketch follows this file's diff)
# image_url = response['data'][0]['url']
# https://platform.openai.com/docs/guides/images/introduction
pdata = {
"prompt": data,
"n": 3,
"size": "1024x1024",
}
return 'Under construction'
elif ai_model in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
pdata = {
"model": ai_model,
"messages": [{"role": "user", "content": data}],
"temperature": 0.9,
"max_tokens": 2000,
"top_p": 1,
"frequency_penalty": 0.0,
"presence_penalty": 0.6,
"user": user,
"stop": ["Human:", "AI:"]
}
response = requests.post("https://api.openai.com/v1/chat/completions", data=json.dumps(pdata), headers=headers)
res = response.json()
if 'choices' in res:
# for rec in res:
# res = rec['message']['content']
res = '\n'.join([x['message']['content'] for x in res['choices']])
return res
else:
pdata = {
"model": ai_model,
"prompt": data,
"temperature": 0.9,
"max_tokens": 2000,
"top_p": 1,
"frequency_penalty": 0.0,
"presence_penalty": 0.6,
"user": user,
"stop": ["Human:", "AI:"]
}
response = requests.post("https://api.openai.com/v1/completions", data=json.dumps(pdata), headers=headers)
res = response.json()
if 'choices' in res:
res = '\n'.join([x['text'] for x in res['choices']])
return res
# Fetch model info
# list_model = requests.get("https://api.openai.com/v1/models", headers=headers)
# model_info = requests.get("https://api.openai.com/v1/models/%s" % ai_model, headers=headers)
return "获取结果超时,请重新跟我聊聊。"
@api.model
@@ -142,17 +177,18 @@ class Channel(models.Model):
# print(msg_vals.get('record_name', ''))
# print('self.channel_type :',self.channel_type)
if gpt_id:
chatgpt_name = str(gpt_id.name or '') + ', '
ai_model = gpt_id.ai_model or 'text-davinci-003'
# print('chatgpt_name:', chatgpt_name)
# if author_id != to_partner_id.id and (chatgpt_name in msg_vals.get('record_name', '') or 'ChatGPT' in msg_vals.get('record_name', '') ) and self.channel_type == 'chat':
if author_id != to_partner_id.id and self.channel_type == 'chat':
_logger.info(f'Direct chat: author_id:{author_id}, partner_chatgpt.id:{to_partner_id.id}')
try:
channel = self.env[msg_vals.get('model')].browse(msg_vals.get('res_id'))
prompt = self.get_openai_context(channel.id, to_partner_id.id, prompt,openapi_context_timeout)
if ai_model not in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
prompt = self.get_openai_context(channel.id, to_partner_id.id, prompt, openapi_context_timeout)
print(prompt)
# res = self.get_chatgpt_answer(prompt,partner_name)
res = self.get_openai(api_key, prompt, partner_name)
res = self.get_openai(api_key, ai_model, prompt, partner_name)
res = res.replace('\n', '<br/>')
# print('res:',res)
# print('channel:',channel)
@@ -171,10 +207,10 @@ class Channel(models.Model):
elif author_id != to_partner_id.id and msg_vals.get('model', '') == 'mail.channel' and msg_vals.get('res_id', 0) == chatgpt_channel_id.id:
_logger.info(f'Channel group chat: author_id:{author_id}, partner_chatgpt.id:{to_partner_id.id}')
try:
prompt = self.get_openai_context(chatgpt_channel_id.id, to_partner_id.id, prompt,openapi_context_timeout)
prompt = self.get_openai_context(chatgpt_channel_id.id, to_partner_id.id, prompt, openapi_context_timeout)
# print(prompt)
# res = self.get_chatgpt_answer(prompt, partner_name)
res = self.get_openai(api_key, prompt, partner_name)
res = self.get_openai(api_key, ai_model, prompt, partner_name)
res = res.replace('\n', '<br/>')
chatgpt_channel_id.with_user(user_id).message_post(body=res, message_type='comment', subtype_xmlid='mail.mt_comment',parent_id=message.id)
except Exception as e:
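The dall-e2 branch in get_openai above is still a stub (it only returns 'Under construction'). As a rough illustration only, not part of this commit, a request against the OpenAI image-generation endpoint could look like the sketch below; the helper name and the HTML returned to the chat are assumptions, while the endpoint and the data[0]['url'] response shape follow the guide linked in the TODO.

import json
import requests

def generate_image_reply(api_key, data, user="Odoo"):
    # Hypothetical helper, sketched for illustration; mirrors the headers built in get_openai.
    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
    pdata = {
        "prompt": data,        # natural-language description of the image
        "n": 1,                # number of images to generate
        "size": "1024x1024",   # 256x256, 512x512 or 1024x1024
        "user": user,
    }
    response = requests.post("https://api.openai.com/v1/images/generations",
                             data=json.dumps(pdata), headers=headers)
    res = response.json()
    if 'data' in res:
        # Return <img> tags so the image URLs render inside the channel message body.
        return '<br/>'.join(f'<img src="{item["url"]}"/>' for item in res['data'])
    return "Image generation failed, please try again."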

File diff suppressed because it is too large


@@ -0,0 +1,24 @@
{
"id": "gpt-3.5-turbo",
"object": "model",
"created": 1677610602,
"owned_by": "openai",
"permission": [
{
"id": "modelperm-ZErASyl63fhYUeMMk7QKOHAB",
"object": "model_permission",
"created": 1677691854,
"allow_create_engine": false,
"allow_sampling": true,
"allow_logprobs": true,
"allow_search_indices": false,
"allow_view": true,
"allow_fine_tuning": false,
"organization": "*",
"group": null,
"is_blocking": false
}
],
"root": "gpt-3.5-turbo",
"parent": null
}
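The JSON above is the metadata the OpenAI models endpoint returns for gpt-3.5-turbo, matching the calls commented out at the end of get_openai. A minimal sketch of how such metadata could be fetched, assuming only a valid API key (not module code):

import requests

def fetch_model_info(api_key, ai_model="gpt-3.5-turbo"):
    # Illustration only: list all models visible to the key, then one model's metadata.
    headers = {"Authorization": f"Bearer {api_key}"}
    list_models = requests.get("https://api.openai.com/v1/models", headers=headers).json()
    model_info = requests.get(f"https://api.openai.com/v1/models/{ai_model}", headers=headers).json()
    return list_models, model_info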

Binary file not shown (image changed: 261 KiB before, 265 KiB after).


@@ -7,6 +7,8 @@
<tree>
<field name="sequence" widget="handle"/>
<field name="name"/>
<field name="provider" optional="hide"/>
<field name="ai_model" optional="show"/>
<field name="openapi_api_key" password="True"/>
<field name="temperature"/>
</tree>
@@ -22,6 +24,8 @@
<group>
<group>
<field name="name"/>
<field name="provider"/>
<field name="ai_model"/>
<field name="openapi_api_key" password="True"/>
<field name="temperature"/>
<field name="sequence"/>