update app_chatgpt: get_openai

Chill
2024-04-01 15:47:31 +08:00
parent 6f1b1cb1d6
commit ab34243fe9
5 changed files with 22 additions and 65 deletions

View File

@@ -10,7 +10,7 @@
 {
     'name': 'ChatGPT4, China Ali,AiGC Center.Ai服务中心聚合全网Ai',
-    'version': '24.03.29',
+    'version': '24.04.01',
    'author': 'odooai.cn',
    'company': 'odooai.cn',
    'maintainer': 'odooai.cn',

View File

@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
+import os
 from openai import OpenAI
 from openai import AzureOpenAI
 import requests, json
@@ -23,7 +24,8 @@ class AiRobot(models.Model):
     # update ai_robot set ai_model=set_ai_model
     ai_model = fields.Char(string="AI Model", required=True, default='auto', help='Customize input')
     set_ai_model = fields.Selection(string="Quick Set Model", selection=[
-        ('gpt-3.5-turbo-0613', 'gpt-3.5-turbo-0613(Default and Latest)'),
+        ('gpt-3.5-turbo-0125', 'gpt-3.5-turbo-0125(Default and Latest)'),
+        ('gpt-3.5-turbo-0613', 'gpt-3.5-turbo-0613'),
        ('gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-16k-0613(Big text)'),
        ('gpt-4', 'Chatgpt 4'),
        ('gpt-4-32k', 'Chatgpt 4 32k'),
@@ -33,7 +35,7 @@ class AiRobot(models.Model):
        ('code-davinci-002', 'Chatgpt 2 Code Optimized'),
        ('text-davinci-002', 'Chatgpt 2 Davinci'),
        ('dall-e2', 'Dall-E Image'),
-    ], default='gpt-3.5-turbo-0613',
+    ], default='gpt-3.5-turbo-0125',
        help="""
 GPT-4: Can understand Image, generate natural language or code.
 GPT-3.5: A set of models that improve on GPT-3 and can understand as well as generate natural language or code
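Note on the two model fields above: the SQL hint in the hunk ("update ai_robot set ai_model=set_ai_model") suggests the Quick Set Model selection is simply copied into the free-text ai_model field. The commit does not show that glue code; the following is a hypothetical sketch of how such an onchange usually looks (the ai.robot model name is assumed from the ai_robot table, not taken from this diff).

# Hypothetical sketch only - not part of this commit. It illustrates the usual
# way a "quick set" selection feeds a free-text field in Odoo.
from odoo import api, models


class AiRobot(models.Model):
    _inherit = 'ai.robot'  # assumed model name, inferred from the ai_robot table

    @api.onchange('set_ai_model')
    def _onchange_set_ai_model(self):
        # Copy the picked preset (e.g. 'gpt-3.5-turbo-0125') into ai_model,
        # while keeping ai_model editable for custom model names.
        for rec in self:
            if rec.set_ai_model:
                rec.ai_model = rec.set_ai_model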
@@ -257,56 +259,7 @@ GPT-3 A set of models that can understand and generate natural language
         else:
             stop = ["Human:", "AI:"]
         # Handle the OpenAI cases below
-        if self.ai_model in ['gpt-3.5-turbo', 'gpt-3.5-turbo-0301']:
-            # Basically the same as Azure; api_base has to be handled
-            # openai.api_key = self.openapi_api_key
-            # openai.api_base = o_url.replace('/chat/completions', '')
-            if isinstance(data, list):
-                messages = data
-            else:
-                messages = [{"role": "user", "content": data}]
-            # If no AI role (system prompt) is set yet, handle it here
-            if messages[0].get('role') != 'system':
-                sys_content = self.get_ai_system(param.get('sys_content'))
-                if sys_content:
-                    messages.insert(0, sys_content)
-            # todo: the current reverse-proxy approach is broken; change it so the remote host accepts the request, posts it to openai and returns the result to the caller
-            # response = openai.ChatCompletion.create(
-            #     model=self.ai_model,
-            #     messages=messages,
-            #     # number of answers to return
-            #     n=1,
-            #     max_tokens=max_tokens,
-            #     temperature=temperature,
-            #     top_p=top_p,
-            #     frequency_penalty=frequency_penalty,
-            #     presence_penalty=presence_penalty,
-            #     stop=stop,
-            #     request_timeout=request_timeout,
-            # )
-            # if 'choices' in response:
-            #     return response
-            # todo: both approaches behave the same; adjust the second-stage processing on the v server (/root/toai.py)
-            pdata = {
-                "model": self.ai_model,
-                "messages": messages,
-                "max_tokens": max_tokens,
-                "temperature": temperature,
-                "top_p": top_p,
-                "frequency_penalty": frequency_penalty,
-                "presence_penalty": presence_penalty,
-                "stop": stop
-            }
-            response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
-            try:
-                res = response.json()
-                if 'choices' in res:
-                    return res
-            except Exception as e:
-                _logger.warning("Get Response Json failed: %s", e)
-            else:
-                _logger.warning('=====================Openai output data: %s' % response.json())
-        elif self.ai_model == 'dall-e2':
+        if self.ai_model == 'dall-e2':
             # todo: handle the image engine, mainly return its parameters into the chat
             # image_url = response['data'][0]['url']
             # https://platform.openai.com/docs/guides/images/introduction
@@ -327,14 +280,17 @@ GPT-3 A set of models that can understand and generate natural language
                 "presence_penalty": 0.1,
                 "stop": stop
             }
-            client = OpenAI(api_key=self.openapi_api_key)
+            client = OpenAI(
+                api_key=self.openapi_api_key,
+                timeout=R_TIMEOUT
+            )
             response = client.chat.completions.create(
+                messages=data,
                 model=self.ai_model,
-                messages=data
             )
-            # response = requests.post(o_url, data=json.dumps(pdata), headers=headers, timeout=R_TIMEOUT)
-            if 'choices' in response:
-                return response
+            res = response.model_dump()
+            if 'choices' in res:
+                return res
             else:
                 _logger.warning('=====================openai output data: %s' % response.json())
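The hunk above completes the move from the hand-rolled requests.post call (removed earlier in this file) to the openai>=1.0 client. Below is a minimal, standalone sketch of the same call pattern; the key, timeout, model name and prompt are placeholders, not values from the module.

# Minimal sketch of the openai>=1.0 call pattern used above.
# Placeholders: the module supplies self.openapi_api_key, R_TIMEOUT and self.ai_model.
from openai import OpenAI

client = OpenAI(
    api_key="sk-...",   # placeholder; the module passes self.openapi_api_key
    timeout=30.0,       # placeholder; the module passes its R_TIMEOUT constant
)
response = client.chat.completions.create(
    model="gpt-3.5-turbo-0125",
    messages=[{"role": "user", "content": "Hello"}],
)
# The SDK returns a ChatCompletion (pydantic) object; model_dump() turns it into
# a plain dict, so the existing "'choices' in res" check and the downstream
# dict handling keep working unchanged.
res = response.model_dump()
if "choices" in res:
    print(res["choices"][0]["message"]["content"])

Passing timeout to the client takes over the role of the old request_timeout argument of openai<1.0 and of the timeout= on the removed requests.post call.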

View File

@@ -312,10 +312,11 @@ class Channel(models.Model):
             # if msg_len * 2 >= 8000:
             #     messages = [{"role": "user", "content": msg}]
-            if sync_config == 'sync':
-                self.get_ai_response(ai, messages, channel, user_id, message)
-            else:
-                self.with_delay().get_ai_response(ai, messages, channel, user_id, message)
+            self.get_ai_response(ai, messages, channel, user_id, message)
+            # if sync_config == 'sync':
+            #     self.get_ai_response(ai, messages, channel, user_id, message)
+            # else:
+            #     self.with_delay().get_ai_response(ai, messages, channel, user_id, message)
         except Exception as e:
             raise UserError(_(e))

View File

@@ -11,7 +11,7 @@ class ResPartner(models.Model):
     is_chat_private = fields.Boolean('Allow Chat Private', default=False)
 
     @api.model
-    def im_search(self, name, limit=20):
+    def im_search(self, name, limit=20, excluded_ids=None):
         users = self.env['res.users'].search([
            ('id', '!=', self.env.user.id),
            ('name', 'ilike', name),
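The only change visible in this hunk is the signature: the override now accepts the excluded_ids keyword that newer mail clients pass to im_search, so those calls no longer fail with a TypeError. How (or whether) the argument is consumed is not shown in the diff; the snippet below is an assumed illustration of the usual handling, filtering an Odoo-style search domain.

# Assumed handling, not shown in this commit: append an exclusion leaf to an
# Odoo-style search domain so records the caller already lists are skipped.
def add_exclusion(domain, excluded_ids=None):
    """Return a copy of the domain with an ('id', 'not in', ...) leaf appended."""
    if excluded_ids:
        return domain + [('id', 'not in', list(excluded_ids))]
    return domain


# Usage with the override's existing domain leaves:
base_domain = [('name', 'ilike', 'demo')]
print(add_exclusion(base_domain, excluded_ids=[7, 42]))
# -> [('name', 'ilike', 'demo'), ('id', 'not in', [7, 42])]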

View File

@@ -4,14 +4,14 @@
 <h2 class="oe_slogan"> Latest ChatGPT AI Center. GPT 3.5, Ali Ai, Baidu Ai, Multi Robot Support. Chat and Training </h2>
 <h3 class="oe_slogan"> Support chatgpt 4 32k. 3.5 turbo, text-davinci, Integration All ChatGpt Api. </h3>
 <div class="oe_row">
-    <h3>Lastest update: v16.23.09.27</h3>
+    <h3>Latest update: v17.24.04.01</h3>
     <div class="row">
         <div class="row">
             Add Alibaba Qwen support(search 'app_ai_ali'), update chatgpt api
         </div>
         <img class="oe_demo oe_screenshot img img-fluid" src="demo02.jpg">
     </div>
-    <h3>Lastest update: v17.24.03.29</h3>
+    <h3>Latest update: v17.24.04.01</h3>
     <div class="row">
         <img class="oe_demo oe_screenshot img img-fluid" style="max-height: 100%;" src="banner.png">
     </div>