Compare commits

Comparing `add-licens` ... `upate-read` (150 commits; both branch names appear truncated in the page header).

The commit table on the page listed only abbreviated SHA1 values; author and date were not captured. In the order listed:

f42ed825a0, 355b2979d8, 846ba8c135, 992caddc30, 98d2b4109e, d5d5d8b1d0, 16e85fff4e, aacd2cf5f4, bf5d773e52, 4b9875b99f,
68359d9a9a, 7ad5b95842, 90e7797a4c, 75e97d01b0, a9581e098f, e53697f082, f43107aa76, f028d1d6a4, 9f09650780, 42ed99472d,
3f067ac81a, 097faaca1e, 96eab8ed1d, db9f3a5fa7, 3bacb31832, 73618a9161, d0e9b9de97, d60d6fa355, 0def87377b, afcfb8e959,
e91e1fbaff, 71f507ee94, 0265144b38, 592bbaab86, 89cbaf7b61, 670b2b664e, b0bfbf8fdc, 06b89317ae, 54d343aa71, 81e1c5d5a8,
0380c8508b, b8bfe308e6, 2213246de9, 56813e0ebc, 20abe04049, f57b949d30, 12da6922dc, bede76377b, 7700f92840, 7e858de0d4,
af6dcde39b, 197391d3e8, e6e3377437, c50cca3870, 8c2e8a233f, e341c75e5e, c7f00a9dac, d308f753be, 733395cc0a, cfce83bb84,
f236f0764e, a2ed9c690d, 746dd870b2, 7e27bfff71, a0c518896e, c02eb6c71d, 8350bc6842, 600525e550, 1520d8c484, 0f5be8831a,
f48a1b7aa9, c85ee31963, 304d149ba6, 640a542d1f, c08375d0bb, 8332af1181, a34566b55b, 310ae52d3f, 77006dd9e0, 98afd5b985,
4f94ba1604, 878a2100bc, 40746635be, 78662c779a, e430481aa7, da890d8599, 2c35840190, 2c720f83a8, 94282777ea, 2200e777c1,
95fa113a60, d97cbceb0d, 5f93e91c35, ceaa47b9b2, a37920b27b, 027cc87505, 8bc0d254aa, a9d4b38a7e, 20af89c0c2, 99d36a7753,
1be1e440fd, 8076fa5e02, dfeeac4943, 66811991ad, a03827c753, fce2dfe387, 0aa328dd9f, b42264437a, d816f87410, aebb88da37,
0d252005c8, 8f76adbf60, a67ffd8d8b, d3eebd2527, 85a1961b64, 121976b3b7, 261b39eebe, 789b209d03, 0ed755680e, dbfeab7242,
68fe711061, de1a201c61, c70a257baa, 2b30701b2e, f8780b03b2, aefb74822a, 0ae0268d54, c7a7c7bced, e6fc0dc96b, 3c579964ad,
b9cafe8c23, a485cfb180, fce0656ab1, fa113e6fa7, b2459a5897, b31d053191, aa387bd4af, b88dbfe4d3, 17cc9ea06c, bfd2259684,
e6289bf53a, 9f5552b1e3, 05f3695e27, 3388d70696, 2bcafcd27c, 001768b77d, ab538ac3e0, cad2b5a9da, 63a1ee45b5, ba48140de9
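A comparison like this can be reproduced locally with plain git; the sketch below uses placeholder branch names, since the real ones are shown truncated in the header above:

```sh
# <base-branch> and <head-branch> are placeholders for the two branches being compared
git log --oneline <base-branch>..<head-branch>    # the commits unique to the head branch
git diff --stat <base-branch>...<head-branch>     # file-level summary of the same comparison
```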
.github/FUNDING.yml (vendored, new file, 13 lines)
@@ -0,0 +1,13 @@

# These are supported funding model platforms

github: [onlp]
patreon: xtekky
open_collective: # Replace with a single Open Collective username
ko_fi: xtekky
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: tekky
issuehunt: xtekky
otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
.gitignore (vendored, new file, 16 lines)
@@ -0,0 +1,16 @@

# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml

.idea/

*/__pycache__/

*.log

cookie.json
Docker/Dockerfile (new file, 12 lines)
@@ -0,0 +1,12 @@

FROM python:3.10-slim

RUN apt-get update && apt-get install -y git

RUN git clone https://github.com/xtekky/gpt4free.git
WORKDIR /gpt4free
RUN pip install --no-cache-dir -r requirements.txt
RUN cp gui/streamlit_app.py .

EXPOSE 8501

CMD ["streamlit", "run", "streamlit_app.py"]
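The updated README later in this diff builds and runs this image as follows:

```sh
docker build -t gpt4free:latest -f Docker/Dockerfile .
docker run -p 8501:8501 gpt4free:latest
```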
README.md (modified, 393 changed lines)
@@ -1,316 +1,135 @@

-# Free LLM APIs
+# GPT4free - use ChatGPT, for free!!

-This repository provides reverse-engineered language models from various sources. Some of these models are already available in the repo, while others are currently being worked on.
+##### You may join our discord server for updates and support ; )
+- [Discord Link](https://discord.gg/gpt4free)

-> **Important:** If you come across any website offering free language models, please create an issue or submit a pull request with the details. We will reverse engineer it and add it to this repository.
+<img width="1383" alt="image" src="https://user-images.githubusercontent.com/98614666/233799515-1a7cb6a3-b17f-42c4-956d-8d2a0664466f.png">

-## Best Chatgpt site
-> https://chat.chatbot.sex/chat
-> This site was developed by me and includes **gpt-4**, **internet access** and **gpt-jailbreak's** like DAN
+Have you ever come across some amazing projects that you couldn't use **just because you didn't have an OpenAI API key?**

-## To-Do List
-- [x] implement poe.com create bot feature | AVAILABLE NOW
-- [x] renaming the 'poe' module to 'quora'
-- [x] add you.com api
+**We've got you covered!** This repository offers **reverse-engineered** third-party APIs for `GPT-4/3.5`, sourced from various websites. You can simply **download** this repository, and use the available modules, which are designed to be used **just like OpenAI's official package**. **Unleash ChatGPT's potential for your projects, now!** You are welcome ; ).
+
+By the way, thank you so much for `11k` stars and all the support!!
+
+## Legal Notice <a name="legal-notice"></a>
+
+This repository uses third-party APIs and AI models and is *not* associated with or endorsed by the API providers or the original developers of the models. This project is intended **for educational purposes only**.
+
+Please note the following:
+
+1. **Disclaimer**: The APIs, services, and trademarks mentioned in this repository belong to their respective owners. This project is *not* claiming any right over them.
+
+2. **Responsibility**: The author of this repository is *not* responsible for any consequences arising from the use or misuse of this repository or the content provided by the third-party APIs, or for any damage or losses caused by users' actions.
+
+3. **Educational Purposes Only**: This repository and its content are provided strictly for educational purposes. By using the information and code provided, users acknowledge that they are using the APIs and models at their own risk and agree to comply with any applicable laws and regulations.

 ## Table of Contents
+
+| Section | Description | Link | Status |
+| ------- | ----------- | ---- | ------ |
+| **To do list** | List of tasks to be done | [](#todo) | - |
+| **Current Sites** | Current websites or platforms that can be used as APIs | [](#current-sites) | - |
+| **Best Sites for gpt4** | Recommended websites or platforms for gpt4 | [](#best-sites) | - |
+| **Streamlit GPT4Free GUI** | Web-based graphical user interface for interacting with gpt4free | [](#streamlit-gpt4free-gui) | - |
+| **Docker** | Instructions on how to run gpt4free in a Docker container | [](#docker-instructions) | - |
+| **ChatGPT clone** | A ChatGPT clone with new features and scalability | [](https://chat.chatbot.sex/chat) | - |
+| **How to install** | Instructions on how to install gpt4free | [](#install) | - |
+| **Legal Notice** | Legal notice or disclaimer | [](#legal-notice) | - |
+| **Copyright** | Copyright information | [](#copyright) | - |
+| **Usage Examples** | | | |
+| `forefront` | Example usage for forefront (gpt-4) | [](./forefront/README.md) |  |
+| `quora (poe)` | Example usage for quora | [](./quora/README.md) |  |
+| `phind` | Example usage for phind | [](./phind/README.md) |  |
+| `you` | Example usage for you | [](./you/README.md) |  |
+| **Try it Out** | | | |
+| Google Colab Jupyter Notebook | Example usage for gpt4free | [](https://colab.research.google.com/github/DanielShemesh/gpt4free-colab/blob/main/gpt4free.ipynb) | - |
+| replit Example (feel free to fork this repl) | Example usage for gpt4free | [](https://replit.com/@gpt4free/gpt4free-webui) | - |

-- [Current Sites (No Authentication / Easy Account Creation)](#current-sites)
-- [Sites with Authentication (Will Reverse Engineer but Need Account Access)](#sites-with-authentication)
-- [Usage Examples](#usage-examples)
-  - [`quora (poe)`](#example-poe)
-  - [`phind`](#example-phind)
-  - [`t3nsor`](#example-t3nsor)
-  - [`ora`](#example-ora)
-  - [`writesonic`](#example-writesonic)
-  - [`you`](#example-you)
+## Todo <a name="todo"></a>
+
+- [ ] Add a GUI for the repo
+- [ ] Make a general package named `openai_rev`, instead of different folders
+- [ ] Live api status to know which are down and which can be used
+- [ ] Integrate more API's in `./unfinished` as well as other ones in the lists
+- [ ] Make an API to use as proxy for other projects
+- [ ] Make a pypi package

 ## Current Sites <a name="current-sites"></a>

-| Website | Model(s) |
-| -------------------------- | -------------------- |
-| [ora.sh](https://ora.sh) | GPT-3.5 / 4 |
-| [poe.com](https://poe.com) | GPT-4/3.5 |
-| [writesonic.com](https://writesonic.com) | GPT-3.5 / Internet |
-| [t3nsor.com](https://t3nsor.com) | GPT-3.5 |
-| [you.com](https://you.com) | GPT-3.5 / Internet / good search |
-| [phind.com](https://phind.com) | GPT-4 / Internet / good search |
+| Websites | Model(s) |
+| ---------------------------------------------------- | ------------------------------- |
+| [forefront.ai](https://chat.forefront.ai) | GPT-4/3.5 |
+| [poe.com](https://poe.com) | GPT-4/3.5 |
+| [writesonic.com](https://writesonic.com) | GPT-3.5 / Internet |
+| [t3nsor.com](https://t3nsor.com) | GPT-3.5 |
+| [you.com](https://you.com) | GPT-3.5 / Internet / good search |
+| [phind.com](https://phind.com) | GPT-4 / Internet / good search |
+| [sqlchat.ai](https://sqlchat.ai) | GPT-3.5 |
+| [chat.openai.com/chat](https://chat.openai.com/chat) | GPT-3.5 |
+| [bard.google.com](https://bard.google.com) | custom / search |
+| [bing.com/chat](https://bing.com/chat) | GPT-4/3.5 |
+| [chat.forefront.ai/](https://chat.forefront.ai/) | GPT-4/3.5 |

-## Sites with Authentication <a name="sites-with-authentication"></a>
-
-These sites will be reverse engineered but need account access:
-
-* [chat.openai.com/chat](https://chat.openai.com/chat)
-* [bard.google.com](https://bard.google.com)
-* [bing.com/chat](https://bing.com/chat)
+## Best sites <a name="best-sites"></a>
+
+#### gpt-4
+- [`/phind`](./phind/README.md)
+- pro: only stable gpt-4 with streaming ( no limit )
+- contra: weird backend prompting
+- why not `ora` anymore ? gpt-4 requires login + limited
+
+#### gpt-3.5
+- looking for a stable api at the moment

-## Usage Examples <a name="usage-examples"></a>
-
-### Example: `quora (poe)` (use like openai pypi package) - GPT-4 <a name="example-poe"></a>
-
-```python
-# quora model names: (use left key as argument)
-models = {
-    'sage'   : 'capybara',
-    'gpt-4'  : 'beaver',
-    'claude-v1.2'         : 'a2_2',
-    'claude-instant-v1.0' : 'a2',
-    'gpt-3.5-turbo'       : 'chinchilla'
-}
-```
-
-#### !! new: bot creation
-
-```python
-# import quora (poe) package
-import quora
-
-# create account
-# make sure to set enable_bot_creation to True
-token = quora.Account.create(logging = True, enable_bot_creation=True)
-
-model = quora.Model.create(
-    token = token,
-    model = 'gpt-3.5-turbo', # or claude-instant-v1.0
-    system_prompt = 'you are ChatGPT a large language model ...'
-)
-
-print(model.name) # gptx....
-
-# streaming response
-for response in quora.StreamingCompletion.create(
-    custom_model = model.name,
-    prompt ='hello world',
-    token = token):
-
-    print(response.completion.choices[0].text)
-```
-
-#### Normal Response:
-```python
-response = quora.Completion.create(model = 'gpt-4',
-    prompt = 'hello world',
-    token = token)
-
-print(response.completion.choices[0].text)
-```
+## Install <a name="install"></a>
+download or clone this GitHub repo
+install requirements with:
+```sh
+pip3 install -r requirements.txt
+```
+
+## To start gpt4free GUI <a name="streamlit-gpt4free-gui"></a>
+move `streamlit_app.py` from `./gui` to the base folder
+then run:
+`streamlit run streamlit_app.py` or `python3 -m streamlit run streamlit_app.py`
+
+## Docker <a name="docker-instructions"></a>
+Build
+```
+docker build -t gpt4free:latest -f Docker/Dockerfile .
+```
+Run
+```
+docker run -p 8501:8501 gpt4free:latest
+```
+
+## ChatGPT clone
+> currently implementing new features and trying to scale it, please be patient it may be unstable
+> https://chat.chatbot.sex/chat
+> This site was developed by me and includes **gpt-4/3.5**, **internet access** and **gpt-jailbreak's** like DAN
+> run locally here: https://github.com/xtekky/chatgpt-clone
+
+## Copyright:
+This program is licensed under the [GNU GPL v3](https://www.gnu.org/licenses/gpl-3.0.txt)
+
+Most code, with the exception of `quora/api.py` (by [ading2210](https://github.com/ading2210)), has been written by me, [xtekky](https://github.com/xtekky).
+
+### Copyright Notice: <a name="copyright"></a>
+```
+xtekky/openai-gpt4: multiple reverse engineered language-model api's to decentralise the ai industry.
+Copyright (C) 2023 xtekky
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+```

-### Example: `phind` (use like openai pypi package) <a name="example-phind"></a>
-
-```python
-# HELP WANTED: tls_client does not accept stream and timeout gets hit with long responses
-import phind
-
-prompt = 'hello world'
-
-result = phind.Completion.create(
-    model = 'gpt-4',
-    prompt = prompt,
-    results = phind.Search.create(prompt, actualSearch = False), # create search (set actualSearch to False to disable internet)
-    creative = False,
-    detailed = False,
-    codeContext = '') # up to 3000 chars of code
-
-print(result.completion.choices[0].text)
-```
-
-### Example: `t3nsor` (use like openai pypi package) <a name="example-t3nsor"></a>
-
-```python
-# Import t3nsor
-import t3nsor
-
-# t3nsor.Completion.create
-# t3nsor.StreamCompletion.create
-
-[...]
-```
-
-#### Example Chatbot
-```python
-messages = []
-
-while True:
-    user = input('you: ')
-
-    t3nsor_cmpl = t3nsor.Completion.create(
-        prompt   = user,
-        messages = messages
-    )
-
-    print('gpt:', t3nsor_cmpl.completion.choices[0].text)
-
-    messages.extend([
-        {'role': 'user', 'content': user },
-        {'role': 'assistant', 'content': t3nsor_cmpl.completion.choices[0].text}
-    ])
-```
-
-#### Streaming Response:
-
-```python
-for response in t3nsor.StreamCompletion.create(
-    prompt   = 'write python code to reverse a string',
-    messages = []):
-
-    print(response.completion.choices[0].text)
-```
-
-### Example: `ora` (use like openai pypi package) <a name="example-ora"></a>
-
-### load model (new)
-
-more gpt4 models in `/testing/ora_gpt4.py`
-
-```python
-# normal gpt-4: b8b12eaa-5d47-44d3-92a6-4d706f2bcacf
-model = ora.CompletionModel.load(chatbot_id, 'gpt-4') # or gpt-3.5
-```
-
-#### create model / chatbot:
-```python
-# import ora
-import ora
-
-# create model
-model = ora.CompletionModel.create(
-    system_prompt = 'You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible',
-    description   = 'ChatGPT Openai Language Model',
-    name          = 'gpt-3.5')
-
-# init conversation (will give you a conversationId)
-init = ora.Completion.create(
-    model  = model,
-    prompt = 'hello world')
-
-print(init.completion.choices[0].text)
-
-while True:
-    # pass in conversationId to continue conversation
-    prompt = input('>>> ')
-    response = ora.Completion.create(
-        model  = model,
-        prompt = prompt,
-        includeHistory = True, # remember history
-        conversationId = init.id)
-
-    print(response.completion.choices[0].text)
-```
-
-### Example: `writesonic` (use like openai pypi package) <a name="example-writesonic"></a>
-
-```python
-# import writesonic
-import writesonic
-
-# create account (3-4s)
-account = writesonic.Account.create(logging = True)
-
-# with logging:
-# 2023-04-06 21:50:25 INFO __main__ -> register success : '{"id":"51aa0809-3053-44f7-922a...' (2s)
-# 2023-04-06 21:50:25 INFO __main__ -> id : '51aa0809-3053-44f7-922a-2b85d8d07edf'
-# 2023-04-06 21:50:25 INFO __main__ -> token : 'eyJhbGciOiJIUzI1NiIsInR5cCI6Ik...'
-# 2023-04-06 21:50:28 INFO __main__ -> got key : '194158c4-d249-4be0-82c6-5049e869533c' (2s)
-
-# simple completion
-response = writesonic.Completion.create(
-    api_key = account.key,
-    prompt  = 'hello world'
-)
-
-print(response.completion.choices[0].text) # Hello! How may I assist you today?
-
-# conversation
-response = writesonic.Completion.create(
-    api_key = account.key,
-    prompt  = 'what is my name ?',
-    enable_memory = True,
-    history_data  = [
-        {
-            'is_sent': True,
-            'message': 'my name is Tekky'
-        },
-        {
-            'is_sent': False,
-            'message': 'hello Tekky'
-        }
-    ]
-)
-
-print(response.completion.choices[0].text) # Your name is Tekky.
-
-# enable internet
-response = writesonic.Completion.create(
-    api_key = account.key,
-    prompt  = 'who won the qatar world cup ?',
-    enable_google_results = True
-)
-
-print(response.completion.choices[0].text) # Argentina won the 2022 FIFA World Cup tournament held in Qatar ...
-```
-
-### Example: `you` (use like openai pypi package) <a name="example-you"></a>
-
-```python
-import you
-
-# simple request with links and details
-response = you.Completion.create(
-    prompt       = "hello world",
-    detailed     = True,
-    includelinks = True,)
-
-print(response)
-
-# {
-#     "response": "...",
-#     "links": [...],
-#     "extra": {...},
-#     "slots": {...}
-# }
-
-# chatbot
-chat = []
-
-while True:
-    prompt = input("You: ")
-
-    response = you.Completion.create(
-        prompt = prompt,
-        chat   = chat)
-
-    print("Bot:", response["response"])
-
-    chat.append({"question": prompt, "answer": response["response"]})
-```
-
-## Dependencies
-
-The repository is written in Python and requires the following packages:
-
-* websocket-client
-* requests
-* tls-client
-
-You can install these packages using the provided `requirements.txt` file.
-
-## Repository structure:
-
-    .
-    ├── ora/
-    ├── quora/ (/poe)
-    ├── t3nsor/
-    ├── testing/
-    ├── writesonic/
-    ├── you/
-    ├── README.md    <-- this file.
-    └── requirements.txt
forefront/README.md (new file, 15 lines)
@@ -0,0 +1,15 @@

### Example: `forefront` (use like openai pypi package) <a name="example-forefront"></a>

```python
import forefront

# create an account
token = forefront.Account.create(logging=True)
print(token)

# get a response
for response in forefront.StreamingCompletion.create(token = token,
    prompt = 'hello world', model='gpt-4'):

    print(response.completion.choices[0].text, end = '')
```
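If a single string is preferred over printing the stream, the chunks from the example above can simply be collected and joined; a minimal sketch, assuming the same `forefront` API shown in this README:

```python
# Hypothetical variant of the example above: gather the streamed chunks into one reply string
chunks = [chunk.completion.choices[0].text
          for chunk in forefront.StreamingCompletion.create(token=token, prompt='hello world', model='gpt-4')]
print(''.join(chunks))
```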
forefront/__init__.py (new file, 145 lines)
@@ -0,0 +1,145 @@

```python
from tls_client import Session
from forefront.mail import Mail
from time import time, sleep
from re import match
from forefront.typing import ForeFrontResponse
from uuid import uuid4
from requests import post
from json import loads


class Account:
    def create(proxy = None, logging = False):

        proxies = {
            'http': 'http://' + proxy,
            'https': 'http://' + proxy } if proxy else False

        start = time()

        mail = Mail(proxies)
        mail_token = None
        mail_adress = mail.get_mail()

        #print(mail_adress)

        client = Session(client_identifier='chrome110')
        client.proxies = proxies
        client.headers = {
            "origin": "https://accounts.forefront.ai",
            "user-agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
        }

        response = client.post('https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.32.6',
            data = {
                "email_address": mail_adress
            }
        )

        trace_token = response.json()['response']['id']
        if logging: print(trace_token)

        response = client.post(f"https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.32.6",
            data = {
                "strategy" : "email_code",
            }
        )

        if logging: print(response.text)

        if not 'sign_up_attempt' in response.text:
            return 'Failed to create account!'

        while True:
            sleep(1)
            for _ in mail.fetch_inbox():
                print(mail.get_message_content(_["id"]))
                mail_token = match(r"(\d){5,6}", mail.get_message_content(_["id"])).group(0)

            if mail_token:
                break

        if logging: print(mail_token)

        response = client.post(f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/attempt_verification?_clerk_js_version=4.38.4', data = {
            'code': mail_token,
            'strategy': 'email_code'
        })

        if logging: print(response.json())

        token = response.json()['client']['sessions'][0]['last_active_token']['jwt']

        with open('accounts.txt', 'a') as f:
            f.write(f'{mail_adress}:{token}\n')

        if logging: print(time() - start)

        return token


class StreamingCompletion:
    def create(
        token = None,
        chatId = None,
        prompt = '',
        actionType = 'new',
        defaultPersona = '607e41fe-95be-497e-8e97-010a59b2e2c0', # default
        model = 'gpt-4') -> ForeFrontResponse:

        if not token: raise Exception('Token is required!')
        if not chatId: chatId = str(uuid4())

        headers = {
            'authority' : 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
            'accept' : '*/*',
            'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'authorization' : 'Bearer ' + token,
            'cache-control' : 'no-cache',
            'content-type' : 'application/json',
            'origin' : 'https://chat.forefront.ai',
            'pragma' : 'no-cache',
            'referer' : 'https://chat.forefront.ai/',
            'sec-ch-ua' : '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
            'sec-ch-ua-mobile' : '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest' : 'empty',
            'sec-fetch-mode' : 'cors',
            'sec-fetch-site' : 'cross-site',
            'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
        }

        json_data = {
            'text' : prompt,
            'action' : actionType,
            'parentId' : chatId,
            'workspaceId' : chatId,
            'messagePersona' : defaultPersona,
            'model' : model
        }

        for chunk in post('https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
            headers=headers, json=json_data, stream=True).iter_lines():

            if b'finish_reason":null' in chunk:
                data = loads(chunk.decode('utf-8').split('data: ')[1])
                token = data['choices'][0]['delta'].get('content')

                if token != None:
                    yield ForeFrontResponse({
                        'id' : chatId,
                        'object' : 'text_completion',
                        'created': int(time()),
                        'model' : model,
                        'choices': [{
                            'text' : token,
                            'index' : 0,
                            'logprobs' : None,
                            'finish_reason' : 'stop'
                        }],
                        'usage': {
                            'prompt_tokens' : len(prompt),
                            'completion_tokens' : len(token),
                            'total_tokens' : len(prompt) + len(token)
                        }
                    })
```
forefront/mail.py (new file, 55 lines)
@@ -0,0 +1,55 @@

```python
from requests import Session
from string import ascii_letters
from random import choices


class Mail:
    def __init__(self, proxies: dict = None) -> None:
        self.client = Session()
        self.client.proxies = proxies
        self.client.headers = {
            "host": "api.mail.tm",
            "connection": "keep-alive",
            "sec-ch-ua": "\"Google Chrome\";v=\"111\", \"Not(A:Brand\";v=\"8\", \"Chromium\";v=\"111\"",
            "accept": "application/json, text/plain, */*",
            "content-type": "application/json",
            "sec-ch-ua-mobile": "?0",
            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
            "sec-ch-ua-platform": "\"macOS\"",
            "origin": "https://mail.tm",
            "sec-fetch-site": "same-site",
            "sec-fetch-mode": "cors",
            "sec-fetch-dest": "empty",
            "referer": "https://mail.tm/",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "en-GB,en-US;q=0.9,en;q=0.8"
        }

    def get_mail(self) -> str:
        token = ''.join(choices(ascii_letters, k=14)).lower()
        init = self.client.post("https://api.mail.tm/accounts", json={
            "address" : f"{token}@bugfoo.com",
            "password": token
        })

        if init.status_code == 201:
            resp = self.client.post("https://api.mail.tm/token", json = {
                **init.json(),
                "password": token
            })

            self.client.headers['authorization'] = 'Bearer ' + resp.json()['token']

            return f"{token}@bugfoo.com"

        else:
            raise Exception("Failed to create email")

    def fetch_inbox(self):
        return self.client.get(f"https://api.mail.tm/messages").json()["hydra:member"]

    def get_message(self, message_id: str):
        return self.client.get(f"https://api.mail.tm/messages/{message_id}").json()

    def get_message_content(self, message_id: str):
        return self.get_message(message_id)["text"]
```
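`Mail` is consumed by `forefront.Account.create` above, but it can also be exercised on its own; a minimal sketch, using only the methods defined in this file:

```python
from forefront.mail import Mail

mail = Mail()                     # no proxies
address = mail.get_mail()         # creates a throwaway mail.tm account and returns its address
print('inbox for', address)

for message in mail.fetch_inbox():                     # poll the inbox
    print(mail.get_message_content(message['id']))     # plain-text body of each message
```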
Typing module (the file name was not captured on the page; the `OraResponse` class is renamed to `ForeFrontResponse`, matching the `forefront.typing` import above)

@@ -1,7 +1,5 @@

-class OraResponse:
+class ForeFrontResponse:

     class Completion:

         class Choices:
             def __init__(self, choice: dict) -> None:
                 self.text = choice['text']
gui/README.md (new file, 9 lines)
@@ -0,0 +1,9 @@

# gpt4free gui

preview:

<img width="1125" alt="image" src="https://user-images.githubusercontent.com/98614666/234232398-09e9d3c5-08e6-4b8a-b4f2-0666e9790c7d.png">

run:

<img width="724" alt="image" src="https://user-images.githubusercontent.com/98614666/234232449-0d5cd092-a29d-4759-8197-e00ba712cb1a.png">
gui/streamlit_app.py (new file, 48 lines)
@@ -0,0 +1,48 @@

```python
import streamlit as st
import phind

phind.cf_clearance = ''
phind.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'

def phind_get_answer(question: str) -> str:
    # set cf_clearance cookie
    try:
        result = phind.Completion.create(
            model = 'gpt-4',
            prompt = question,
            results = phind.Search.create(question, actualSearch = True),
            creative = False,
            detailed = False,
            codeContext = '')
        return result.completion.choices[0].text

    except Exception as e:
        return 'An error occurred, please make sure you are using a cf_clearance token and correct useragent | %s' % e

st.set_page_config(
    page_title="gpt4freeGUI",
    initial_sidebar_state="expanded",
    page_icon="🧠",
    menu_items={
        'Get Help': 'https://github.com/xtekky/gpt4free/blob/main/README.md',
        'Report a bug': "https://github.com/xtekky/gpt4free/issues",
        'About': "### gptfree GUI"
    }
)

st.header('GPT4free GUI')

question_text_area = st.text_area('🤖 Ask Any Question :', placeholder='Explain quantum computing in 50 words')
if st.button('🧠 Think'):
    answer = phind_get_answer(question_text_area)
    st.caption("Answer :")
    st.markdown(answer)

hide_streamlit_style = """
<style>
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
```
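To run this app locally, the README in this diff copies it to the repository root first (the Dockerfile above does the same with `RUN cp gui/streamlit_app.py .`):

```sh
cp gui/streamlit_app.py .
streamlit run streamlit_app.py    # or: python3 -m streamlit run streamlit_app.py
```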
ora module init (deleted, 49 lines; the file name was not captured on the page, but the contents are the `ora` package's Completion class)

@@ -1,49 +0,0 @@

```python
from ora.model import CompletionModel
from ora.typing import OraResponse
from requests import post
from time import time
from random import randint

class Completion:
    def create(
        model : CompletionModel,
        prompt: str,
        includeHistory: bool = True,
        conversationId: str or None = None) -> OraResponse:

        extra = {
            'conversationId': conversationId} if conversationId else {}

        response = post('https://ora.sh/api/conversation',
            headers = {
                "host"          : "ora.sh",
                "authorization" : f"Bearer AY0{randint(1111, 9999)}",
                "user-agent"    : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
                "origin"        : "https://ora.sh",
                "referer"       : "https://ora.sh/chat/",
            },
            json = extra | {
                'chatbotId': model.id,
                'input'    : prompt,
                'userId'   : model.createdBy,
                'model'    : model.modelName,
                'provider' : 'OPEN_AI',
                'includeHistory': includeHistory}).json()

        return OraResponse({
            'id'     : response['conversationId'],
            'object' : 'text_completion',
            'created': int(time()),
            'model'  : model.slug,
            'choices': [{
                'text'          : response['response'],
                'index'         : 0,
                'logprobs'      : None,
                'finish_reason' : 'stop'
            }],
            'usage': {
                'prompt_tokens'     : len(prompt),
                'completion_tokens' : len(response['response']),
                'total_tokens'      : len(prompt) + len(response['response'])
            }
        })
```
ora/model.py (deleted, 46 lines)
@@ -1,46 +0,0 @@

```python
from uuid import uuid4
from requests import post

class CompletionModel:
    system_prompt = None
    description   = None
    createdBy     = None
    createdAt     = None
    slug          = None
    id            = None
    model         = 'gpt-3.5-turbo'

    def create(
        system_prompt: str = 'You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible',
        description  : str = 'ChatGPT Openai Language Model',
        name         : str = 'gpt-3.5'):

        CompletionModel.system_prompt = system_prompt
        CompletionModel.description   = description
        CompletionModel.slug          = name

        response = post('https://ora.sh/api/assistant', json = {
            'prompt'     : system_prompt,
            'userId'     : f'auto:{uuid4()}',
            'name'       : name,
            'description': description})

        CompletionModel.id        = response.json()['id']
        CompletionModel.createdBy = response.json()['createdBy']
        CompletionModel.createdAt = response.json()['createdAt']

        return CompletionModel

    def load(chatbotId: str, modelName: str = 'gpt-3.5-turbo', userId: str = None):
        if userId is None: userId = f'{uuid4()}'

        CompletionModel.system_prompt = None
        CompletionModel.description   = None
        CompletionModel.slug          = None
        CompletionModel.id            = chatbotId
        CompletionModel.createdBy     = userId
        CompletionModel.createdAt     = None
        CompletionModel.modelName     = modelName

        return CompletionModel
```
phind/README.md (new file, 34 lines)
@@ -0,0 +1,34 @@

### Example: `phind` (use like openai pypi package) <a name="example-phind"></a>

```python
import phind

# set cf_clearance cookie (needed again)
phind.cf_clearance = 'xx.xx-1682166681-0-160'
phind.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36' # same as the one from the browser you got cf_clearance from

prompt = 'who won the qatar world cup'

# help needed: not getting newlines from the stream, please submit a PR if you know how to fix this
# stream completion
for result in phind.StreamingCompletion.create(
    model = 'gpt-4',
    prompt = prompt,
    results = phind.Search.create(prompt, actualSearch = True), # create search (set actualSearch to False to disable internet)
    creative = False,
    detailed = False,
    codeContext = ''): # up to 3000 chars of code

    print(result.completion.choices[0].text, end='', flush=True)

# normal completion
result = phind.Completion.create(
    model = 'gpt-4',
    prompt = prompt,
    results = phind.Search.create(prompt, actualSearch = True), # create search (set actualSearch to False to disable internet)
    creative = False,
    detailed = False,
    codeContext = '') # up to 3000 chars of code

print(result.completion.choices[0].text)
```
phind/__init__.py (modified; the module drops the global `tls_client` session in favour of `curl_cffi` requests with a `cf_clearance` cookie, and gains a `StreamingCompletion` class)

@@ -1,24 +1,14 @@

 from urllib.parse import quote
-from tls_client import Session
 from time import time
 from datetime import datetime
+from queue import Queue, Empty
+from threading import Thread
+from re import findall
+
+from curl_cffi.requests import post

-client = Session(client_identifier='chrome110')
-client.headers = {
-    'authority': 'www.phind.com',
-    'accept': '*/*',
-    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-    'content-type': 'application/json',
-    'origin': 'https://www.phind.com',
-    'referer': 'https://www.phind.com/search',
-    'sec-ch-ua': '"Chromium";v="110", "Google Chrome";v="110", "Not:A-Brand";v="99"',
-    'sec-ch-ua-mobile': '?0',
-    'sec-ch-ua-platform': '"macOS"',
-    'sec-fetch-dest': 'empty',
-    'sec-fetch-mode': 'cors',
-    'sec-fetch-site': 'same-origin',
-    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
-}
+cf_clearance = ''
+user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'

 class PhindResponse:

@@ -36,7 +26,7 @@ class PhindResponse:
         return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''

     def __init__(self, choices: dict) -> None:
-        self.choices = [self.Choices(choice) for choice in choices]
+        self.choices = list(map(self.Choices, choices))

     class Usage:
         def __init__(self, usage_dict: dict) -> None:

@@ -63,6 +53,11 @@ class PhindResponse:

 class Search:
     def create(prompt: str, actualSearch: bool = True, language: str = 'en') -> dict: # None = no search
+        if user_agent == '':
+            raise ValueError('user_agent must be set, refer to documentation')
+        if cf_clearance == '' :
+            raise ValueError('cf_clearance must be set, refer to documentation')
+
         if not actualSearch:
             return {
                 '_type': 'SearchResponse',

@@ -81,11 +76,28 @@ class Search:
             }
         }

-        return client.post('https://www.phind.com/api/bing/search', json = {
+        headers = {
+            'authority': 'www.phind.com',
+            'accept': '*/*',
+            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'cookie': f'cf_clearance={cf_clearance}',
+            'origin': 'https://www.phind.com',
+            'referer': 'https://www.phind.com/search?q=hi&c=&source=searchbox&init=true',
+            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': user_agent
+        }
+
+        return post('https://www.phind.com/api/bing/search', headers = headers, json = {
             'q': prompt,
             'userRankList': {},
             'browserLanguage': language}).json()['rawBingResults']

 class Completion:
     def create(
         model = 'gpt-4',

@@ -96,6 +108,12 @@ class Completion:
         codeContext: str = '',
         language: str = 'en') -> PhindResponse:

+        if user_agent == '' :
+            raise ValueError('user_agent must be set, refer to documentation')
+
+        if cf_clearance == '' :
+            raise ValueError('cf_clearance must be set, refer to documentation')
+
         if results is None:
             results = Search.create(prompt, actualSearch = True)

@@ -121,12 +139,29 @@ class Completion:
             }
         }

+        headers = {
+            'authority': 'www.phind.com',
+            'accept': '*/*',
+            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'content-type': 'application/json',
+            'cookie': f'cf_clearance={cf_clearance}',
+            'origin': 'https://www.phind.com',
+            'referer': 'https://www.phind.com/search?q=hi&c=&source=searchbox&init=true',
+            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': user_agent
+        }
+
         completion = ''
-        response = client.post('https://www.phind.com/api/infer/answer', json=json_data, timeout_seconds=200)
+        response = post('https://www.phind.com/api/infer/answer', headers = headers, json = json_data, timeout=99999, impersonate='chrome110')
         for line in response.text.split('\r\n\r\n'):
             completion += (line.replace('data: ', ''))

         return PhindResponse({
             'id'     : f'cmpl-1337-{int(time())}',
             'object' : 'text_completion',
             'created': int(time()),

@@ -142,4 +177,115 @@ class Completion:
             'completion_tokens' : len(completion),
             'total_tokens'      : len(prompt) + len(completion)
         }
     })

+class StreamingCompletion:
+    message_queue = Queue()
+    stream_completed = False
+
+    def request(model, prompt, results, creative, detailed, codeContext, language) -> None:
+
+        models = {
+            'gpt-4' : 'expert',
+            'gpt-3.5-turbo' : 'intermediate',
+            'gpt-3.5': 'intermediate',
+        }
+
+        json_data = {
+            'question'    : prompt,
+            'bingResults' : results,
+            'codeContext' : codeContext,
+            'options': {
+                'skill'   : models[model],
+                'date'    : datetime.now().strftime("%d/%m/%Y"),
+                'language': language,
+                'detailed': detailed,
+                'creative': creative
+            }
+        }
+
+        headers = {
+            'authority': 'www.phind.com',
+            'accept': '*/*',
+            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'content-type': 'application/json',
+            'cookie': f'cf_clearance={cf_clearance}',
+            'origin': 'https://www.phind.com',
+            'referer': 'https://www.phind.com/search?q=hi&c=&source=searchbox&init=true',
+            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': user_agent
+        }
+
+        response = post('https://www.phind.com/api/infer/answer',
+            headers = headers, json = json_data, timeout=99999, impersonate='chrome110', content_callback=StreamingCompletion.handle_stream_response)
+
+        StreamingCompletion.stream_completed = True
+
+    @staticmethod
+    def create(
+        model       : str = 'gpt-4',
+        prompt      : str = '',
+        results     : dict = None,
+        creative    : bool = False,
+        detailed    : bool = False,
+        codeContext : str = '',
+        language    : str = 'en'):
+
+        if user_agent == '':
+            raise ValueError('user_agent must be set, refer to documentation')
+        if cf_clearance == '' :
+            raise ValueError('cf_clearance must be set, refer to documentation')
+
+        if results is None:
+            results = Search.create(prompt, actualSearch = True)
+
+        if len(codeContext) > 2999:
+            raise ValueError('codeContext must be less than 3000 characters')
+
+        Thread(target = StreamingCompletion.request, args = [
+            model, prompt, results, creative, detailed, codeContext, language]).start()
+
+        while StreamingCompletion.stream_completed != True or not StreamingCompletion.message_queue.empty():
+            try:
+                chunk = StreamingCompletion.message_queue.get(timeout=0)
+
+                if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
+                    chunk = b'data: \n\n\r\n\r\n'
+
+                chunk = chunk.decode()
+
+                chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n')
+                chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\n\r\n\r\n')
+                chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '')
+
+                yield PhindResponse({
+                    'id'     : f'cmpl-1337-{int(time())}',
+                    'object' : 'text_completion',
+                    'created': int(time()),
+                    'model'  : model,
+                    'choices': [{
+                        'text'          : chunk,
+                        'index'         : 0,
+                        'logprobs'      : None,
+                        'finish_reason' : 'stop'
+                    }],
+                    'usage': {
+                        'prompt_tokens'     : len(prompt),
+                        'completion_tokens' : len(chunk),
+                        'total_tokens'      : len(prompt) + len(chunk)
+                    }
+                })
+
+            except Empty:
+                pass
+
+    @staticmethod
+    def handle_stream_response(response):
+        StreamingCompletion.message_queue.put(response)
phind/__pycache__/__init__.cpython-311.pyc (new binary file; binary file not shown)
quora/README.md (new file, 68 lines)
@@ -0,0 +1,68 @@

#### warning !!!
poe.com added security and can detect if you are making automated requests. You may get your account banned if you are using this api.
The normal non-driver api is also currently not very stable

### Example: `quora (poe)` (use like openai pypi package) - GPT-4 <a name="example-poe"></a>

```python
# quora model names: (use left key as argument)
models = {
    'sage'   : 'capybara',
    'gpt-4'  : 'beaver',
    'claude-v1.2'         : 'a2_2',
    'claude-instant-v1.0' : 'a2',
    'gpt-3.5-turbo'       : 'chinchilla'
}
```

#### !! new: bot creation

```python
# import quora (poe) package
import quora

# create account
# make sure to set enable_bot_creation to True
token = quora.Account.create(logging = True, enable_bot_creation=True)

model = quora.Model.create(
    token = token,
    model = 'gpt-3.5-turbo', # or claude-instant-v1.0
    system_prompt = 'you are ChatGPT a large language model ...'
)

print(model.name) # gptx....

# streaming response
for response in quora.StreamingCompletion.create(
    custom_model = model.name,
    prompt ='hello world',
    token = token):

    print(response.completion.choices[0].text)
```

#### Normal Response:
```python
response = quora.Completion.create(model = 'gpt-4',
    prompt = 'hello world',
    token = token)

print(response.completion.choices[0].text)
```

#### Update Use This For Poe
```python
from quora import Poe

# available models: ['Sage', 'GPT-4', 'Claude+', 'Claude-instant', 'ChatGPT', 'Dragonfly', 'NeevaAI']

poe = Poe(model='ChatGPT', driver='firefox', cookie_path='cookie.json', driver_path='path_of_driver')
poe.chat('who won the football world cup most?')

# new bot creation
poe.create_bot('new_bot_name', prompt='You are new test bot', base_model='gpt-3.5-turbo')
```
@@ -1,45 +1,82 @@
|
|||||||
from quora.api import Client as PoeClient
|
import json
|
||||||
from quora.mail import Mail
|
from datetime import datetime
|
||||||
from requests import Session
|
from hashlib import md5
|
||||||
from re import search, findall
|
from json import dumps
|
||||||
from json import loads
|
from pathlib import Path
|
||||||
from time import sleep
|
from random import choice, choices, randint
|
||||||
from pathlib import Path
|
from re import search, findall
|
||||||
from random import choice, choices, randint
|
from string import ascii_letters, digits
|
||||||
from string import ascii_letters, digits
|
from typing import Optional, Union
|
||||||
from urllib import parse
|
from urllib.parse import unquote
|
||||||
from os import urandom
|
|
||||||
from hashlib import md5
|
import selenium.webdriver.support.expected_conditions as EC
|
||||||
from json import dumps
|
from fake_useragent import UserAgent
|
||||||
|
from pypasser import reCaptchaV3
|
||||||
|
from requests import Session
|
||||||
|
from selenium.webdriver import Firefox, Chrome, FirefoxOptions, ChromeOptions
|
||||||
|
from selenium.webdriver.common.by import By
|
||||||
|
from selenium.webdriver.support.wait import WebDriverWait
|
||||||
|
from tls_client import Session as TLS
|
||||||
|
|
||||||
|
from quora.api import Client as PoeClient
|
||||||
|
from quora.mail import Emailnator
|
||||||
|
|
||||||
|
SELENIUM_WEB_DRIVER_ERROR_MSG = b'''The error message you are receiving is due to the `geckodriver` executable not
|
||||||
|
being found in your system\'s PATH. To resolve this issue, you need to download the geckodriver and add its location
|
||||||
|
to your system\'s PATH.\n\nHere are the steps to resolve the issue:\n\n1. Download the geckodriver for your platform
|
||||||
|
(Windows, macOS, or Linux) from the following link: https://github.com/mozilla/geckodriver/releases\n\n2. Extract the
|
||||||
|
downloaded archive and locate the geckodriver executable.\n\n3. Add the geckodriver executable to your system\'s
|
||||||
|
PATH.\n\nFor macOS and Linux:\n\n- Open a terminal window.\n- Move the geckodriver executable to a directory that is
|
||||||
|
already in your PATH, or create a new directory and add it to your PATH:\n\n```bash\n# Example: Move geckodriver to
|
||||||
|
/usr/local/bin\nmv /path/to/your/geckodriver /usr/local/bin\n```\n\n- If you created a new directory, add it to your
|
||||||
|
PATH:\n\n```bash\n# Example: Add a new directory to PATH\nexport PATH=$PATH:/path/to/your/directory\n```\n\nFor
|
||||||
|
Windows:\n\n- Right-click on "My Computer" or "This PC" and select "Properties".\n- Click on "Advanced system
|
||||||
|
settings".\n- Click on the "Environment Variables" button.\n- In the "System variables" section, find the "Path"
|
||||||
|
variable, select it, and click "Edit".\n- Click "New" and add the path to the directory containing the geckodriver
|
||||||
|
executable.\n\nAfter adding the geckodriver to your PATH, restart your terminal or command prompt and try running
|
||||||
|
your script again. The error should be resolved.'''
|
||||||
|
|
||||||
|
# from twocaptcha import TwoCaptcha
|
||||||
|
# solver = TwoCaptcha('72747bf24a9d89b4dcc1b24875efd358')
|
||||||
|
|
||||||
|
MODELS = {
|
||||||
|
'Sage': 'capybara',
|
||||||
|
'GPT-4': 'beaver',
|
||||||
|
'Claude+': 'a2_2',
|
||||||
|
'Claude-instant': 'a2',
|
||||||
|
'ChatGPT': 'chinchilla',
|
||||||
|
'Dragonfly': 'nutria',
|
||||||
|
'NeevaAI': 'hutia',
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
def extract_formkey(html):
|
def extract_formkey(html):
|
||||||
script_regex = r'<script>if\(.+\)throw new Error;(.+)</script>'
|
script_regex = r'<script>if\(.+\)throw new Error;(.+)</script>'
|
||||||
script_text = search(script_regex, html).group(1)
|
script_text = search(script_regex, html).group(1)
|
||||||
key_regex = r'var .="([0-9a-f]+)",'
|
key_regex = r'var .="([0-9a-f]+)",'
|
||||||
key_text = search(key_regex, script_text).group(1)
|
key_text = search(key_regex, script_text).group(1)
|
||||||
cipher_regex = r'.\[(\d+)\]=.\[(\d+)\]'
|
cipher_regex = r'.\[(\d+)\]=.\[(\d+)\]'
|
||||||
cipher_pairs = findall(cipher_regex, script_text)
|
cipher_pairs = findall(cipher_regex, script_text)
|
||||||
|
|
||||||
formkey_list = [""] * len(cipher_pairs)
|
formkey_list = [''] * len(cipher_pairs)
|
||||||
for pair in cipher_pairs:
|
for pair in cipher_pairs:
|
||||||
formkey_index, key_index = map(int, pair)
|
formkey_index, key_index = map(int, pair)
|
||||||
formkey_list[formkey_index] = key_text[key_index]
|
formkey_list[formkey_index] = key_text[key_index]
|
||||||
formkey = "".join(formkey_list)
|
formkey = ''.join(formkey_list)
|
||||||
|
|
||||||
return formkey
|
return formkey
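The obfuscated script on poe.com builds the form key by shuffling characters of an embedded hex string, and `extract_formkey` reverses that permutation. A toy illustration with a made-up script (not the real page markup) shows the idea:

```python
# Toy example of the permutation scheme extract_formkey reverses: the page
# defines a hex string and a series of assignments formkey[i] = key[j];
# reassembling them in order yields the form key.
from re import search, findall

sample_html = (
    '<script>if(document)throw new Error;'
    'var a="beef01",b=[];b[0]=a[2],b[1]=a[0],b[2]=a[5]</script>'
)

script_text = search(r'<script>if\(.+\)throw new Error;(.+)</script>', sample_html).group(1)
key_text = search(r'var .="([0-9a-f]+)",', script_text).group(1)
pairs = findall(r'.\[(\d+)\]=.\[(\d+)\]', script_text)

formkey = [''] * len(pairs)
for dst, src in pairs:
    formkey[int(dst)] = key_text[int(src)]
print(''.join(formkey))  # -> 'eb1' for this made-up sample
```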
|
||||||
|
|
||||||
|
|
||||||
class PoeResponse:
|
class PoeResponse:
|
||||||
|
|
||||||
class Completion:
|
class Completion:
|
||||||
|
|
||||||
class Choices:
|
class Choices:
|
||||||
def __init__(self, choice: dict) -> None:
|
def __init__(self, choice: dict) -> None:
|
||||||
self.text = choice['text']
|
self.text = choice['text']
|
||||||
self.content = self.text.encode()
|
self.content = self.text.encode()
|
||||||
self.index = choice['index']
|
self.index = choice['index']
|
||||||
self.logprobs = choice['logprobs']
|
self.logprobs = choice['logprobs']
|
||||||
self.finish_reason = choice['finish_reason']
|
self.finish_reason = choice['finish_reason']
|
||||||
|
|
||||||
def __repr__(self) -> str:
|
def __repr__(self) -> str:
|
||||||
return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
|
return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
|
||||||
|
|
||||||
@@ -48,22 +85,21 @@ class PoeResponse:
|
|||||||
|
|
||||||
class Usage:
|
class Usage:
|
||||||
def __init__(self, usage_dict: dict) -> None:
|
def __init__(self, usage_dict: dict) -> None:
|
||||||
self.prompt_tokens = usage_dict['prompt_tokens']
|
self.prompt_tokens = usage_dict['prompt_tokens']
|
||||||
self.completion_tokens = usage_dict['completion_tokens']
|
self.completion_tokens = usage_dict['completion_tokens']
|
||||||
self.total_tokens = usage_dict['total_tokens']
|
self.total_tokens = usage_dict['total_tokens']
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
|
return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
|
||||||
|
|
||||||
def __init__(self, response_dict: dict) -> None:
|
def __init__(self, response_dict: dict) -> None:
|
||||||
|
self.response_dict = response_dict
|
||||||
self.response_dict = response_dict
|
self.id = response_dict['id']
|
||||||
self.id = response_dict['id']
|
self.object = response_dict['object']
|
||||||
self.object = response_dict['object']
|
self.created = response_dict['created']
|
||||||
self.created = response_dict['created']
|
self.model = response_dict['model']
|
||||||
self.model = response_dict['model']
|
self.completion = self.Completion(response_dict['choices'])
|
||||||
self.completion = self.Completion(response_dict['choices'])
|
self.usage = self.Usage(response_dict['usage'])
|
||||||
self.usage = self.Usage(response_dict['usage'])
|
|
||||||
|
|
||||||
def json(self) -> dict:
|
def json(self) -> dict:
|
||||||
return self.response_dict
|
return self.response_dict
|
||||||
@@ -71,275 +107,389 @@ class PoeResponse:
|
|||||||
|
|
||||||
class ModelResponse:
|
class ModelResponse:
|
||||||
def __init__(self, json_response: dict) -> None:
|
def __init__(self, json_response: dict) -> None:
|
||||||
self.id = json_response['data']['poeBotCreate']['bot']['id']
|
self.id = json_response['data']['poeBotCreate']['bot']['id']
|
||||||
self.name = json_response['data']['poeBotCreate']['bot']['displayName']
|
self.name = json_response['data']['poeBotCreate']['bot']['displayName']
|
||||||
self.limit = json_response['data']['poeBotCreate']['bot']['messageLimit']['dailyLimit']
|
self.limit = json_response['data']['poeBotCreate']['bot']['messageLimit']['dailyLimit']
|
||||||
self.deleted = json_response['data']['poeBotCreate']['bot']['deletionState']
|
self.deleted = json_response['data']['poeBotCreate']['bot']['deletionState']
|
||||||
|
|
||||||
|
|
||||||
class Model:
|
class Model:
|
||||||
|
@staticmethod
|
||||||
def create(
|
def create(
|
||||||
token: str,
|
token: str,
|
||||||
model: str = 'gpt-3.5-turbo', # claude-instant
|
model: str = 'gpt-3.5-turbo', # claude-instant
|
||||||
system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible',
|
system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible',
|
||||||
description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
|
description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
|
||||||
handle: str = None) -> ModelResponse:
|
handle: str = None,
|
||||||
|
) -> ModelResponse:
|
||||||
models = {
|
models = {
|
||||||
'gpt-3.5-turbo' : 'chinchilla',
|
'gpt-3.5-turbo': 'chinchilla',
|
||||||
'claude-instant-v1.0': 'a2',
|
'claude-instant-v1.0': 'a2',
|
||||||
'gpt-4': 'beaver'
|
'gpt-4': 'beaver',
|
||||||
}
|
}
|
||||||
|
|
||||||
if not handle:
|
if not handle:
|
||||||
handle = f'gptx{randint(1111111, 9999999)}'
|
handle = f'gptx{randint(1111111, 9999999)}'
|
||||||
|
|
||||||
client = Session()
|
client = Session()
|
||||||
client.cookies['p-b'] = token
|
client.cookies['p-b'] = token
|
||||||
|
|
||||||
formkey = extract_formkey(client.get('https://poe.com').text)
|
formkey = extract_formkey(client.get('https://poe.com').text)
|
||||||
settings = client.get('https://poe.com/api/settings').json()
|
settings = client.get('https://poe.com/api/settings').json()
|
||||||
|
|
||||||
client.headers = {
|
client.headers = {
|
||||||
"host" : "poe.com",
|
'host': 'poe.com',
|
||||||
"origin" : "https://poe.com",
|
'origin': 'https://poe.com',
|
||||||
"referer" : "https://poe.com/",
|
'referer': 'https://poe.com/',
|
||||||
"content-type" : "application/json",
|
'poe-formkey': formkey,
|
||||||
"poe-formkey" : formkey,
|
'poe-tchannel': settings['tchannelData']['channel'],
|
||||||
"poe-tchannel" : settings['tchannelData']['channel'],
|
'user-agent': UserAgent().random,
|
||||||
"user-agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
|
'connection': 'keep-alive',
|
||||||
"connection" : "keep-alive",
|
'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
|
||||||
"sec-ch-ua" : "\"Chromium\";v=\"112\", \"Google Chrome\";v=\"112\", \"Not:A-Brand\";v=\"99\"",
|
'sec-ch-ua-mobile': '?0',
|
||||||
"sec-ch-ua-mobile" : "?0",
|
'sec-ch-ua-platform': '"macOS"',
|
||||||
"sec-ch-ua-platform": "\"macOS\"",
|
'content-type': 'application/json',
|
||||||
"content-type" : "application/json",
|
'sec-fetch-site': 'same-origin',
|
||||||
"sec-fetch-site" : "same-origin",
|
'sec-fetch-mode': 'cors',
|
||||||
"sec-fetch-mode" : "cors",
|
'sec-fetch-dest': 'empty',
|
||||||
"sec-fetch-dest" : "empty",
|
'accept': '*/*',
|
||||||
"accept" : "*/*",
|
'accept-encoding': 'gzip, deflate, br',
|
||||||
"accept-encoding" : "gzip, deflate, br",
|
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
|
||||||
"accept-language" : "en-GB,en-US;q=0.9,en;q=0.8",
|
|
||||||
}
|
}
|
||||||
|
|
||||||
payload = dumps(separators=(',', ':'), obj = {
|
payload = dumps(
|
||||||
'queryName': 'CreateBotMain_poeBotCreate_Mutation',
|
separators=(',', ':'),
|
||||||
'variables': {
|
obj={
|
||||||
'model' : models[model],
|
'queryName': 'CreateBotMain_poeBotCreate_Mutation',
|
||||||
'handle' : handle,
|
'variables': {
|
||||||
'prompt' : system_prompt,
|
'model': models[model],
|
||||||
'isPromptPublic' : True,
|
'handle': handle,
|
||||||
'introduction' : '',
|
'prompt': system_prompt,
|
||||||
'description' : description,
|
'isPromptPublic': True,
|
||||||
'profilePictureUrl' : 'https://qph.fs.quoracdn.net/main-qimg-24e0b480dcd946e1cc6728802c5128b6',
|
'introduction': '',
|
||||||
'apiUrl' : None,
|
'description': description,
|
||||||
'apiKey' : ''.join(choices(ascii_letters + digits, k = 32)),
|
'profilePictureUrl': 'https://qph.fs.quoracdn.net/main-qimg-24e0b480dcd946e1cc6728802c5128b6',
|
||||||
'isApiBot' : False,
|
'apiUrl': None,
|
||||||
'hasLinkification' : False,
|
'apiKey': ''.join(choices(ascii_letters + digits, k=32)),
|
||||||
'hasMarkdownRendering' : False,
|
'isApiBot': False,
|
||||||
'hasSuggestedReplies' : False,
|
'hasLinkification': False,
|
||||||
'isPrivateBot' : False
|
'hasMarkdownRendering': False,
|
||||||
|
'hasSuggestedReplies': False,
|
||||||
|
'isPrivateBot': False,
|
||||||
|
},
|
||||||
|
'query': 'mutation CreateBotMain_poeBotCreate_Mutation(\n $model: String!\n $handle: String!\n $prompt: String!\n $isPromptPublic: Boolean!\n $introduction: String!\n $description: String!\n $profilePictureUrl: String\n $apiUrl: String\n $apiKey: String\n $isApiBot: Boolean\n $hasLinkification: Boolean\n $hasMarkdownRendering: Boolean\n $hasSuggestedReplies: Boolean\n $isPrivateBot: Boolean\n) {\n poeBotCreate(model: $model, handle: $handle, promptPlaintext: $prompt, isPromptPublic: $isPromptPublic, introduction: $introduction, description: $description, profilePicture: $profilePictureUrl, apiUrl: $apiUrl, apiKey: $apiKey, isApiBot: $isApiBot, hasLinkification: $hasLinkification, hasMarkdownRendering: $hasMarkdownRendering, hasSuggestedReplies: $hasSuggestedReplies, isPrivateBot: $isPrivateBot) {\n status\n bot {\n id\n ...BotHeader_bot\n }\n }\n}\n\nfragment BotHeader_bot on Bot {\n displayName\n messageLimit {\n dailyLimit\n }\n ...BotImage_bot\n ...BotLink_bot\n ...IdAnnotation_node\n ...botHelpers_useViewerCanAccessPrivateBot\n ...botHelpers_useDeletion_bot\n}\n\nfragment BotImage_bot on Bot {\n displayName\n ...botHelpers_useDeletion_bot\n ...BotImage_useProfileImage_bot\n}\n\nfragment BotImage_useProfileImage_bot on Bot {\n image {\n __typename\n ... on LocalBotImage {\n localName\n }\n ... on UrlBotImage {\n url\n }\n }\n ...botHelpers_useDeletion_bot\n}\n\nfragment BotLink_bot on Bot {\n displayName\n}\n\nfragment IdAnnotation_node on Node {\n __isNode: __typename\n id\n}\n\nfragment botHelpers_useDeletion_bot on Bot {\n deletionState\n}\n\nfragment botHelpers_useViewerCanAccessPrivateBot on Bot {\n isPrivateBot\n viewerIsCreator\n}\n',
|
||||||
},
|
},
|
||||||
'query': 'mutation CreateBotMain_poeBotCreate_Mutation(\n $model: String!\n $handle: String!\n $prompt: String!\n $isPromptPublic: Boolean!\n $introduction: String!\n $description: String!\n $profilePictureUrl: String\n $apiUrl: String\n $apiKey: String\n $isApiBot: Boolean\n $hasLinkification: Boolean\n $hasMarkdownRendering: Boolean\n $hasSuggestedReplies: Boolean\n $isPrivateBot: Boolean\n) {\n poeBotCreate(model: $model, handle: $handle, promptPlaintext: $prompt, isPromptPublic: $isPromptPublic, introduction: $introduction, description: $description, profilePicture: $profilePictureUrl, apiUrl: $apiUrl, apiKey: $apiKey, isApiBot: $isApiBot, hasLinkification: $hasLinkification, hasMarkdownRendering: $hasMarkdownRendering, hasSuggestedReplies: $hasSuggestedReplies, isPrivateBot: $isPrivateBot) {\n status\n bot {\n id\n ...BotHeader_bot\n }\n }\n}\n\nfragment BotHeader_bot on Bot {\n displayName\n messageLimit {\n dailyLimit\n }\n ...BotImage_bot\n ...BotLink_bot\n ...IdAnnotation_node\n ...botHelpers_useViewerCanAccessPrivateBot\n ...botHelpers_useDeletion_bot\n}\n\nfragment BotImage_bot on Bot {\n displayName\n ...botHelpers_useDeletion_bot\n ...BotImage_useProfileImage_bot\n}\n\nfragment BotImage_useProfileImage_bot on Bot {\n image {\n __typename\n ... on LocalBotImage {\n localName\n }\n ... on UrlBotImage {\n url\n }\n }\n ...botHelpers_useDeletion_bot\n}\n\nfragment BotLink_bot on Bot {\n displayName\n}\n\nfragment IdAnnotation_node on Node {\n __isNode: __typename\n id\n}\n\nfragment botHelpers_useDeletion_bot on Bot {\n deletionState\n}\n\nfragment botHelpers_useViewerCanAccessPrivateBot on Bot {\n isPrivateBot\n viewerIsCreator\n}\n',
|
)
|
||||||
})
|
|
||||||
|
|
||||||
base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
|
base_string = payload + client.headers['poe-formkey'] + 'WpuLMiXEKKE98j56k'
|
||||||
client.headers["poe-tag-id"] = md5(base_string.encode()).hexdigest()
|
client.headers['poe-tag-id'] = md5(base_string.encode()).hexdigest()
|
||||||
|
|
||||||
response = client.post("https://poe.com/api/gql_POST", data = payload)
|
|
||||||
|
|
||||||
if not 'success' in response.text:
|
response = client.post('https://poe.com/api/gql_POST', data=payload)
|
||||||
raise Exception('''
|
|
||||||
|
if 'success' not in response.text:
|
||||||
|
raise Exception(
|
||||||
|
'''
|
||||||
Bot creation Failed
|
Bot creation Failed
|
||||||
!! Important !!
|
!! Important !!
|
||||||
Bot creation was not enabled on this account
|
Bot creation was not enabled on this account
|
||||||
please use: quora.Account.create with enable_bot_creation set to True
|
please use: quora.Account.create with enable_bot_creation set to True
|
||||||
''')
|
'''
|
||||||
|
)
|
||||||
|
|
||||||
return ModelResponse(response.json())
|
return ModelResponse(response.json())
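Every `gql_POST` request above is signed: the `poe-tag-id` header is the MD5 of the serialised payload, the extracted form key, and a fixed salt. A self-contained sketch with placeholder values:

```python
# Sketch of the request-tagging scheme (values are placeholders, not credentials).
from hashlib import md5
from json import dumps

formkey = '0123456789abcdef'          # hypothetical; normally extract_formkey(...)
salt = 'WpuLMiXEKKE98j56k'            # constant used in the code above
payload = dumps({'queryName': 'ExampleQuery', 'variables': {}}, separators=(',', ':'))

poe_tag_id = md5((payload + formkey + salt).encode()).hexdigest()
print(poe_tag_id)
```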
|
||||||
|
|
||||||
|
|
||||||
class Account:
|
class Account:
|
||||||
def create(proxy: None or str = None, logging: bool = False, enable_bot_creation: bool = False):
|
@staticmethod
|
||||||
client = Session()
|
def create(
|
||||||
client.proxies = {
|
proxy: Optional[str] = None,
|
||||||
'http': f'http://{proxy}',
|
logging: bool = False,
|
||||||
'https': f'http://{proxy}'} if proxy else None
|
enable_bot_creation: bool = False,
|
||||||
|
):
|
||||||
|
client = TLS(client_identifier='chrome110')
|
||||||
|
client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else None
|
||||||
|
|
||||||
mail = Mail(client.proxies)
|
mail_client = Emailnator()
|
||||||
mail_token = None
|
mail_address = mail_client.get_mail()
|
||||||
_, mail_address = mail.get_mail()
|
|
||||||
|
|
||||||
if logging: print('email', mail_address)
|
if logging:
|
||||||
|
print('email', mail_address)
|
||||||
|
|
||||||
client.headers = {
|
client.headers = {
|
||||||
"host" : "poe.com",
|
'authority': 'poe.com',
|
||||||
"connection" : "keep-alive",
|
'accept': '*/*',
|
||||||
"cache-control" : "max-age=0",
|
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
|
||||||
"sec-ch-ua" : "\"Microsoft Edge\";v=\"111\", \"Not(A:Brand\";v=\"8\", \"Chromium\";v=\"111\"",
|
'content-type': 'application/json',
|
||||||
"sec-ch-ua-mobile" : "?0",
|
'origin': 'https://poe.com',
|
||||||
"sec-ch-ua-platform": "\"macOS\"",
|
'poe-tag-id': 'null',
|
||||||
"user-agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.54",
|
'referer': 'https://poe.com/login',
|
||||||
"accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
|
'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
|
||||||
"sec-fetch-site" : "same-origin",
|
'sec-ch-ua-mobile': '?0',
|
||||||
"sec-fetch-mode" : "navigate",
|
'sec-ch-ua-platform': '"macOS"',
|
||||||
"content-type" : "application/json",
|
'sec-fetch-dest': 'empty',
|
||||||
"sec-fetch-user" : "?1",
|
'sec-fetch-mode': 'cors',
|
||||||
"sec-fetch-dest" : "document",
|
'sec-fetch-site': 'same-origin',
|
||||||
"accept-encoding" : "gzip, deflate, br",
|
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
|
||||||
"accept-language" : "en-GB,en;q=0.9,en-US;q=0.8",
|
'poe-formkey': extract_formkey(client.get('https://poe.com/login').text),
|
||||||
"upgrade-insecure-requests": "1",
|
'poe-tchannel': client.get('https://poe.com/api/settings').json()['tchannelData']['channel'],
|
||||||
}
|
}
|
||||||
|
|
||||||
client.headers["poe-formkey"] = extract_formkey(client.get('https://poe.com/login').text)
|
token = reCaptchaV3(
|
||||||
client.headers["poe-tchannel"] = client.get('https://poe.com/api/settings').json()['tchannelData']['channel']
|
'https://www.recaptcha.net/recaptcha/enterprise/anchor?ar=1&k=6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG&co=aHR0cHM6Ly9wb2UuY29tOjQ0Mw..&hl=en&v=4PnKmGB9wRHh1i04o7YUICeI&size=invisible&cb=bi6ivxoskyal'
|
||||||
|
)
|
||||||
|
# token = solver.recaptcha(sitekey='6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG',
|
||||||
|
# url = 'https://poe.com/login?redirect_url=%2F',
|
||||||
|
# version = 'v3',
|
||||||
|
# enterprise = 1,
|
||||||
|
# invisible = 1,
|
||||||
|
# action = 'login',)['code']
|
||||||
|
|
||||||
payload = dumps(separators = (',', ':'), obj = {
|
payload = dumps(
|
||||||
'queryName': 'MainSignupLoginSection_sendVerificationCodeMutation_Mutation',
|
separators=(',', ':'),
|
||||||
'variables': {
|
obj={
|
||||||
'emailAddress': mail_address,
|
'queryName': 'MainSignupLoginSection_sendVerificationCodeMutation_Mutation',
|
||||||
'phoneNumber': None,
|
'variables': {
|
||||||
'recaptchaToken': None,
|
'emailAddress': mail_address,
|
||||||
|
'phoneNumber': None,
|
||||||
|
'recaptchaToken': token,
|
||||||
|
},
|
||||||
|
'query': 'mutation MainSignupLoginSection_sendVerificationCodeMutation_Mutation(\n $emailAddress: String\n $phoneNumber: String\n $recaptchaToken: String\n) {\n sendVerificationCode(verificationReason: login, emailAddress: $emailAddress, phoneNumber: $phoneNumber, recaptchaToken: $recaptchaToken) {\n status\n errorMessage\n }\n}\n',
|
||||||
},
|
},
|
||||||
'query': 'mutation MainSignupLoginSection_sendVerificationCodeMutation_Mutation(\n $emailAddress: String\n $phoneNumber: String\n $recaptchaToken: String\n) {\n sendVerificationCode(verificationReason: login, emailAddress: $emailAddress, phoneNumber: $phoneNumber, recaptchaToken: $recaptchaToken) {\n status\n errorMessage\n }\n}\n',
|
)
|
||||||
})
|
|
||||||
|
|
||||||
base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
|
base_string = payload + client.headers['poe-formkey'] + 'WpuLMiXEKKE98j56k'
|
||||||
client.headers["poe-tag-id"] = md5(base_string.encode()).hexdigest()
|
client.headers['poe-tag-id'] = md5(base_string.encode()).hexdigest()
|
||||||
|
|
||||||
|
print(dumps(client.headers, indent=4))
|
||||||
|
|
||||||
response = client.post('https://poe.com/api/gql_POST', data=payload)
|
response = client.post('https://poe.com/api/gql_POST', data=payload)
|
||||||
|
|
||||||
|
if 'automated_request_detected' in response.text:
|
||||||
|
print('please try using a proxy / wait for fix')
|
||||||
|
|
||||||
if 'Bad Request' in response.text:
|
if 'Bad Request' in response.text:
|
||||||
if logging: print('bad request, retrying...' , response.json())
|
if logging:
|
||||||
|
print('bad request, retrying...', response.json())
|
||||||
quit()
|
quit()
|
||||||
|
|
||||||
if logging: print('send_code' ,response.json())
|
if logging:
|
||||||
|
print('send_code', response.json())
|
||||||
|
|
||||||
while True:
|
mail_content = mail_client.get_message()
|
||||||
sleep(1)
|
mail_token = findall(r';">(\d{6,7})</div>', mail_content)[0]
|
||||||
messages = mail.fetch_inbox()
|
|
||||||
|
|
||||||
if len(messages["messages"]) > 0:
|
|
||||||
email_content = mail.get_message_content(messages["messages"][0]["_id"])
|
|
||||||
mail_token = findall(r';">(\d{6,7})</div>', email_content)[0]
|
|
||||||
|
|
||||||
if mail_token:
|
|
||||||
break
|
|
||||||
|
|
||||||
if logging: print('code', mail_token)
|
if logging:
|
||||||
|
print('code', mail_token)
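The six-to-seven digit login code is scraped straight out of the verification e-mail's HTML with the regex used above. A small illustration with invented markup:

```python
# Hedged illustration of the code extraction; the HTML below is made up.
from re import findall

mail_content = '<div style="font-size:28px;">123456</div>'
code = findall(r';">(\d{6,7})</div>', mail_content)[0]
print(code)  # -> 123456
```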
|
||||||
|
|
||||||
payload = dumps(separators = (',', ':'), obj={
|
payload = dumps(
|
||||||
"queryName": "SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation",
|
separators=(',', ':'),
|
||||||
"variables": {
|
obj={
|
||||||
"verificationCode" : mail_token,
|
'queryName': 'SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation',
|
||||||
"emailAddress" : mail_address,
|
'variables': {
|
||||||
"phoneNumber" : None
|
'verificationCode': str(mail_token),
|
||||||
|
'emailAddress': mail_address,
|
||||||
|
'phoneNumber': None,
|
||||||
|
},
|
||||||
|
'query': 'mutation SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation(\n $verificationCode: String!\n $emailAddress: String\n $phoneNumber: String\n) {\n signupWithVerificationCode(verificationCode: $verificationCode, emailAddress: $emailAddress, phoneNumber: $phoneNumber) {\n status\n errorMessage\n }\n}\n',
|
||||||
},
|
},
|
||||||
"query": "mutation SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation(\n $verificationCode: String!\n $emailAddress: String\n $phoneNumber: String\n) {\n signupWithVerificationCode(verificationCode: $verificationCode, emailAddress: $emailAddress, phoneNumber: $phoneNumber) {\n status\n errorMessage\n }\n}\n"
|
)
|
||||||
})
|
|
||||||
|
|
||||||
base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
|
base_string = payload + client.headers['poe-formkey'] + 'WpuLMiXEKKE98j56k'
|
||||||
client.headers["poe-tag-id"] = md5(base_string.encode()).hexdigest()
|
client.headers['poe-tag-id'] = md5(base_string.encode()).hexdigest()
|
||||||
|
|
||||||
response = client.post('https://poe.com/api/gql_POST', data = payload)
|
response = client.post('https://poe.com/api/gql_POST', data=payload)
|
||||||
if logging: print('verify_code', response.json())
|
if logging:
|
||||||
|
print('verify_code', response.json())
|
||||||
|
|
||||||
token = parse.unquote(client.cookies.get_dict()['p-b'])
|
def get(self):
|
||||||
|
|
||||||
with open(Path(__file__).resolve().parent / 'cookies.txt', 'a') as f:
|
|
||||||
f.write(f'{token}\n')
|
|
||||||
|
|
||||||
if enable_bot_creation:
|
|
||||||
|
|
||||||
payload = dumps(separators = (',', ':'), obj={
|
|
||||||
"queryName": "UserProfileConfigurePreviewModal_markMultiplayerNuxCompleted_Mutation",
|
|
||||||
"variables": {},
|
|
||||||
"query": "mutation UserProfileConfigurePreviewModal_markMultiplayerNuxCompleted_Mutation {\n markMultiplayerNuxCompleted {\n viewer {\n hasCompletedMultiplayerNux\n id\n }\n }\n}\n"
|
|
||||||
})
|
|
||||||
|
|
||||||
base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
|
|
||||||
client.headers["poe-tag-id"] = md5(base_string.encode()).hexdigest()
|
|
||||||
|
|
||||||
resp = client.post("https://poe.com/api/gql_POST", data = payload)
|
|
||||||
if logging: print(resp.json())
|
|
||||||
|
|
||||||
return token
|
|
||||||
|
|
||||||
def get():
|
|
||||||
cookies = open(Path(__file__).resolve().parent / 'cookies.txt', 'r').read().splitlines()
|
cookies = open(Path(__file__).resolve().parent / 'cookies.txt', 'r').read().splitlines()
|
||||||
return choice(cookies)
|
return choice(cookies)
|
||||||
|
|
||||||
class StreamingCompletion:
|
|
||||||
def create(
|
|
||||||
model : str = 'gpt-4',
|
|
||||||
custom_model : bool = None,
|
|
||||||
prompt: str = 'hello world',
|
|
||||||
token : str = ''):
|
|
||||||
|
|
||||||
models = {
|
class StreamingCompletion:
|
||||||
'sage' : 'capybara',
|
@staticmethod
|
||||||
'gpt-4' : 'beaver',
|
def create(
|
||||||
'claude-v1.2' : 'a2_2',
|
model: str = 'gpt-4',
|
||||||
'claude-instant-v1.0' : 'a2',
|
custom_model: bool = None,
|
||||||
'gpt-3.5-turbo' : 'chinchilla'
|
prompt: str = 'hello world',
|
||||||
}
|
token: str = '',
|
||||||
|
):
|
||||||
_model = models[model] if not custom_model else custom_model
|
_model = MODELS[model] if not custom_model else custom_model
|
||||||
|
|
||||||
client = PoeClient(token)
|
client = PoeClient(token)
|
||||||
|
|
||||||
for chunk in client.send_message(_model, prompt):
|
for chunk in client.send_message(_model, prompt):
|
||||||
|
yield PoeResponse(
|
||||||
yield PoeResponse({
|
{
|
||||||
'id' : chunk["messageId"],
|
'id': chunk['messageId'],
|
||||||
'object' : 'text_completion',
|
'object': 'text_completion',
|
||||||
'created': chunk['creationTime'],
|
'created': chunk['creationTime'],
|
||||||
'model' : _model,
|
'model': _model,
|
||||||
'choices': [{
|
'choices': [
|
||||||
'text' : chunk["text_new"],
|
{
|
||||||
'index' : 0,
|
'text': chunk['text_new'],
|
||||||
'logprobs' : None,
|
'index': 0,
|
||||||
'finish_reason' : 'stop'
|
'logprobs': None,
|
||||||
}],
|
'finish_reason': 'stop',
|
||||||
'usage': {
|
}
|
||||||
'prompt_tokens' : len(prompt),
|
],
|
||||||
'completion_tokens' : len(chunk["text_new"]),
|
'usage': {
|
||||||
'total_tokens' : len(prompt) + len(chunk["text_new"])
|
'prompt_tokens': len(prompt),
|
||||||
|
'completion_tokens': len(chunk['text_new']),
|
||||||
|
'total_tokens': len(prompt) + len(chunk['text_new']),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
})
|
)
|
||||||
|
|
||||||
|
|
||||||
class Completion:
|
class Completion:
|
||||||
def create(
|
def create(
|
||||||
model : str = 'gpt-4',
|
model: str = 'gpt-4',
|
||||||
custom_model : str = None,
|
custom_model: str = None,
|
||||||
prompt: str = 'hello world',
|
prompt: str = 'hello world',
|
||||||
token : str = ''):
|
token: str = '',
|
||||||
|
):
|
||||||
models = {
|
models = {
|
||||||
'sage' : 'capybara',
|
'sage': 'capybara',
|
||||||
'gpt-4' : 'beaver',
|
'gpt-4': 'beaver',
|
||||||
'claude-v1.2' : 'a2_2',
|
'claude-v1.2': 'a2_2',
|
||||||
'claude-instant-v1.0' : 'a2',
|
'claude-instant-v1.0': 'a2',
|
||||||
'gpt-3.5-turbo' : 'chinchilla'
|
'gpt-3.5-turbo': 'chinchilla',
|
||||||
}
|
}
|
||||||
|
|
||||||
_model = models[model] if not custom_model else custom_model
|
_model = models[model] if not custom_model else custom_model
|
||||||
|
|
||||||
client = PoeClient(token)
|
client = PoeClient(token)
|
||||||
|
|
||||||
for chunk in client.send_message(_model, prompt):
|
for chunk in client.send_message(_model, prompt):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
return PoeResponse({
|
return PoeResponse(
|
||||||
'id' : chunk["messageId"],
|
{
|
||||||
'object' : 'text_completion',
|
'id': chunk['messageId'],
|
||||||
'created': chunk['creationTime'],
|
'object': 'text_completion',
|
||||||
'model' : _model,
|
'created': chunk['creationTime'],
|
||||||
'choices': [{
|
'model': _model,
|
||||||
'text' : chunk["text"],
|
'choices': [
|
||||||
'index' : 0,
|
{
|
||||||
'logprobs' : None,
|
'text': chunk['text'],
|
||||||
'finish_reason' : 'stop'
|
'index': 0,
|
||||||
}],
|
'logprobs': None,
|
||||||
|
'finish_reason': 'stop',
|
||||||
|
}
|
||||||
|
],
|
||||||
'usage': {
|
'usage': {
|
||||||
'prompt_tokens' : len(prompt),
|
'prompt_tokens': len(prompt),
|
||||||
'completion_tokens' : len(chunk["text"]),
|
'completion_tokens': len(chunk['text']),
|
||||||
'total_tokens' : len(prompt) + len(chunk["text"])
|
'total_tokens': len(prompt) + len(chunk['text']),
|
||||||
}
|
},
|
||||||
})
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class Poe:
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
model: str = 'ChatGPT',
|
||||||
|
driver: str = 'firefox',
|
||||||
|
download_driver: bool = False,
|
||||||
|
driver_path: Optional[str] = None,
|
||||||
|
cookie_path: str = './quora/cookie.json',
|
||||||
|
):
|
||||||
|
# validating the model
|
||||||
|
if model and model not in MODELS:
|
||||||
|
raise RuntimeError('Sorry, the model you provided does not exist. Please check and try again.')
|
||||||
|
self.model = MODELS[model]
|
||||||
|
self.cookie_path = cookie_path
|
||||||
|
self.cookie = self.__load_cookie(driver, download_driver, driver_path=driver_path)
|
||||||
|
self.client = PoeClient(self.cookie)
|
||||||
|
|
||||||
|
def __load_cookie(self, driver: str, download_driver: bool, driver_path: Optional[str] = None) -> str:
|
||||||
|
if (cookie_file := Path(self.cookie_path)).exists():
|
||||||
|
with cookie_file.open() as fp:
|
||||||
|
cookie = json.load(fp)
|
||||||
|
if datetime.fromtimestamp(cookie['expiry']) < datetime.now():
|
||||||
|
cookie = self.__register_and_get_cookie(driver, driver_path=driver_path)
|
||||||
|
else:
|
||||||
|
print('Loading the cookie from file')
|
||||||
|
else:
|
||||||
|
cookie = self.__register_and_get_cookie(driver, driver_path=driver_path)
|
||||||
|
|
||||||
|
return unquote(cookie['value'])
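`__load_cookie` only reuses the cached Selenium cookie while its `expiry` timestamp is still in the future; otherwise it re-registers through the browser. A minimal sketch of that check, assuming the same default `cookie.json` layout:

```python
import json
from datetime import datetime
from pathlib import Path

cookie_file = Path('./quora/cookie.json')   # same default path as above
if cookie_file.exists():
    cookie = json.loads(cookie_file.read_text())
    expired = datetime.fromtimestamp(cookie['expiry']) < datetime.now()
    print('cached cookie expired:', expired)
else:
    print('no cached cookie; a browser login would be required')
```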
|
||||||
|
|
||||||
|
def __register_and_get_cookie(self, driver: str, driver_path: Optional[str] = None) -> dict:
|
||||||
|
mail_client = Emailnator()
|
||||||
|
mail_address = mail_client.get_mail()
|
||||||
|
|
||||||
|
driver = self.__resolve_driver(driver, driver_path=driver_path)
|
||||||
|
driver.get("https://www.poe.com")
|
||||||
|
|
||||||
|
# clicking use email button
|
||||||
|
driver.find_element(By.XPATH, '//button[contains(text(), "Use email")]').click()
|
||||||
|
|
||||||
|
email = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, '//input[@type="email"]')))
|
||||||
|
email.send_keys(mail_address)
|
||||||
|
driver.find_element(By.XPATH, '//button[text()="Go"]').click()
|
||||||
|
|
||||||
|
code = findall(r';">(\d{6,7})</div>', mail_client.get_message())[0]
|
||||||
|
print(code)
|
||||||
|
|
||||||
|
verification_code = WebDriverWait(driver, 30).until(
|
||||||
|
EC.presence_of_element_located((By.XPATH, '//input[@placeholder="Code"]'))
|
||||||
|
)
|
||||||
|
verification_code.send_keys(code)
|
||||||
|
verify_button = EC.presence_of_element_located((By.XPATH, '//button[text()="Verify"]'))
|
||||||
|
login_button = EC.presence_of_element_located((By.XPATH, '//button[text()="Log In"]'))
|
||||||
|
|
||||||
|
WebDriverWait(driver, 30).until(EC.any_of(verify_button, login_button)).click()
|
||||||
|
|
||||||
|
cookie = driver.get_cookie('p-b')
|
||||||
|
|
||||||
|
with open(self.cookie_path, 'w') as fw:
|
||||||
|
json.dump(cookie, fw)
|
||||||
|
|
||||||
|
driver.close()
|
||||||
|
return cookie
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def __resolve_driver(cls, driver: str, driver_path: Optional[str] = None) -> Union[Firefox, Chrome]:
|
||||||
|
options = FirefoxOptions() if driver == 'firefox' else ChromeOptions()
|
||||||
|
options.add_argument('-headless')
|
||||||
|
|
||||||
|
if driver_path:
|
||||||
|
options.binary_location = driver_path
|
||||||
|
try:
|
||||||
|
return Firefox(options=options) if driver == 'firefox' else Chrome(options=options)
|
||||||
|
except Exception:
|
||||||
|
raise Exception(SELENIUM_WEB_DRIVER_ERROR_MSG)
|
||||||
|
|
||||||
|
def chat(self, message: str, model: Optional[str] = None) -> str:
|
||||||
|
if model and model not in MODELS:
|
||||||
|
raise RuntimeError('Sorry, the model you provided does not exist. Please check and try again.')
|
||||||
|
model = MODELS[model] if model else self.model
|
||||||
|
response = None
|
||||||
|
for chunk in self.client.send_message(model, message):
|
||||||
|
response = chunk['text']
|
||||||
|
return response
|
||||||
|
|
||||||
|
def create_bot(
|
||||||
|
self,
|
||||||
|
name: str,
|
||||||
|
/,
|
||||||
|
prompt: str = '',
|
||||||
|
base_model: str = 'ChatGPT',
|
||||||
|
description: str = '',
|
||||||
|
) -> None:
|
||||||
|
if base_model not in MODELS:
|
||||||
|
raise RuntimeError('Sorry, the base_model you provided does not exist. Please check and try again.')
|
||||||
|
|
||||||
|
response = self.client.create_bot(
|
||||||
|
handle=name,
|
||||||
|
prompt=prompt,
|
||||||
|
base_model=MODELS[base_model],
|
||||||
|
description=description,
|
||||||
|
)
|
||||||
|
print(f'Successfully created bot with name: {response["bot"]["displayName"]}')
|
||||||
|
|
||||||
|
def list_bots(self) -> list:
|
||||||
|
return list(self.client.bot_names.values())
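A hedged usage sketch for the wrapper class above (the cookie path is a placeholder; on the first run without a cached cookie it launches a headless-browser signup):

```python
from quora import Poe  # assumes this repository is on PYTHONPATH

poe = Poe(model='ChatGPT', cookie_path='cookie.json')  # hypothetical cookie path
print(poe.list_bots())

# chat() accepts an optional per-call override from the MODELS table
print(poe.chat('Summarise the rules of football in one sentence.', model='Sage'))
```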
|
||||||
|
|||||||
250
quora/api.py
@@ -1,14 +1,14 @@
|
|||||||
# This file was taken from the repository poe-api https://github.com/ading2210/poe-api and is unmodified
|
# This file was taken from the repository poe-api https://github.com/ading2210/poe-api and is unmodified
|
||||||
# This file is licensed under the GNU GPL v3 and written by @ading2210
|
# This file is licensed under the GNU GPL v3 and written by @ading2210
|
||||||
|
|
||||||
# license:
|
# license:
|
||||||
# ading2210/poe-api: a reverse engineered Python API wrapper for Quora's Poe
|
# ading2210/poe-api: a reverse engineered Python API wrapper for Quora's Poe
|
||||||
# Copyright (C) 2023 ading2210
|
# Copyright (C) 2023 ading2210
|
||||||
|
|
||||||
# This program is free software: you can redistribute it and/or modify
|
# This program is free software: you can redistribute it and/or modify
|
||||||
# it under the terms of the GNU General Public License as published by
|
# it under the terms of the GNU General Public License as published by
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
# (at your option) any later version.
|
# (at your option) any later version.
|
||||||
|
|
||||||
# This program is distributed in the hope that it will be useful,
|
# This program is distributed in the hope that it will be useful,
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
@@ -18,20 +18,21 @@
|
|||||||
# You should have received a copy of the GNU General Public License
|
# You should have received a copy of the GNU General Public License
|
||||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
import requests
|
|
||||||
import re
|
|
||||||
import json
|
|
||||||
import random
|
|
||||||
import logging
|
|
||||||
import time
|
|
||||||
import queue
|
|
||||||
import threading
|
|
||||||
import traceback
|
|
||||||
import hashlib
|
import hashlib
|
||||||
import websocket
|
import json
|
||||||
|
import logging
|
||||||
|
import queue
|
||||||
|
import random
|
||||||
|
import re
|
||||||
|
import threading
|
||||||
|
import time
|
||||||
|
import traceback
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from urllib.parse import urlparse
|
from urllib.parse import urlparse
|
||||||
|
|
||||||
|
import requests
|
||||||
|
import requests.adapters
|
||||||
|
import websocket
|
||||||
|
|
||||||
parent_path = Path(__file__).resolve().parent
|
parent_path = Path(__file__).resolve().parent
|
||||||
queries_path = parent_path / "graphql"
|
queries_path = parent_path / "graphql"
|
||||||
@@ -52,10 +53,7 @@ def load_queries():
|
|||||||
|
|
||||||
|
|
||||||
def generate_payload(query_name, variables):
|
def generate_payload(query_name, variables):
|
||||||
return {
|
return {"query": queries[query_name], "variables": variables}
|
||||||
"query": queries[query_name],
|
|
||||||
"variables": variables
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def request_with_retries(method, *args, **kwargs):
|
def request_with_retries(method, *args, **kwargs):
|
||||||
@@ -66,7 +64,8 @@ def request_with_retries(method, *args, **kwargs):
|
|||||||
if r.status_code == 200:
|
if r.status_code == 200:
|
||||||
return r
|
return r
|
||||||
logger.warn(
|
logger.warn(
|
||||||
f"Server returned a status code of {r.status_code} while downloading {url}. Retrying ({i+1}/{attempts})...")
|
f"Server returned a status code of {r.status_code} while downloading {url}. Retrying ({i + 1}/{attempts})..."
|
||||||
|
)
|
||||||
|
|
||||||
raise RuntimeError(f"Failed to download {url} too many times.")
|
raise RuntimeError(f"Failed to download {url} too many times.")
|
||||||
|
|
||||||
@@ -80,12 +79,12 @@ class Client:
|
|||||||
def __init__(self, token, proxy=None):
|
def __init__(self, token, proxy=None):
|
||||||
self.proxy = proxy
|
self.proxy = proxy
|
||||||
self.session = requests.Session()
|
self.session = requests.Session()
|
||||||
|
self.adapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100)
|
||||||
|
self.session.mount("http://", self.adapter)
|
||||||
|
self.session.mount("https://", self.adapter)
|
||||||
|
|
||||||
if proxy:
|
if proxy:
|
||||||
self.session.proxies = {
|
self.session.proxies = {"http": self.proxy, "https": self.proxy}
|
||||||
"http": self.proxy,
|
|
||||||
"https": self.proxy
|
|
||||||
}
|
|
||||||
logger.info(f"Proxy enabled: {self.proxy}")
|
logger.info(f"Proxy enabled: {self.proxy}")
|
||||||
|
|
||||||
self.active_messages = {}
|
self.active_messages = {}
|
||||||
@@ -117,11 +116,11 @@ class Client:
|
|||||||
self.subscribe()
|
self.subscribe()
|
||||||
|
|
||||||
def extract_formkey(self, html):
|
def extract_formkey(self, html):
|
||||||
script_regex = r'<script>if\(.+\)throw new Error;(.+)</script>'
|
script_regex = r"<script>if\(.+\)throw new Error;(.+)</script>"
|
||||||
script_text = re.search(script_regex, html).group(1)
|
script_text = re.search(script_regex, html).group(1)
|
||||||
key_regex = r'var .="([0-9a-f]+)",'
|
key_regex = r'var .="([0-9a-f]+)",'
|
||||||
key_text = re.search(key_regex, script_text).group(1)
|
key_text = re.search(key_regex, script_text).group(1)
|
||||||
cipher_regex = r'.\[(\d+)\]=.\[(\d+)\]'
|
cipher_regex = r".\[(\d+)\]=.\[(\d+)\]"
|
||||||
cipher_pairs = re.findall(cipher_regex, script_text)
|
cipher_pairs = re.findall(cipher_regex, script_text)
|
||||||
|
|
||||||
formkey_list = [""] * len(cipher_pairs)
|
formkey_list = [""] * len(cipher_pairs)
|
||||||
@@ -143,12 +142,12 @@ class Client:
|
|||||||
if overwrite_vars:
|
if overwrite_vars:
|
||||||
self.formkey = self.extract_formkey(r.text)
|
self.formkey = self.extract_formkey(r.text)
|
||||||
self.viewer = next_data["props"]["pageProps"]["payload"]["viewer"]
|
self.viewer = next_data["props"]["pageProps"]["payload"]["viewer"]
|
||||||
|
self.next_data = next_data
|
||||||
|
|
||||||
return next_data
|
return next_data
|
||||||
|
|
||||||
def get_bot(self, display_name):
|
def get_bot(self, display_name):
|
||||||
url = f'https://poe.com/_next/data/{self.next_data["buildId"]}/{display_name}.json'
|
url = f'https://poe.com/_next/data/{self.next_data["buildId"]}/{display_name}.json'
|
||||||
logger.info("Downloading "+url)
|
|
||||||
|
|
||||||
r = request_with_retries(self.session.get, url)
|
r = request_with_retries(self.session.get, url)
|
||||||
|
|
||||||
@@ -156,8 +155,9 @@ class Client:
|
|||||||
return chat_data
|
return chat_data
|
||||||
|
|
||||||
def get_bots(self, download_next_data=True):
|
def get_bots(self, download_next_data=True):
|
||||||
|
logger.info("Downloading all bots...")
|
||||||
if download_next_data:
|
if download_next_data:
|
||||||
next_data = self.get_next_data()
|
next_data = self.get_next_data(overwrite_vars=True)
|
||||||
else:
|
else:
|
||||||
next_data = self.next_data
|
next_data = self.next_data
|
||||||
|
|
||||||
@@ -165,11 +165,22 @@ class Client:
|
|||||||
raise RuntimeError("Invalid token or no bots are available.")
|
raise RuntimeError("Invalid token or no bots are available.")
|
||||||
bot_list = self.viewer["availableBots"]
|
bot_list = self.viewer["availableBots"]
|
||||||
|
|
||||||
|
threads = []
|
||||||
bots = {}
|
bots = {}
|
||||||
for bot in bot_list:
|
|
||||||
|
def get_bot_thread(bot):
|
||||||
chat_data = self.get_bot(bot["displayName"])
|
chat_data = self.get_bot(bot["displayName"])
|
||||||
bots[chat_data["defaultBotObject"]["nickname"]] = chat_data
|
bots[chat_data["defaultBotObject"]["nickname"]] = chat_data
|
||||||
|
|
||||||
|
for bot in bot_list:
|
||||||
|
thread = threading.Thread(target=get_bot_thread, args=(bot,), daemon=True)
|
||||||
|
threads.append(thread)
|
||||||
|
|
||||||
|
for thread in threads:
|
||||||
|
thread.start()
|
||||||
|
for thread in threads:
|
||||||
|
thread.join()
|
||||||
|
|
||||||
self.bots = bots
|
self.bots = bots
|
||||||
self.bot_names = self.get_bot_names()
|
self.bot_names = self.get_bot_names()
|
||||||
return bots
|
return bots
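The change above fetches every bot's chat data concurrently instead of sequentially: one daemon thread per bot, results collected into a shared dict. A minimal stand-alone sketch of the pattern (`fetch_bot` stands in for `self.get_bot(...)`):

```python
import threading
import time

def fetch_bot(name, results):
    time.sleep(0.1)                      # simulate a network call
    results[name] = {"displayName": name}

results = {}
threads = [
    threading.Thread(target=fetch_bot, args=(name, results), daemon=True)
    for name in ["Sage", "ChatGPT", "Claude-instant"]
]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(sorted(results))
```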
|
||||||
@@ -181,6 +192,10 @@ class Client:
|
|||||||
bot_names[bot_nickname] = bot_obj["displayName"]
|
bot_names[bot_nickname] = bot_obj["displayName"]
|
||||||
return bot_names
|
return bot_names
|
||||||
|
|
||||||
|
def get_remaining_messages(self, chatbot):
|
||||||
|
chat_data = self.get_bot(self.bot_names[chatbot])
|
||||||
|
return chat_data["defaultBotObject"]["messageLimit"]["numMessagesRemaining"]
|
||||||
|
|
||||||
def get_channel_data(self, channel=None):
|
def get_channel_data(self, channel=None):
|
||||||
logger.info("Downloading channel data...")
|
logger.info("Downloading channel data...")
|
||||||
r = request_with_retries(self.session.get, self.settings_url)
|
r = request_with_retries(self.session.get, self.settings_url)
|
||||||
@@ -192,50 +207,50 @@ class Client:
|
|||||||
if channel is None:
|
if channel is None:
|
||||||
channel = self.channel
|
channel = self.channel
|
||||||
query = f'?min_seq={channel["minSeq"]}&channel={channel["channel"]}&hash={channel["channelHash"]}'
|
query = f'?min_seq={channel["minSeq"]}&channel={channel["channel"]}&hash={channel["channelHash"]}'
|
||||||
return f'wss://{self.ws_domain}.tch.{channel["baseHost"]}/up/{channel["boxName"]}/updates'+query
|
return f'wss://{self.ws_domain}.tch.{channel["baseHost"]}/up/{channel["boxName"]}/updates' + query
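The websocket URL is assembled from the channel data returned by `/api/settings`. A sketch with invented field values (the real `ws_domain` is randomised per client):

```python
channel = {
    "minSeq": "1",
    "channel": "poe-chan00-0000-aaaaaaaaaaaaaaaaaaaa",
    "channelHash": "00000000",
    "baseHost": "poe.com",
    "boxName": "chan00-0000",
}
ws_domain = "tch0"  # hypothetical value

query = f'?min_seq={channel["minSeq"]}&channel={channel["channel"]}&hash={channel["channelHash"]}'
url = f'wss://{ws_domain}.tch.{channel["baseHost"]}/up/{channel["boxName"]}/updates' + query
print(url)
```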
|
||||||
|
|
||||||
def send_query(self, query_name, variables):
|
def send_query(self, query_name, variables):
|
||||||
for i in range(20):
|
for i in range(20):
|
||||||
json_data = generate_payload(query_name, variables)
|
json_data = generate_payload(query_name, variables)
|
||||||
payload = json.dumps(json_data, separators=(",", ":"))
|
payload = json.dumps(json_data, separators=(",", ":"))
|
||||||
|
|
||||||
base_string = payload + \
|
base_string = payload + self.gql_headers["poe-formkey"] + "WpuLMiXEKKE98j56k"
|
||||||
self.gql_headers["poe-formkey"] + "WpuLMiXEKKE98j56k"
|
|
||||||
|
|
||||||
headers = {
|
headers = {
|
||||||
"content-type": "application/json",
|
"content-type": "application/json",
|
||||||
"poe-tag-id": hashlib.md5(base_string.encode()).hexdigest()
|
"poe-tag-id": hashlib.md5(base_string.encode()).hexdigest(),
|
||||||
}
|
}
|
||||||
headers = {**self.gql_headers, **headers}
|
headers = {**self.gql_headers, **headers}
|
||||||
|
|
||||||
r = request_with_retries(
|
r = request_with_retries(self.session.post, self.gql_url, data=payload, headers=headers)
|
||||||
self.session.post, self.gql_url, data=payload, headers=headers)
|
|
||||||
|
|
||||||
data = r.json()
|
data = r.json()
|
||||||
if data["data"] == None:
|
if data["data"] == None:
|
||||||
logger.warn(
|
logger.warn(f'{query_name} returned an error: {data["errors"][0]["message"]} | Retrying ({i + 1}/20)')
|
||||||
f'{query_name} returned an error: {data["errors"][0]["message"]} | Retrying ({i+1}/20)')
|
|
||||||
time.sleep(2)
|
time.sleep(2)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
return r.json()
|
return r.json()
|
||||||
|
|
||||||
raise RuntimeError(f'{query_name} failed too many times.')
|
raise RuntimeError(f"{query_name} failed too many times.")
|
||||||
|
|
||||||
def subscribe(self):
|
def subscribe(self):
|
||||||
logger.info("Subscribing to mutations")
|
logger.info("Subscribing to mutations")
|
||||||
result = self.send_query("SubscriptionsMutation", {
|
result = self.send_query(
|
||||||
"subscriptions": [
|
"SubscriptionsMutation",
|
||||||
{
|
{
|
||||||
"subscriptionName": "messageAdded",
|
"subscriptions": [
|
||||||
"query": queries["MessageAddedSubscription"]
|
{
|
||||||
},
|
"subscriptionName": "messageAdded",
|
||||||
{
|
"query": queries["MessageAddedSubscription"],
|
||||||
"subscriptionName": "viewerStateUpdated",
|
},
|
||||||
"query": queries["ViewerStateUpdatedSubscription"]
|
{
|
||||||
}
|
"subscriptionName": "viewerStateUpdated",
|
||||||
]
|
"query": queries["ViewerStateUpdatedSubscription"],
|
||||||
})
|
},
|
||||||
|
]
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
def ws_run_thread(self):
|
def ws_run_thread(self):
|
||||||
kwargs = {}
|
kwargs = {}
|
||||||
@@ -244,7 +259,7 @@ class Client:
|
|||||||
kwargs = {
|
kwargs = {
|
||||||
"proxy_type": proxy_parsed.scheme,
|
"proxy_type": proxy_parsed.scheme,
|
||||||
"http_proxy_host": proxy_parsed.hostname,
|
"http_proxy_host": proxy_parsed.hostname,
|
||||||
"http_proxy_port": proxy_parsed.port
|
"http_proxy_port": proxy_parsed.port,
|
||||||
}
|
}
|
||||||
|
|
||||||
self.ws.run_forever(**kwargs)
|
self.ws.run_forever(**kwargs)
|
||||||
@@ -257,7 +272,7 @@ class Client:
|
|||||||
on_message=self.on_message,
|
on_message=self.on_message,
|
||||||
on_open=self.on_ws_connect,
|
on_open=self.on_ws_connect,
|
||||||
on_error=self.on_ws_error,
|
on_error=self.on_ws_error,
|
||||||
on_close=self.on_ws_close
|
on_close=self.on_ws_close,
|
||||||
)
|
)
|
||||||
t = threading.Thread(target=self.ws_run_thread, daemon=True)
|
t = threading.Thread(target=self.ws_run_thread, daemon=True)
|
||||||
t.start()
|
t.start()
|
||||||
@@ -274,8 +289,7 @@ class Client:
|
|||||||
|
|
||||||
def on_ws_close(self, ws, close_status_code, close_message):
|
def on_ws_close(self, ws, close_status_code, close_message):
|
||||||
self.ws_connected = False
|
self.ws_connected = False
|
||||||
logger.warn(
|
logger.warn(f"Websocket closed with status {close_status_code}: {close_message}")
|
||||||
f"Websocket closed with status {close_status_code}: {close_message}")
|
|
||||||
|
|
||||||
def on_ws_error(self, ws, error):
|
def on_ws_error(self, ws, error):
|
||||||
self.disconnect_ws()
|
self.disconnect_ws()
|
||||||
@@ -328,13 +342,16 @@ class Client:
|
|||||||
self.setup_connection()
|
self.setup_connection()
|
||||||
self.connect_ws()
|
self.connect_ws()
|
||||||
|
|
||||||
message_data = self.send_query("SendMessageMutation", {
|
message_data = self.send_query(
|
||||||
"bot": chatbot,
|
"SendMessageMutation",
|
||||||
"query": message,
|
{
|
||||||
"chatId": self.bots[chatbot]["chatId"],
|
"bot": chatbot,
|
||||||
"source": None,
|
"query": message,
|
||||||
"withChatBreak": with_chat_break
|
"chatId": self.bots[chatbot]["chatId"],
|
||||||
})
|
"source": None,
|
||||||
|
"withChatBreak": with_chat_break,
|
||||||
|
},
|
||||||
|
)
|
||||||
del self.active_messages["pending"]
|
del self.active_messages["pending"]
|
||||||
|
|
||||||
if not message_data["data"]["messageEdgeCreate"]["message"]:
|
if not message_data["data"]["messageEdgeCreate"]["message"]:
|
||||||
@@ -343,8 +360,7 @@ class Client:
|
|||||||
human_message = message_data["data"]["messageEdgeCreate"]["message"]
|
human_message = message_data["data"]["messageEdgeCreate"]["message"]
|
||||||
human_message_id = human_message["node"]["messageId"]
|
human_message_id = human_message["node"]["messageId"]
|
||||||
except TypeError:
|
except TypeError:
|
||||||
raise RuntimeError(
|
raise RuntimeError(f"An unknown error occurred. Raw response data: {message_data}")
|
||||||
f"An unknown error occured. Raw response data: {message_data}")
|
|
||||||
|
|
||||||
# indicate that the current message is waiting for a response
|
# indicate that the current message is waiting for a response
|
||||||
self.active_messages[human_message_id] = None
|
self.active_messages[human_message_id] = None
|
||||||
@@ -354,8 +370,7 @@ class Client:
|
|||||||
message_id = None
|
message_id = None
|
||||||
while True:
|
while True:
|
||||||
try:
|
try:
|
||||||
message = self.message_queues[human_message_id].get(
|
message = self.message_queues[human_message_id].get(timeout=timeout)
|
||||||
timeout=timeout)
|
|
||||||
except queue.Empty:
|
except queue.Empty:
|
||||||
del self.active_messages[human_message_id]
|
del self.active_messages[human_message_id]
|
||||||
del self.message_queues[human_message_id]
|
del self.message_queues[human_message_id]
|
||||||
@@ -369,7 +384,7 @@ class Client:
|
|||||||
continue
|
continue
|
||||||
|
|
||||||
# update info about response
|
# update info about response
|
||||||
message["text_new"] = message["text"][len(last_text):]
|
message["text_new"] = message["text"][len(last_text) :]
|
||||||
last_text = message["text"]
|
last_text = message["text"]
|
||||||
message_id = message["messageId"]
|
message_id = message["messageId"]
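Each websocket update carries the full reply generated so far, so the newly generated part (`text_new`) is just the suffix beyond the previously seen text. A small illustration:

```python
updates = ["Foot", "Footba", "Football is", "Football is fun."]

last_text = ""
for text in updates:
    text_new = text[len(last_text):]   # same slicing trick as above
    last_text = text
    print(repr(text_new))
```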
|
||||||
|
|
||||||
@@ -380,9 +395,7 @@ class Client:
|
|||||||
|
|
||||||
def send_chat_break(self, chatbot):
|
def send_chat_break(self, chatbot):
|
||||||
logger.info(f"Sending chat break to {chatbot}")
|
logger.info(f"Sending chat break to {chatbot}")
|
||||||
result = self.send_query("AddMessageBreakMutation", {
|
result = self.send_query("AddMessageBreakMutation", {"chatId": self.bots[chatbot]["chatId"]})
|
||||||
"chatId": self.bots[chatbot]["chatId"]
|
|
||||||
})
|
|
||||||
return result["data"]["messageBreakCreate"]["message"]
|
return result["data"]["messageBreakCreate"]["message"]
|
||||||
|
|
||||||
def get_message_history(self, chatbot, count=25, cursor=None):
|
def get_message_history(self, chatbot, count=25, cursor=None):
|
||||||
@@ -399,23 +412,20 @@ class Client:
|
|||||||
|
|
||||||
cursor = str(cursor)
|
cursor = str(cursor)
|
||||||
if count > 50:
|
if count > 50:
|
||||||
messages = self.get_message_history(
|
messages = self.get_message_history(chatbot, count=50, cursor=cursor) + messages
|
||||||
chatbot, count=50, cursor=cursor) + messages
|
|
||||||
while count > 0:
|
while count > 0:
|
||||||
count -= 50
|
count -= 50
|
||||||
new_cursor = messages[0]["cursor"]
|
new_cursor = messages[0]["cursor"]
|
||||||
new_messages = self.get_message_history(
|
new_messages = self.get_message_history(chatbot, min(50, count), cursor=new_cursor)
|
||||||
chatbot, min(50, count), cursor=new_cursor)
|
|
||||||
messages = new_messages + messages
|
messages = new_messages + messages
|
||||||
return messages
|
return messages
|
||||||
elif count <= 0:
|
elif count <= 0:
|
||||||
return messages
|
return messages
|
||||||
|
|
||||||
result = self.send_query("ChatListPaginationQuery", {
|
result = self.send_query(
|
||||||
"count": count,
|
"ChatListPaginationQuery",
|
||||||
"cursor": cursor,
|
{"count": count, "cursor": cursor, "id": self.bots[chatbot]["id"]},
|
||||||
"id": self.bots[chatbot]["id"]
|
)
|
||||||
})
|
|
||||||
query_messages = result["data"]["node"]["messagesConnection"]["edges"]
|
query_messages = result["data"]["node"]["messagesConnection"]["edges"]
|
||||||
messages = query_messages + messages
|
messages = query_messages + messages
|
||||||
return messages
|
return messages
|
||||||
@@ -425,9 +435,7 @@ class Client:
|
|||||||
if not type(message_ids) is list:
|
if not type(message_ids) is list:
|
||||||
message_ids = [int(message_ids)]
|
message_ids = [int(message_ids)]
|
||||||
|
|
||||||
result = self.send_query("DeleteMessageMutation", {
|
result = self.send_query("DeleteMessageMutation", {"messageIds": message_ids})
|
||||||
"messageIds": message_ids
|
|
||||||
})
|
|
||||||
|
|
||||||
def purge_conversation(self, chatbot, count=-1):
|
def purge_conversation(self, chatbot, count=-1):
|
||||||
logger.info(f"Purging messages from {chatbot}")
|
logger.info(f"Purging messages from {chatbot}")
|
||||||
@@ -447,5 +455,91 @@ class Client:
|
|||||||
last_messages = self.get_message_history(chatbot, count=50)[::-1]
|
last_messages = self.get_message_history(chatbot, count=50)[::-1]
|
||||||
logger.info(f"No more messages left to delete.")
|
logger.info(f"No more messages left to delete.")
|
||||||
|
|
||||||
|
def create_bot(
|
||||||
|
self,
|
||||||
|
handle,
|
||||||
|
prompt="",
|
||||||
|
base_model="chinchilla",
|
||||||
|
description="",
|
||||||
|
intro_message="",
|
||||||
|
api_key=None,
|
||||||
|
api_bot=False,
|
||||||
|
api_url=None,
|
||||||
|
prompt_public=True,
|
||||||
|
pfp_url=None,
|
||||||
|
linkification=False,
|
||||||
|
markdown_rendering=True,
|
||||||
|
suggested_replies=False,
|
||||||
|
private=False,
|
||||||
|
):
|
||||||
|
result = self.send_query(
|
||||||
|
"PoeBotCreateMutation",
|
||||||
|
{
|
||||||
|
"model": base_model,
|
||||||
|
"handle": handle,
|
||||||
|
"prompt": prompt,
|
||||||
|
"isPromptPublic": prompt_public,
|
||||||
|
"introduction": intro_message,
|
||||||
|
"description": description,
|
||||||
|
"profilePictureUrl": pfp_url,
|
||||||
|
"apiUrl": api_url,
|
||||||
|
"apiKey": api_key,
|
||||||
|
"isApiBot": api_bot,
|
||||||
|
"hasLinkification": linkification,
|
||||||
|
"hasMarkdownRendering": markdown_rendering,
|
||||||
|
"hasSuggestedReplies": suggested_replies,
|
||||||
|
"isPrivateBot": private,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
data = result["data"]["poeBotCreate"]
|
||||||
|
if data["status"] != "success":
|
||||||
|
raise RuntimeError(f"Poe returned an error while trying to create a bot: {data['status']}")
|
||||||
|
self.get_bots()
|
||||||
|
return data
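A hedged usage sketch for the new `create_bot` helper (token and handle are placeholders, and bot creation must be enabled on the account); `edit_bot` takes the existing bot's numeric id plus the same set of flags:

```python
from quora.api import Client

client = Client('p-b-cookie-value')          # hypothetical p-b cookie token
result = client.create_bot(
    handle='my-test-bot-1234',               # made-up handle
    prompt='You are a terse assistant.',
    base_model='chinchilla',                 # Poe's internal name for gpt-3.5-turbo
)
print(result['status'], result['bot']['id'])
```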
|
||||||
|
|
||||||
|
def edit_bot(
|
||||||
|
self,
|
||||||
|
bot_id,
|
||||||
|
handle,
|
||||||
|
prompt="",
|
||||||
|
base_model="chinchilla",
|
||||||
|
description="",
|
||||||
|
intro_message="",
|
||||||
|
api_key=None,
|
||||||
|
api_url=None,
|
||||||
|
private=False,
|
||||||
|
prompt_public=True,
|
||||||
|
pfp_url=None,
|
||||||
|
linkification=False,
|
||||||
|
markdown_rendering=True,
|
||||||
|
suggested_replies=False,
|
||||||
|
):
|
||||||
|
result = self.send_query(
|
||||||
|
"PoeBotEditMutation",
|
||||||
|
{
|
||||||
|
"baseBot": base_model,
|
||||||
|
"botId": bot_id,
|
||||||
|
"handle": handle,
|
||||||
|
"prompt": prompt,
|
||||||
|
"isPromptPublic": prompt_public,
|
||||||
|
"introduction": intro_message,
|
||||||
|
"description": description,
|
||||||
|
"profilePictureUrl": pfp_url,
|
||||||
|
"apiUrl": api_url,
|
||||||
|
"apiKey": api_key,
|
||||||
|
"hasLinkification": linkification,
|
||||||
|
"hasMarkdownRendering": markdown_rendering,
|
||||||
|
"hasSuggestedReplies": suggested_replies,
|
||||||
|
"isPrivateBot": private,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
data = result["data"]["poeBotEdit"]
|
||||||
|
if data["status"] != "success":
|
||||||
|
raise RuntimeError(f"Poe returned an error while trying to edit a bot: {data['status']}")
|
||||||
|
self.get_bots()
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
load_queries()
|
load_queries()
|
||||||
|
|||||||
@@ -16,3 +16,15 @@ nBvuNYRLaE4xE4HuzBPiIQ==
|
|||||||
oyae3iClomSrk6RJywZ4iw==
|
oyae3iClomSrk6RJywZ4iw==
|
||||||
1Z27Ul8BTdNOhncT5H6wdg==
|
1Z27Ul8BTdNOhncT5H6wdg==
|
||||||
wfUfJIlwQwUss8l-3kDt3w==
|
wfUfJIlwQwUss8l-3kDt3w==
|
||||||
|
f6Jw_Nr0PietpNCtOCXJTw==
|
||||||
|
6Jc3yCs7XhDRNHa4ZML09g==
|
||||||
|
3vy44sIy-ZlTMofFiFDttw==
|
||||||
|
p9FbMGGiK1rShKgL3YWkDg==
|
||||||
|
pw6LI5Op84lf4HOY7fn91A==
|
||||||
|
QemKm6aothMvqcEgeKFDlQ==
|
||||||
|
cceZzucA-CEHR0Gt6VLYLQ==
|
||||||
|
JRRObMp2RHVn5u4730DPvQ==
|
||||||
|
XNt0wLTjX7Z-EsRR3TJMIQ==
|
||||||
|
csjjirAUKtT5HT1KZUq1kg==
|
||||||
|
8qZdCatCPQZyS7jsO4hkdQ==
|
||||||
|
esnUxcBhvH1DmCJTeld0qw==
|
||||||
|
|||||||
73
quora/graphql/PoeBotCreateMutation.graphql
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
mutation CreateBotMain_poeBotCreate_Mutation(
|
||||||
|
$model: String!
|
||||||
|
$handle: String!
|
||||||
|
$prompt: String!
|
||||||
|
$isPromptPublic: Boolean!
|
||||||
|
$introduction: String!
|
||||||
|
$description: String!
|
||||||
|
$profilePictureUrl: String
|
||||||
|
$apiUrl: String
|
||||||
|
$apiKey: String
|
||||||
|
$isApiBot: Boolean
|
||||||
|
$hasLinkification: Boolean
|
||||||
|
$hasMarkdownRendering: Boolean
|
||||||
|
$hasSuggestedReplies: Boolean
|
||||||
|
$isPrivateBot: Boolean
|
||||||
|
) {
|
||||||
|
poeBotCreate(model: $model, handle: $handle, promptPlaintext: $prompt, isPromptPublic: $isPromptPublic, introduction: $introduction, description: $description, profilePicture: $profilePictureUrl, apiUrl: $apiUrl, apiKey: $apiKey, isApiBot: $isApiBot, hasLinkification: $hasLinkification, hasMarkdownRendering: $hasMarkdownRendering, hasSuggestedReplies: $hasSuggestedReplies, isPrivateBot: $isPrivateBot) {
|
||||||
|
status
|
||||||
|
bot {
|
||||||
|
id
|
||||||
|
...BotHeader_bot
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fragment BotHeader_bot on Bot {
|
||||||
|
displayName
|
||||||
|
messageLimit {
|
||||||
|
dailyLimit
|
||||||
|
}
|
||||||
|
...BotImage_bot
|
||||||
|
...BotLink_bot
|
||||||
|
...IdAnnotation_node
|
||||||
|
...botHelpers_useViewerCanAccessPrivateBot
|
||||||
|
...botHelpers_useDeletion_bot
|
||||||
|
}
|
||||||
|
|
||||||
|
fragment BotImage_bot on Bot {
|
||||||
|
displayName
|
||||||
|
...botHelpers_useDeletion_bot
|
||||||
|
...BotImage_useProfileImage_bot
|
||||||
|
}
|
||||||
|
|
||||||
|
fragment BotImage_useProfileImage_bot on Bot {
|
||||||
|
image {
|
||||||
|
__typename
|
||||||
|
... on LocalBotImage {
|
||||||
|
localName
|
||||||
|
}
|
||||||
|
... on UrlBotImage {
|
||||||
|
url
|
||||||
|
}
|
||||||
|
}
|
||||||
|
...botHelpers_useDeletion_bot
|
||||||
|
}
|
||||||
|
|
||||||
|
fragment BotLink_bot on Bot {
|
||||||
|
displayName
|
||||||
|
}
|
||||||
|
|
||||||
|
fragment IdAnnotation_node on Node {
|
||||||
|
__isNode: __typename
|
||||||
|
id
|
||||||
|
}
|
||||||
|
|
||||||
|
fragment botHelpers_useDeletion_bot on Bot {
|
||||||
|
deletionState
|
||||||
|
}
|
||||||
|
|
||||||
|
fragment botHelpers_useViewerCanAccessPrivateBot on Bot {
|
||||||
|
isPrivateBot
|
||||||
|
viewerIsCreator
|
||||||
|
}
|
||||||
24  quora/graphql/PoeBotEditMutation.graphql  Normal file
@@ -0,0 +1,24 @@
mutation EditBotMain_poeBotEdit_Mutation(
  $botId: BigInt!
  $handle: String!
  $description: String!
  $introduction: String!
  $isPromptPublic: Boolean!
  $baseBot: String!
  $profilePictureUrl: String
  $prompt: String!
  $apiUrl: String
  $apiKey: String
  $hasLinkification: Boolean
  $hasMarkdownRendering: Boolean
  $hasSuggestedReplies: Boolean
  $isPrivateBot: Boolean
) {
  poeBotEdit(botId: $botId, handle: $handle, description: $description, introduction: $introduction, isPromptPublic: $isPromptPublic, model: $baseBot, promptPlaintext: $prompt, profilePicture: $profilePictureUrl, apiUrl: $apiUrl, apiKey: $apiKey, hasLinkification: $hasLinkification, hasMarkdownRendering: $hasMarkdownRendering, hasSuggestedReplies: $hasSuggestedReplies, isPrivateBot: $isPrivateBot) {
    status
    bot {
      handle
      id
    }
  }
}
@@ -1,39 +1,66 @@

Removed (the previous temp-mail.org based client):

import html
import json
from tls_client import Session


class Mail:
    def __init__(self, proxies: str = None, timeout: int = 15, bearer_token: str or None = None) -> None:
        self.session  = Session(client_identifier='chrome110')
        self.base_url = 'https://web2.temp-mail.org'
        self.proxies  = proxies
        self.timeout  = timeout

        self.session.headers['authorization'] = f'Bearer {bearer_token}' if bearer_token else None

    def get_mail(self) -> str:
        status: html = self.session.get(self.base_url).status_code

        try:
            if status == 200:
                data = self.session.post(f'{self.base_url}/mailbox').json()

                self.session.headers['authorization'] = f'Bearer {data["token"]}'
                return data["token"], data["mailbox"]

        except Exception as e:
            print(e)
            return f'Email creation error. {e} | use proxies', False

    def fetch_inbox(self) -> json:
        return self.session.get(f'{self.base_url}/messages').json()

    def get_message_content(self, message_id: str):
        return self.session.get(f'{self.base_url}/messages/{message_id}').json()["bodyHtml"]


# if __name__ == '__main__':
#     email_client = TempMail()
#     token, email = email_client.get_mail()
#     print(email)
#     print(token)

Added (the new Emailnator based client):

from json import loads
from re import findall
from time import sleep

from fake_useragent import UserAgent
from requests import Session


class Emailnator:
    def __init__(self) -> None:
        self.client = Session()
        self.client.get("https://www.emailnator.com/", timeout=6)
        self.cookies = self.client.cookies.get_dict()

        self.client.headers = {
            "authority": "www.emailnator.com",
            "origin": "https://www.emailnator.com",
            "referer": "https://www.emailnator.com/",
            "user-agent": UserAgent().random,
            "x-xsrf-token": self.client.cookies.get("XSRF-TOKEN")[:-3] + "=",
        }

        self.email = None

    def get_mail(self):
        response = self.client.post(
            "https://www.emailnator.com/generate-email",
            json={
                "email": [
                    "domain",
                    "plusGmail",
                    "dotGmail",
                ]
            },
        )

        self.email = loads(response.text)["email"][0]
        return self.email

    def get_message(self):
        print("waiting for code...")

        while True:
            sleep(2)
            mail_token = self.client.post(
                "https://www.emailnator.com/message-list", json={"email": self.email}
            )

            mail_token = loads(mail_token.text)["messageData"]

            if len(mail_token) == 2:
                print(mail_token[1]["messageID"])
                break

        mail_context = self.client.post(
            "https://www.emailnator.com/message-list",
            json={
                "email": self.email,
                "messageID": mail_token[1]["messageID"],
            },
        )

        return mail_context.text

    def get_verification_code(self):
        return findall(r';">(\d{6,7})</div>', self.get_message())[0]
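A short usage sketch for the Emailnator helper above; the `quora.mail` module path matches the import used by the account-creation test further down in this diff.

```python
from quora.mail import Emailnator

mail_client  = Emailnator()
mail_address = mail_client.get_mail()       # throwaway inbox address
print(mail_address)

# ...trigger a verification e-mail to mail_address elsewhere, then:
code = mail_client.get_verification_code()  # polls the inbox and extracts the 6-7 digit code
print(code)
```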
@@ -1,3 +1,10 @@
 websocket-client
 requests
 tls-client
+pypasser
+names
+colorama
+curl_cffi
+streamlit==1.21.0
+selenium
+fake-useragent
11  testing/forefront_test.py  Normal file
@@ -0,0 +1,11 @@
import forefront

# create an account
token = forefront.Account.create(logging=True)
print(token)

# get a response
for response in forefront.StreamingCompletion.create(token = token,
    prompt = 'hello world', model='gpt-4'):

    print(response.completion.choices[0].text, end = '')
@@ -1,27 +0,0 @@ (deleted file)
import ora

# 1 normal
# 2 solidity contract helper
# 3 swift project helper
# 4 developer gpt
# 5 lawsuit bot for spam call
# 6 p5.js code help bot
# 8 AI professor, for controversial topics
# 9 HustleGPT, your entrepreneurial AI
# 10 midjourney prompts bot
# 11 AI philosophy professor
# 12 TypeScript and JavaScript code review bot
# 13 credit card transaction details to merchant and location bot
# 15 Chemical Compound Similarity and Purchase Tool bot
# 16 expert full-stack developer AI
# 17 Solana development bot
# 18 price guessing game bot
# 19 AI Ethicist and Philosopher

gpt4_chatbot_ids = ['b8b12eaa-5d47-44d3-92a6-4d706f2bcacf', 'fbe53266-673c-4b70-9d2d-d247785ccd91', 'bd5781cf-727a-45e9-80fd-a3cfce1350c6', '993a0102-d397-47f6-98c3-2587f2c9ec3a', 'ae5c524e-d025-478b-ad46-8843a5745261', 'cc510743-e4ab-485e-9191-76960ecb6040', 'a5cd2481-8e24-4938-aa25-8e26d6233390', '6bca5930-2aa1-4bf4-96a7-bea4d32dcdac', '884a5f2b-47a2-47a5-9e0f-851bbe76b57c', 'd5f3c491-0e74-4ef7-bdca-b7d27c59e6b3', 'd72e83f6-ef4e-4702-844f-cf4bd432eef7', '6e80b170-11ed-4f1a-b992-fd04d7a9e78c', '8ef52d68-1b01-466f-bfbf-f25c13ff4a72', 'd0674e11-f22e-406b-98bc-c1ba8564f749', 'a051381d-6530-463f-be68-020afddf6a8f', '99c0afa1-9e32-4566-8909-f4ef9ac06226', '1be65282-9c59-4a96-99f8-d225059d9001', 'dba16bd8-5785-4248-a8e9-b5d1ecbfdd60', '1731450d-3226-42d0-b41c-4129fe009524', '8e74635d-000e-4819-ab2c-4e986b7a0f48', 'afe7ed01-c1ac-4129-9c71-2ca7f3800b30', 'e374c37a-8c44-4f0e-9e9f-1ad4609f24f5']
chatbot_id = gpt4_chatbot_ids[0]

model    = ora.CompletionModel.load(chatbot_id, 'gpt-4')
response = ora.Completion.create(model, 'hello')

print(response)
@@ -1,29 +0,0 @@ (deleted file)
# inport ora
import ora

# create model
model = ora.CompletionModel.create(
    system_prompt = 'You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible',
    description   = 'ChatGPT Openai Language Model',
    name          = 'gpt-3.5')

print(model.id)

# init conversation (will give you a conversationId)
init = ora.Completion.create(
    model  = model,
    prompt = 'hello world')

print(init.completion.choices[0].text)

while True:
    # pass in conversationId to continue conversation

    prompt   = input('>>> ')
    response = ora.Completion.create(
        model          = model,
        prompt         = prompt,
        includeHistory = True,
        conversationId = init.id)

    print(response.completion.choices[0].text)
@@ -1,7 +1,12 @@
 import phind
 
+# set cf_clearance cookie ( not needed at the moment)
+phind.cf_clearance = 'MDzwnr3ZWk_ap8u.iwwMR5F3WccfOkhUy_zGNDpcF3s-1682497341-0-160'
+phind.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
+
 prompt = 'hello world'
 
+# normal completion
 result = phind.Completion.create(
     model = 'gpt-4',
     prompt = prompt,
@@ -10,4 +15,18 @@ result = phind.Completion.create(
     detailed = False,
     codeContext = '') # up to 3000 chars of code
 
 print(result.completion.choices[0].text)
+
+prompt = 'who won the quatar world cup'
+
+# help needed: not getting newlines from the stream, please submit a PR if you know how to fix this
+# stream completion
+for result in phind.StreamingCompletion.create(
+    model = 'gpt-4',
+    prompt = prompt,
+    results = phind.Search.create(prompt, actualSearch = True), # create search (set actualSearch to False to disable internet)
+    creative = False,
+    detailed = False,
+    codeContext = ''): # up to 3000 chars of code
+
+    print(result.completion.choices[0].text, end='', flush=True)
104  testing/poe_account_create_test.py  Normal file
@@ -0,0 +1,104 @@
from requests import Session
from tls_client import Session as TLS
from json import dumps
from hashlib import md5
from time import sleep
from re import findall
from pypasser import reCaptchaV3
from quora import extract_formkey
from quora.mail import Emailnator
from twocaptcha import TwoCaptcha

solver = TwoCaptcha('72747bf24a9d89b4dcc1b24875efd358')

class Account:
    def create(proxy: None or str = None, logging: bool = False, enable_bot_creation: bool = False):
        client = TLS(client_identifier='chrome110')
        client.proxies = {
            'http': f'http://{proxy}',
            'https': f'http://{proxy}'} if proxy else None

        mail_client  = Emailnator()
        mail_address = mail_client.get_mail()

        if logging: print('email', mail_address)

        client.headers = {
            'authority'       : 'poe.com',
            'accept'          : '*/*',
            'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'content-type'    : 'application/json',
            'origin'          : 'https://poe.com',
            'poe-formkey'     : 'null',
            'poe-tag-id'      : 'null',
            'poe-tchannel'    : 'null',
            'referer'         : 'https://poe.com/login',
            'sec-ch-ua'       : '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent'    : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
        }

        client.headers["poe-formkey"]  = extract_formkey(client.get('https://poe.com/login').text)
        client.headers["poe-tchannel"] = client.get('https://poe.com/api/settings').json()['tchannelData']['channel']

        #token = reCaptchaV3('https://www.recaptcha.net/recaptcha/enterprise/anchor?ar=1&k=6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG&co=aHR0cHM6Ly9wb2UuY29tOjQ0Mw..&hl=en&v=4PnKmGB9wRHh1i04o7YUICeI&size=invisible&cb=bi6ivxoskyal')
        token = solver.recaptcha(sitekey='6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG',
            url        = 'https://poe.com/login?redirect_url=%2F',
            version    = 'v3',
            enterprise = 1,
            invisible  = 1,
            action     = 'login',)['code']

        payload = dumps(separators = (',', ':'), obj = {
            'queryName': 'MainSignupLoginSection_sendVerificationCodeMutation_Mutation',
            'variables': {
                'emailAddress'  : mail_address,
                'phoneNumber'   : None,
                'recaptchaToken': token
            },
            'query': 'mutation MainSignupLoginSection_sendVerificationCodeMutation_Mutation(\n $emailAddress: String\n $phoneNumber: String\n $recaptchaToken: String\n) {\n sendVerificationCode(verificationReason: login, emailAddress: $emailAddress, phoneNumber: $phoneNumber, recaptchaToken: $recaptchaToken) {\n status\n errorMessage\n }\n}\n',
        })

        base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
        client.headers["poe-tag-id"] = md5(base_string.encode()).hexdigest()

        print(dumps(client.headers, indent=4))

        response = client.post('https://poe.com/api/gql_POST', data=payload)

        if 'automated_request_detected' in response.text:
            print('please try using a proxy / wait for fix')

        if 'Bad Request' in response.text:
            if logging: print('bad request, retrying...' , response.json())
            quit()

        if logging: print('send_code' ,response.json())

        mail_content = mail_client.get_message()
        mail_token   = findall(r';">(\d{6,7})</div>', mail_content)[0]

        if logging: print('code', mail_token)

        payload = dumps(separators = (',', ':'), obj={
            "queryName": "SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation",
            "variables": {
                "verificationCode": str(mail_token),
                "emailAddress": mail_address,
                "phoneNumber": None
            },
            "query": "mutation SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation(\n $verificationCode: String!\n $emailAddress: String\n $phoneNumber: String\n) {\n signupWithVerificationCode(verificationCode: $verificationCode, emailAddress: $emailAddress, phoneNumber: $phoneNumber) {\n status\n errorMessage\n }\n}\n"
        })

        base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
        client.headers["poe-tag-id"] = md5(base_string.encode()).hexdigest()

        response = client.post('https://poe.com/api/gql_POST', data = payload)
        if logging: print('verify_code', response.json())


Account.create(proxy = 'xtekky:wegwgwegwed_streaming-1@geo.iproyal.com:12321', logging = True)
7  testing/sqlchat_test.py  Normal file
@@ -0,0 +1,7 @@
import sqlchat

for response in sqlchat.StreamCompletion.create(
        prompt   = 'write python code to reverse a string',
        messages = []):

    print(response.completion.choices[0].text, end='')
@@ -1,10 +1,7 @@
 import you
 
 # simple request with links and details
-response = you.Completion.create(
-    prompt = "hello world",
-    detailed = True,
-    includelinks = True,)
+response = you.Completion.create(prompt="hello world", detailed=True, include_links=True)
 
 print(response)
 
@@ -16,17 +13,15 @@ print(response)
 # }
 # }
 
-#chatbot
+# chatbot
 
 chat = []
 
 while True:
     prompt = input("You: ")
 
-    response = you.Completion.create(
-        prompt = prompt,
-        chat = chat)
+    response = you.Completion.create(prompt=prompt, chat=chat)
 
     print("Bot:", response["response"])
 
     chat.append({"question": prompt, "answer": response["response"]})
2  unfinished/bard/README.md  Normal file
@@ -0,0 +1,2 @@
to do:
- code refactoring
2  unfinished/bing/README.md  Normal file
@@ -0,0 +1,2 @@
to do:
- code refactoring
31  unfinished/cocalc/__init__.py  Normal file
@@ -0,0 +1,31 @@
import requests


class Completion:
    def create(prompt="What is the square root of pi",
               system_prompt="ASSUME I HAVE FULL ACCESS TO COCALC. ENCLOSE MATH IN $. INCLUDE THE LANGUAGE DIRECTLY AFTER THE TRIPLE BACKTICKS IN ALL MARKDOWN CODE BLOCKS. How can I do the following using CoCalc?") -> str:

        # Initialize a session
        session = requests.Session()

        # Set headers for the request
        headers = {
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.5',
            'Origin': 'https://cocalc.com',
            'Referer': 'https://cocalc.com/api/v2/openai/chatgpt',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
        }
        session.headers.update(headers)

        # Set the data that will be submitted
        payload = {
            "input": prompt,
            "system": system_prompt,
            "tag": "next:index"
        }

        # Submit the request
        response = session.post("https://cocalc.com/api/v2/openai/chatgpt", json=payload).json()

        # Return the results
        return response
8  unfinished/cocalc/cocalc_test.py  Normal file
@@ -0,0 +1,8 @@
import cocalc


response = cocalc.Completion.create(
    prompt = 'hello world'
)

print(response)
31  unfinished/easyai/main.py  Normal file
@@ -0,0 +1,31 @@
from requests import get
from os import urandom
from json import loads

sessionId = urandom(10).hex()

headers = {
    'Accept': 'text/event-stream',
    'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Pragma': 'no-cache',
    'Referer': 'http://easy-ai.ink/chat',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
    'token': 'null',
}

while True:
    prompt = input('you: ')

    params = {
        'message': prompt,
        'sessionId': sessionId
    }

    for chunk in get('http://easy-ai.ink/easyapi/v1/chat/completions', params = params,
                     headers = headers, verify = False, stream = True).iter_lines():

        if b'content' in chunk:
            data = loads(chunk.decode('utf-8').split('data:')[1])
            print(data['content'], end='')
4  unfinished/gptbz/README.md  Normal file
@@ -0,0 +1,4 @@
https://chat.gpt.bz

to do:
- code refactoring
30  unfinished/gptbz/__init__.py  Normal file
@@ -0,0 +1,30 @@
import websockets
from json import dumps, loads


async def test():
    async with websockets.connect('wss://chatgpt.func.icu/conversation+ws') as wss:

        await wss.send(dumps(separators=(',', ':'), obj = {
            'content_type': 'text',
            'engine'      : 'chat-gpt',
            'parts'       : ['hello world'],
            'options'     : {}
            }
        ))

        ended = None

        while not ended:
            try:
                response      = await wss.recv()
                json_response = loads(response)
                print(json_response)

                ended = json_response.get('eof')

                if not ended:
                    print(json_response['content']['parts'][0])

            except websockets.ConnectionClosed:
                break
2  unfinished/openai/README.md  Normal file
@@ -0,0 +1,2 @@
to do:
- code refactoring
3  unfinished/openaihosted/README.md  Normal file
@@ -0,0 +1,3 @@
writegpt.ai
to do:
- code ref
38  unfinished/openaihosted/__init__.py  Normal file
@@ -0,0 +1,38 @@
import requests
import json
import re

headers = {
    'authority': 'openai.a2hosted.com',
    'accept': 'text/event-stream',
    'accept-language': 'en-US,en;q=0.9,id;q=0.8,ja;q=0.7',
    'cache-control': 'no-cache',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'cross-site',
    'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.0.0',
}

def create_query_param(conversation):
    encoded_conversation = json.dumps(conversation)
    return encoded_conversation.replace(" ", "%20").replace('"', '%22').replace("'", "%27")

user_input = input("Enter your message: ")

data = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "hi"},
    {"role": "assistant", "content": "Hello! How can I assist you today?"},
    {"role": "user", "content": user_input},
]

query_param = create_query_param(data)
url = f'https://openai.a2hosted.com/chat?q={query_param}'

response = requests.get(url, headers=headers, stream=True)

for message in response.iter_content(chunk_size=1024):
    message = message.decode('utf-8')
    msg_match, num_match = re.search(r'"msg":"(.*?)"', message), re.search(r'\[DONE\] (\d+)', message)
    if msg_match: print(msg_match.group(1))
    if num_match: print(num_match.group(1))
5  unfinished/openprompt/README.md  Normal file
@@ -0,0 +1,5 @@
https://openprompt.co/

to do:
- finish integrating email client
- code refactoring
64  unfinished/openprompt/create.py  Normal file
@@ -0,0 +1,64 @@
from requests import post, get
from json import dumps
#from mail import MailClient
from time import sleep
from re import findall

html = get('https://developermail.com/mail/')
print(html.cookies.get('mailboxId'))
email = findall(r'mailto:(.*)">', html.text)[0]

headers = {
'apikey': 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InVzanNtdWZ1emRjcnJjZXVobnlqIiwicm9sZSI6ImFub24iLCJpYXQiOjE2NzgyODYyMzYsImV4cCI6MTk5Mzg2MjIzNn0.2MQ9Lkh-gPqQwV08inIgqozfbYm5jdYWtf-rn-wfQ7U',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
    'x-client-info': '@supabase/auth-helpers-nextjs@0.5.6',
}

json_data = {
    'email'   : email,
    'password': 'T4xyt4Yn6WWQ4NC',
    'data'    : {},
    'gotrue_meta_security': {},
}

response = post('https://usjsmufuzdcrrceuhnyj.supabase.co/auth/v1/signup', headers=headers, json=json_data)
print(response.json())

# email_link = None
# while not email_link:
#     sleep(1)

#     mails = mailbox.getmails()
#     print(mails)


quit()

url = input("Enter the url: ")
response = get(url, allow_redirects=False)

# https://openprompt.co/#access_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8&expires_in=604800&refresh_token=_Zp8uXIA2InTDKYgo8TCqA&token_type=bearer&type=signup

redirect = response.headers.get('location')
access_token  = redirect.split('&')[0].split('=')[1]
refresh_token = redirect.split('&')[2].split('=')[1]

supabase_auth_token = dumps([access_token, refresh_token, None, None, None], separators=(',', ':'))
print(supabase_auth_token)

cookies = {
    'supabase-auth-token': supabase_auth_token
}

json_data = {
    'messages': [
        {
            'role': 'user',
            'content': 'how do I reverse a string in python?'
        }
    ]
}

response = post('https://openprompt.co/api/chat2', cookies=cookies, json=json_data, stream=True)
for chunk in response.iter_content(chunk_size=1024):
    print(chunk)
109  unfinished/openprompt/mail.py  Normal file
@@ -0,0 +1,109 @@
import requests
import email


class MailClient:

    def __init__(self):
        self.username = None
        self.token    = None
        self.raw      = None
        self.mailids  = None
        self.mails    = None
        self.mail     = None

    def create(self, force=False):
        headers = {
            'accept': 'application/json',
        }

        if self.username:
            pass
        else:
            self.response = requests.put(
                'https://www.developermail.com/api/v1/mailbox', headers=headers)
            self.response = self.response.json()
            self.username = self.response['result']['name']
            self.token    = self.response['result']['token']

        return {'username': self.username, 'token': self.token}

    def destroy(self):
        headers = {
            'accept': 'application/json',
            'X-MailboxToken': self.token,
        }
        self.response = requests.delete(
            f'https://www.developermail.com/api/v1/mailbox/{self.username}', headers=headers)
        self.response = self.response.json()
        self.username = None
        self.token    = None
        return self.response

    def newtoken(self):
        headers = {
            'accept': 'application/json',
            'X-MailboxToken': self.token,
        }
        self.response = requests.put(
            f'https://www.developermail.com/api/v1/mailbox/{self.username}/token', headers=headers)
        self.response = self.response.json()
        self.token    = self.response['result']['token']
        return {'username': self.username, 'token': self.token}

    def getmailids(self):
        headers = {
            'accept': 'application/json',
            'X-MailboxToken': self.token,
        }

        self.response = requests.get(
            f'https://www.developermail.com/api/v1/mailbox/{self.username}', headers=headers)
        self.response = self.response.json()
        self.mailids  = self.response['result']
        return self.mailids

    def getmails(self, mailids: list = None):
        headers = {
            'accept': 'application/json',
            'X-MailboxToken': self.token,
            'Content-Type': 'application/json',
        }

        if mailids is None:
            mailids = self.mailids

        data = str(mailids)

        self.response = requests.post(
            f'https://www.developermail.com/api/v1/mailbox/{self.username}/messages', headers=headers, data=data)
        self.response = self.response.json()
        self.mails    = self.response['result']
        return self.mails

    def getmail(self, mailid: str, raw=False):
        headers = {
            'accept': 'application/json',
            'X-MailboxToken': self.token,
        }
        self.response = requests.get(
            f'https://www.developermail.com/api/v1/mailbox/{self.username}/messages/{mailid}', headers=headers)
        self.response = self.response.json()
        self.mail     = self.response['result']
        if raw is False:
            self.mail = email.message_from_string(self.mail)
        return self.mail

    def delmail(self, mailid: str):
        headers = {
            'accept': 'application/json',
            'X-MailboxToken': self.token,
        }
        self.response = requests.delete(
            f'https://www.developermail.com/api/v1/mailbox/{self.username}/messages/{mailid}', headers=headers)
        self.response = self.response.json()
        return self.response


client = MailClient()
client.newtoken()
print(client.getmails())
37  unfinished/openprompt/main.py  Normal file
@@ -0,0 +1,37 @@
import requests

cookies = {
'supabase-auth-token': '["eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk1NzQyLCJzdWIiOiJlOGExOTdiNS03YTAxLTQ3MmEtODQ5My1mNGUzNTNjMzIwNWUiLCJlbWFpbCI6InFlY3RncHZhamlibGNjQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTA5NDJ9XSwic2Vzc2lvbl9pZCI6IjIwNTg5MmE5LWU5YTAtNDk2Yi1hN2FjLWEyMWVkMTkwZDA4NCJ9.o7UgHpiJMfa6W-UKCSCnAncIfeOeiHz-51sBmokg0MA","RtPKeb7KMMC9Dn2fZOfiHA",null,null,null]',
}

headers = {
    'authority': 'openprompt.co',
    'accept': '*/*',
    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
    'content-type': 'application/json',
# 'cookie': 'supabase-auth-token=%5B%22eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjkzMjQ4LCJzdWIiOiJlODQwNTZkNC0xZWJhLTQwZDktOWU1Mi1jMTc4MTUwN2VmNzgiLCJlbWFpbCI6InNia2didGJnZHB2bHB0ZUBidWdmb28uY29tIiwicGhvbmUiOiIiLCJhcHBfbWV0YWRhdGEiOnsicHJvdmlkZXIiOiJlbWFpbCIsInByb3ZpZGVycyI6WyJlbWFpbCJdfSwidXNlcl9tZXRhZGF0YSI6e30sInJvbGUiOiJhdXRoZW50aWNhdGVkIiwiYWFsIjoiYWFsMSIsImFtciI6W3sibWV0aG9kIjoib3RwIiwidGltZXN0YW1wIjoxNjgxNjg4NDQ4fV0sInNlc3Npb25faWQiOiJiNDhlMmU3NS04NzlhLTQxZmEtYjQ4MS01OWY0OTgxMzg3YWQifQ.5-3E7WvMMVkXewD1qA26Rv4OFSTT82wYUBXNGcYaYfQ%22%2C%22u5TGGMMeT3zZA0agm5HGuA%22%2Cnull%2Cnull%2Cnull%5D',
    'origin': 'https://openprompt.co',
    'referer': 'https://openprompt.co/ChatGPT',
    'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
}

json_data = {
    'messages': [
        {
            'role': 'user',
            'content': 'hello world',
        },
    ],
}

response = requests.post('https://openprompt.co/api/chat2', cookies=cookies, headers=headers, json=json_data, stream=True)
for chunk in response.iter_content(chunk_size=1024):
    print(chunk)
7  unfinished/openprompt/test.py  Normal file
@@ -0,0 +1,7 @@
access_token = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV'
supabase_auth_token= '%5B%22eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8%22%2C%22_Zp8uXIA2InTDKYgo8TCqA%22%2Cnull%2Cnull%2Cnull%5D'
idk = [
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8",
"_Zp8uXIA2InTDKYgo8TCqA",None,None,None]
44  unfinished/t3nsor/README.md  Normal file
@@ -0,0 +1,44 @@
### note: currently patched

### Example: `t3nsor` (use like openai pypi package) <a name="example-t3nsor"></a>

```python
# Import t3nsor
import t3nsor

# t3nsor.Completion.create
# t3nsor.StreamCompletion.create

[...]

```

#### Example Chatbot
```python
messages = []

while True:
    user = input('you: ')

    t3nsor_cmpl = t3nsor.Completion.create(
        prompt   = user,
        messages = messages
    )

    print('gpt:', t3nsor_cmpl.completion.choices[0].text)

    messages.extend([
        {'role': 'user', 'content': user },
        {'role': 'assistant', 'content': t3nsor_cmpl.completion.choices[0].text}
    ])
```

#### Streaming Response:

```python
for response in t3nsor.StreamCompletion.create(
    prompt   = 'write python code to reverse a string',
    messages = []):

    print(response.completion.choices[0].text)
```
@@ -106,6 +106,8 @@ class StreamCompletion:
     def create(
         prompt: str = 'hello world',
         messages: list = []) -> T3nsorResponse:
 
+        print('t3nsor api is down, this may not work, refer to another module')
+
         response = post('https://www.t3nsor.tech/api/chat', headers = headers, stream = True, json = Completion.model | {
             'messages' : messages,
@@ -132,4 +134,4 @@ class StreamCompletion:
             'completion_chars' : len(chunk.decode()),
             'total_chars'      : len(prompt) + len(chunk.decode())
         }
     })
12  unfinished/test.py  Normal file
@@ -0,0 +1,12 @@
import gptbz
import asyncio


# asyncio.run(gptbz.test())

import requests

image = '/9j/4AAQSkZJRgABAQEAYABgAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAAoALQDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigDkZP+EhS4W0k1S+VntQPtEWmRsgkNwBu4ZsHYQNvTbls5BA6DS7uW6S6E0VwjQ3UsQM0Pl71DZUrydy4IAbvg8CsTx3DbHQLi4uVs9scWzdd+dsAaWI4PlfNjKjpzkDtmpoNSgbWYpLR7Ty5bq5trw/vd3nIowBxtzti53Y6fKT3z2djra56fNbv07HR1z13ZRX/jDyby0+02f9nfdmsEeHd5o/5anndwPkxjjPWuhrh9Mvra88RLqccmnOHtvLEqfaN+1r1lUcjbg4PbO4H+Cqk+hnRi9ZI29E0uC2N1eG3Am+13DITZRwuqlsYG0ZYEKCGJywwT2AtWTapcW1vcPPCiyrE5ils2SRQV+dW/ecMT/3zgj5utZtpdwL4e190e02W9xeb9vm7FOWY78/NnnJ28f3ahkgtptD8JRlbMos9s8QPnbcrEzDy/4sgDjzOMdeaSZbi23f8vmbfn6hBFuktmuWWPJWCNELNuxgbpcDj1Pbr2qJ9bMVyIZNK1JVLyr5qwB1AjUNu+Uk4bovGSRjAqCTwdoElv5B02MReT5G1HZfk8zzMcEfx81YlsJ7NJX0tolZzNK8dyZJA8jDIwd3yjcBkAHjOAM09SP3b/q36mkjiSNXAYBgCNykH8QeRWdfaw1ldSW66XqN0UgE++3iBRsvt2BiQN/8WPQZqharF9oN5osVml1NLbLqUbmUFY/L4CrgYYKy4yoGM5xjhlnc2OoeMrfULV7aQXGkExyYlErJ5oPQ/Jtye/zZ9qLgqaTba0NyzvPtizH7NcQeVM8OJ49u/acbl9VPY96s1geFjF/xOhF9m41Wfd9n8z73BO7f/Fzzt+X0q7c6mWvRY2DwSXcUsQuUff8Auo2ySflB+YqrYyQOmTyARPQmVP32kLqF1cbmsrJZkuni3rcfZ98UfzKvJJUE4JOM5wpODwDl3Meuf2rHbRatcBJXuj5iachjhUovlBmZudrNkEZ3HIOMGlhREhbS9He2a8MO6a4fzmGDMQ3zAk5yZ8DzMgj0yRuWdha2CzLawrEJpnnkx/G7HLMfc0bl3VNf5pff/kVLS8uxFHHJZ3s5Xyo2mZI4y2VBZyN44B6gDrwAcVZ069Go2EV2Le5t/MBPlXMZjkXnGGU9OlULSdbfTt8LWy5mt0JAkK4YRLjnnODx26Z71TXULEWn/CUWDwmxeDbM4WbkCXJbaB23SnlM5PUDNF7CcObZf12OlpCcDoTz2oVlcZVgRkjIPccGo7hgsSk7ceYg+bP94elUYpamda64915GdH1SESxiTM0KjZmTZtbDHB53Y/u89eK1qw4xD9l0mIC3wLdCg/eYwHh+73x0+9znb71uUkXUSWyCiiimZhRRRQBieL5Hj8LXjxySxuNmGivFtWHzr0lbhfx69O9MvHdZpbKKWYnUluNji+VGikVFULHnkdGbjO05JHPEviyF5/DF7HGkjuQpCx2i3THDA8RNw3Tv069qR0kk0i4uFilF3bSXTwE2a+YGzIAUQnnIPByN46kbjUPc6YNKC9X+SLtjeB9Mt5ZyqzbI1lQzK5R2C/KWGAT8w6dcjHUVzemSyxeCba9e5uWfzIgxl1aOTgXPebGw5BwR3ACdalna8+0R3Kx3nk6jc2MvkjTI2MH97zDnI+4uWOSny4z2Lqxmt/hytvHHIZhFHJsj0yJnyXDEfZ87M9cjPB56ik2y4xSsu7XcnjMsejeJszXBZZrgozaihZAYwQFfGIQM8Bvu9ehrTKuJtOg3y5gKs/8ApAy2Y5B846uMj8Tz/CaqzROH1C3EchW6uHGRZIVx9nHXs4yPvN1PydBV2Lc+u3eUkCJBDtZoAFJzJna/VjgjI/h/4EaaM5PS/wDXRF+iiirOcy7RZE8RanukmKPFA6q9yHVfvg7Y+qfd5J4Y9OhrJ8Nm4FxYJNNdORaXCsJtTS4yVnAyQoG5sfxfw/dPJrUslmGt6rcymQxM0MMStahMALk4cfM65c9cBSGA7mqmi2k9t/ZZuDJJKbSdpHNjHEdzyRvhtv3G5PyjIbBJOVqDpurP5d+zGWtzeLdahZQLNK895PiV7+N/IURKQQMEqNzKAm1tucnggG4Fkhs4INNuJL145oEuHa7BcIAuWOQRkrhiAFzkkEE8rNDJPczWtnG1rG7yfapvsqESsY1AIJPP3hztbPllTjHKvpv2CWKbTUSHdJCk8cVtH+8jUFOSNpGAynOTgJgL1BNRNxf9fmWNGa3fR7U2ty9zDswJZJxMzHvlwSCc5BwccVerBZ3tLf8Atqyguvsxt/n02OyUSsxk3FsHa24bnyM4ycgE9d1WDDIz1I5BHQ471SM6i1uY8cjjSIWLyFjLbDJu1J5Mefn6HryP4snH3hRdmTS5f7T82aS2WBY5Y5LpVjX94Pn+YYzhmydw4UDB4wio/wDY8K+XLuE1qcfY1B4MWfk6DHOT/Bg4+6K1zGkkHlSoroy7WVlGCCOQRSsU5JGUrPo96EZ5p7O7mmmlubm7XFqQoYIobB2fK3Aztwe3TQvX2QKQSMyxDiQJ1dR1P8u/TvWb5bWty2m3KTXlvqMs7Ky2ieVbqVBKSEcHJL4JB3ZwfeLfcQRnTpY7mT7PLZiOdbJSkillzgA44KMScLsBBAOBkuNxu0/6epcQv9s0+LfJzauxBuVJJDRckdXPJ+YcDJH8QrTrN2sNcsxsk2LZyjd9nXaCWj439VPH3R
wcZ/hFaVNGc+gUUUUyAooooAxfFVxZxeG9RS7ltVQ25ytwzbCCQBkJ82MkD5eeah0G7tYLi/sZJrKO4fUbjy4oncM/SQ5D9Ww4J25Xniiis2/eO2FNOhf1/CxmamsEGp2+nzx2CwxajYyWKN9o3KdpX+Ebd2I2287ePm973i3UdMg0W+0y4mtUkNqJPKuBJ5ewuEBYx8gbiBxz+FFFS3ZM1p01OdNN/wBaFfVtU0qHxHplx9qsSkEl2853SvIjxwjdtCZXIX7wbt05q7YJdS6nc6vYxWEtpfi2KS+bKsjQhCSWBBG4bhtAAyCcmiinF3k0RWgqdKMl1VvxZfM2s+VkWFh5nl5x9tfG/djGfK6bec468Y/irN1CeUCeHXbrTItPc3O6GN5PNltxHx0I+YKXLYB42455ooqpaIwo2lO1rE1rZjUYrcCO2Giw/Zp7BYzKrkKu4bh8oAB2EA56HIz0u3uxL+1kbygQpQFt2fmki4GOOuOvfHbNFFPpcTu6nKFpsTU75V8oNJKXIXduOI4hk54zjHTjGO+a0KKKaM59PQxLqNNBMuoQpDFYJEfPQLISp8zcWAXIxh5CcLnOMnHQaFNKkkvtOFoli0k9xqP32Zn24LIFyM7kwRg98c5yUVL3No6xTfV2/IrxyW0vh21kQ2phaexKn97s5aErj+LPTbnj7u7+KujoopxZNZW+9/oQXdpBfWk1rcxiSGVGjdSSMhgQeRyOCRxWOtvbXU0Ol6mIHksJbea0IMoJYISGy3U5ST+JuB83uUUMVJuz121JnaL/AITOBSYPOGnyEA7/ADdvmJnH8G3IHX5s4xxmtmiihdRVFZR9AoooqjI//9k='

response = requests.get('https://ocr.holey.cc/ncku?base64_str=%s' % image) #.split('base64,')[1])
print(response.content)
3  unfinished/theb.ai/README.md  Normal file
@@ -0,0 +1,3 @@
https://chatbot.theb.ai/
to do:
- code refactoring
57  unfinished/theb.ai/__init__.py  Normal file
@@ -0,0 +1,57 @@
from curl_cffi import requests
from json import loads
from re import findall
from threading import Thread
from queue import Queue, Empty


class Completion:
    # experimental
    part1 = '{"role":"assistant","id":"chatcmpl'
    part2 = '"},"index":0,"finish_reason":null}]}}'
    regex = rf'{part1}(.*){part2}'

    timer            = None
    message_queue    = Queue()
    stream_completed = False

    def request():
        headers = {
            'authority'   : 'chatbot.theb.ai',
            'content-type': 'application/json',
            'origin'      : 'https://chatbot.theb.ai',
            'user-agent'  : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
        }

        requests.post('https://chatbot.theb.ai/api/chat-process', headers=headers, content_callback=Completion.handle_stream_response,
            json = {
                'prompt' : 'hello world',
                'options': {}
            }
        )

        Completion.stream_completed = True

    @staticmethod
    def create():
        Thread(target=Completion.request).start()

        while Completion.stream_completed != True or not Completion.message_queue.empty():
            try:
                message = Completion.message_queue.get(timeout=0.01)
                for message in findall(Completion.regex, message):
                    yield loads(Completion.part1 + message + Completion.part2)

            except Empty:
                pass

    @staticmethod
    def handle_stream_response(response):
        Completion.message_queue.put(response.decode())


def start():
    for message in Completion.create():
        yield message['delta']


if __name__ == '__main__':
    for message in start():
        print(message)
29  unfinished/vercelai/v2.py  Normal file
@@ -0,0 +1,29 @@
import requests


token = requests.get('https://play.vercel.ai/openai.jpeg', headers={
    'authority': 'play.vercel.ai',
    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
    'referer': 'https://play.vercel.ai/',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'}).text + '.'

headers = {
    'authority': 'play.vercel.ai',
    'custom-encoding': token,
    'origin': 'https://play.vercel.ai',
    'referer': 'https://play.vercel.ai/',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
}

for chunk in requests.post('https://play.vercel.ai/api/generate', headers=headers, stream = True, json = {
    'prompt': 'hi',
    'model': 'openai:gpt-3.5-turbo',
    'temperature': 0.7,
    'maxTokens': 200,
    'topK': 1,
    'topP': 1,
    'frequencyPenalty': 1,
    'presencePenalty': 1,
    'stopSequences': []}).iter_lines():

    print(chunk)
53  unfinished/writesonic/README.md  Normal file
@@ -0,0 +1,53 @@
### Example: `writesonic` (use like openai pypi package) <a name="example-writesonic"></a>

```python
# import writesonic
import writesonic

# create account (3-4s)
account = writesonic.Account.create(logging = True)

# with loging:
# 2023-04-06 21:50:25 INFO __main__ -> register success : '{"id":"51aa0809-3053-44f7-922a...' (2s)
# 2023-04-06 21:50:25 INFO __main__ -> id : '51aa0809-3053-44f7-922a-2b85d8d07edf'
# 2023-04-06 21:50:25 INFO __main__ -> token : 'eyJhbGciOiJIUzI1NiIsInR5cCI6Ik...'
# 2023-04-06 21:50:28 INFO __main__ -> got key : '194158c4-d249-4be0-82c6-5049e869533c' (2s)

# simple completion
response = writesonic.Completion.create(
    api_key = account.key,
    prompt  = 'hello world'
)

print(response.completion.choices[0].text) # Hello! How may I assist you today?

# conversation

response = writesonic.Completion.create(
    api_key = account.key,
    prompt  = 'what is my name ?',
    enable_memory = True,
    history_data  = [
        {
            'is_sent': True,
            'message': 'my name is Tekky'
        },
        {
            'is_sent': False,
            'message': 'hello Tekky'
        }
    ]
)

print(response.completion.choices[0].text) # Your name is Tekky.

# enable internet

response = writesonic.Completion.create(
    api_key = account.key,
    prompt  = 'who won the quatar world cup ?',
    enable_google_results = True
)

print(response.completion.choices[0].text) # Argentina won the 2022 FIFA World Cup tournament held in Qatar ...
```
36  you/README.md  Normal file
@@ -0,0 +1,36 @@
### Example: `you` (use like openai pypi package) <a name="example-you"></a>

```python
import you

# simple request with links and details
response = you.Completion.create(
    prompt="hello world",
    detailed=True,
    include_links=True, )

print(response)

# {
#     "response": "...",
#     "links": [...],
#     "extra": {...},
#     "slots": {...}
#     }
# }

# chatbot

chat = []

while True:
    prompt = input("You: ")

    response = you.Completion.create(
        prompt=prompt,
        chat=chat)

    print("Bot:", response["response"])

    chat.append({"question": prompt, "answer": response["response"]})
```
146  you/__init__.py
@@ -1,77 +1,99 @@
(The module is reworked in place: parameters move to snake_case, the event-stream parsing is redone with regexes, and a failure fallback is added. New contents:)

import re
from json import loads
from uuid import uuid4

from fake_useragent import UserAgent
from tls_client import Session


class Completion:
    @staticmethod
    def create(
        prompt: str,
        page: int = 1,
        count: int = 10,
        safe_search: str = 'Moderate',
        on_shopping_page: bool = False,
        mkt: str = '',
        response_filter: str = 'WebPages,Translations,TimeZone,Computation,RelatedSearches',
        domain: str = 'youchat',
        query_trace_id: str = None,
        chat: list = None,
        include_links: bool = False,
        detailed: bool = False,
        debug: bool = False,
    ) -> dict:
        if chat is None:
            chat = []

        client = Session(client_identifier='chrome_108')
        client.headers = Completion.__get_headers()

        response = client.get(
            f'https://you.com/api/streamingSearch',
            params={
                'q': prompt,
                'page': page,
                'count': count,
                'safeSearch': safe_search,
                'onShoppingPage': on_shopping_page,
                'mkt': mkt,
                'responseFilter': response_filter,
                'domain': domain,
                'queryTraceId': str(uuid4()) if query_trace_id is None else query_trace_id,
                'chat': str(chat),  # {'question':'','answer':' ''}
            },
        )

        if debug:
            print('\n\n------------------\n\n')
            print(response.text)
            print('\n\n------------------\n\n')

        if 'youChatToken' not in response.text:
            return Completion.__get_failure_response()

        you_chat_serp_results = re.search(
            r'(?<=event: youChatSerpResults\ndata:)(.*\n)*?(?=event: )', response.text
        ).group()
        third_party_search_results = re.search(
            r'(?<=event: thirdPartySearchResults\ndata:)(.*\n)*?(?=event: )', response.text
        ).group()
        # slots = findall(r"slots\ndata: (.*)\n\nevent", response.text)[0]

        text = ''.join(re.findall(r'{\"youChatToken\": \"(.*?)\"}', response.text))

        extra = {
            'youChatSerpResults': loads(you_chat_serp_results),
            # 'slots'            : loads(slots)
        }

        return {
            'response': text.replace('\\n', '\n').replace('\\\\', '\\'),
            'links': loads(third_party_search_results)['search']['third_party_search_results']
            if include_links
            else None,
            'extra': extra if detailed else None,
        }

    @classmethod
    def __get_headers(cls) -> dict:
        return {
            'authority': 'you.com',
            'accept': 'text/event-stream',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control': 'no-cache',
            'referer': 'https://you.com/search?q=who+are+you&tbm=youchat',
            'sec-ch-ua': '"Not_A Brand";v="99", "Google Chrome";v="109", "Chromium";v="109"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'cookie': f'safesearch_guest=Moderate; uuid_guest={str(uuid4())}',
            'user-agent': UserAgent().random,
        }

    @classmethod
    def __get_failure_response(cls) -> dict:
        return dict(response='Unable to fetch the response, Please try again.', links=[], extra={})
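For reference, a tiny self-contained illustration of how the reworked parser assembles the reply text: you.com streams server-sent events, each `youChatToken` event carries one fragment, and joining the regex matches reconstructs the answer (the sample stream below is made up for illustration).

```python
import re

sample_stream = (
    'event: youChatToken\ndata: {"youChatToken": "Hello"}\n\n'
    'event: youChatToken\ndata: {"youChatToken": " world"}\n\n'
)

# same pattern as in Completion.create above
text = ''.join(re.findall(r'{\"youChatToken\": \"(.*?)\"}', sample_stream))
print(text)  # -> Hello world
```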