Aymane Hrouch committed on
Commit f1594cf · 1 Parent(s): 86e0fbe

Reformat code using PyCharm

forefront/__init__.py CHANGED
@@ -1,51 +1,54 @@
1
- from tls_client import Session
2
- from forefront.mail import Mail
3
- from time import time, sleep
4
  from re import match
5
- from forefront.typing import ForeFrontResponse
6
  from uuid import uuid4
 
7
  from requests import post
8
- from json import loads
9
 
10
 
11
  class Account:
12
  @staticmethod
13
- def create(proxy = None, logging = False):
14
-
15
  proxies = {
16
  'http': 'http://' + proxy,
17
- 'https': 'http://' + proxy } if proxy else False
18
-
19
  start = time()
20
 
21
- mail = Mail(proxies)
22
- mail_token = None
23
- mail_adress = mail.get_mail()
24
-
25
- #print(mail_adress)
26
-
27
- client = Session(client_identifier='chrome110')
28
  client.proxies = proxies
29
  client.headers = {
30
  "origin": "https://accounts.forefront.ai",
31
- "user-agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
32
  }
33
-
34
- response = client.post('https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.32.6',
35
- data = {
36
- "email_address": mail_adress
37
- }
38
- )
39
-
40
  trace_token = response.json()['response']['id']
41
  if logging: print(trace_token)
42
 
43
- response = client.post(f"https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.32.6",
44
- data = {
45
- "strategy" : "email_code",
 
46
  }
47
- )
48
-
49
  if logging: print(response.text)
50
 
51
  if not 'sign_up_attempt' in response.text:
@@ -59,89 +62,91 @@ class Account:
59
 
60
  if mail_token:
61
  break
62
-
63
  if logging: print(mail_token)
64
-
65
- response = client.post(f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/attempt_verification?_clerk_js_version=4.38.4', data = {
66
- 'code': mail_token,
67
- 'strategy': 'email_code'
68
- })
69
-
 
 
70
  if logging: print(response.json())
71
-
72
- token = response.json()['client']['sessions'][0]['last_active_token']['jwt']
73
-
74
  with open('accounts.txt', 'a') as f:
75
  f.write(f'{mail_adress}:{token}\n')
76
-
77
  if logging: print(time() - start)
78
-
79
  return token
80
 
81
 
82
  class StreamingCompletion:
83
  @staticmethod
84
  def create(
85
- token = None,
86
- chatId = None,
87
- prompt = '',
88
- actionType = 'new',
89
- defaultPersona = '607e41fe-95be-497e-8e97-010a59b2e2c0', # default
90
- model = 'gpt-4') -> ForeFrontResponse:
91
-
92
  if not token: raise Exception('Token is required!')
93
  if not chatId: chatId = str(uuid4())
94
-
95
  headers = {
96
- 'authority' : 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
97
- 'accept' : '*/*',
98
- 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
99
- 'authorization' : 'Bearer ' + token,
100
- 'cache-control' : 'no-cache',
101
- 'content-type' : 'application/json',
102
- 'origin' : 'https://chat.forefront.ai',
103
- 'pragma' : 'no-cache',
104
- 'referer' : 'https://chat.forefront.ai/',
105
- 'sec-ch-ua' : '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
106
- 'sec-ch-ua-mobile' : '?0',
107
  'sec-ch-ua-platform': '"macOS"',
108
- 'sec-fetch-dest' : 'empty',
109
- 'sec-fetch-mode' : 'cors',
110
- 'sec-fetch-site' : 'cross-site',
111
- 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
112
  }
113
 
114
  json_data = {
115
- 'text' : prompt,
116
- 'action' : actionType,
117
- 'parentId' : chatId,
118
- 'workspaceId' : chatId,
119
- 'messagePersona' : defaultPersona,
120
- 'model' : model
121
  }
122
 
123
  for chunk in post('https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
124
- headers=headers, json=json_data, stream=True).iter_lines():
125
-
126
  if b'finish_reason":null' in chunk:
127
- data = loads(chunk.decode('utf-8').split('data: ')[1])
128
  token = data['choices'][0]['delta'].get('content')
129
-
130
  if token != None:
131
  yield ForeFrontResponse({
132
- 'id' : chatId,
133
- 'object' : 'text_completion',
134
- 'created': int(time()),
135
- 'model' : model,
136
- 'choices': [{
137
- 'text' : token,
138
- 'index' : 0,
139
- 'logprobs' : None,
140
- 'finish_reason' : 'stop'
141
- }],
142
- 'usage': {
143
- 'prompt_tokens' : len(prompt),
144
- 'completion_tokens' : len(token),
145
- 'total_tokens' : len(prompt) + len(token)
146
- }
147
- })
 
1
+ from json import loads
 
 
2
  from re import match
3
+ from time import time, sleep
4
  from uuid import uuid4
5
+
6
  from requests import post
7
+ from tls_client import Session
8
+
9
+ from forefront.mail import Mail
10
+ from forefront.typing import ForeFrontResponse
11
 
12
 
13
  class Account:
14
  @staticmethod
15
+ def create(proxy=None, logging=False):
16
+
17
  proxies = {
18
  'http': 'http://' + proxy,
19
+ 'https': 'http://' + proxy} if proxy else False
20
+
21
  start = time()
22
 
23
+ mail = Mail(proxies)
24
+ mail_token = None
25
+ mail_adress = mail.get_mail()
26
+
27
+ # print(mail_adress)
28
+
29
+ client = Session(client_identifier='chrome110')
30
  client.proxies = proxies
31
  client.headers = {
32
  "origin": "https://accounts.forefront.ai",
33
+ "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
34
  }
35
+
36
+ response = client.post('https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.32.6',
37
+ data={
38
+ "email_address": mail_adress
39
+ }
40
+ )
41
+
42
  trace_token = response.json()['response']['id']
43
  if logging: print(trace_token)
44
 
45
+ response = client.post(
46
+ f"https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.32.6",
47
+ data={
48
+ "strategy": "email_code",
49
  }
50
+ )
51
+
52
  if logging: print(response.text)
53
 
54
  if not 'sign_up_attempt' in response.text:
 
62
 
63
  if mail_token:
64
  break
65
+
66
  if logging: print(mail_token)
67
+
68
+ response = client.post(
69
+ f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/attempt_verification?_clerk_js_version=4.38.4',
70
+ data={
71
+ 'code': mail_token,
72
+ 'strategy': 'email_code'
73
+ })
74
+
75
  if logging: print(response.json())
76
+
77
+ token = response.json()['client']['sessions'][0]['last_active_token']['jwt']
78
+
79
  with open('accounts.txt', 'a') as f:
80
  f.write(f'{mail_adress}:{token}\n')
81
+
82
  if logging: print(time() - start)
83
+
84
  return token
85
 
86
 
87
  class StreamingCompletion:
88
  @staticmethod
89
  def create(
90
+ token=None,
91
+ chatId=None,
92
+ prompt='',
93
+ actionType='new',
94
+ defaultPersona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default
95
+ model='gpt-4') -> ForeFrontResponse:
96
+
97
  if not token: raise Exception('Token is required!')
98
  if not chatId: chatId = str(uuid4())
99
+
100
  headers = {
101
+ 'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
102
+ 'accept': '*/*',
103
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
104
+ 'authorization': 'Bearer ' + token,
105
+ 'cache-control': 'no-cache',
106
+ 'content-type': 'application/json',
107
+ 'origin': 'https://chat.forefront.ai',
108
+ 'pragma': 'no-cache',
109
+ 'referer': 'https://chat.forefront.ai/',
110
+ 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
111
+ 'sec-ch-ua-mobile': '?0',
112
  'sec-ch-ua-platform': '"macOS"',
113
+ 'sec-fetch-dest': 'empty',
114
+ 'sec-fetch-mode': 'cors',
115
+ 'sec-fetch-site': 'cross-site',
116
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
117
  }
118
 
119
  json_data = {
120
+ 'text': prompt,
121
+ 'action': actionType,
122
+ 'parentId': chatId,
123
+ 'workspaceId': chatId,
124
+ 'messagePersona': defaultPersona,
125
+ 'model': model
126
  }
127
 
128
  for chunk in post('https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
129
+ headers=headers, json=json_data, stream=True).iter_lines():
130
+
131
  if b'finish_reason":null' in chunk:
132
+ data = loads(chunk.decode('utf-8').split('data: ')[1])
133
  token = data['choices'][0]['delta'].get('content')
134
+
135
  if token != None:
136
  yield ForeFrontResponse({
137
+ 'id': chatId,
138
+ 'object': 'text_completion',
139
+ 'created': int(time()),
140
+ 'model': model,
141
+ 'choices': [{
142
+ 'text': token,
143
+ 'index': 0,
144
+ 'logprobs': None,
145
+ 'finish_reason': 'stop'
146
+ }],
147
+ 'usage': {
148
+ 'prompt_tokens': len(prompt),
149
+ 'completion_tokens': len(token),
150
+ 'total_tokens': len(prompt) + len(token)
151
+ }
152
+ })
forefront/mail.py CHANGED
@@ -1,6 +1,8 @@
1
- from requests import Session
2
- from string import ascii_letters
3
  from random import choices
 
4
 
5
  class Mail:
6
  def __init__(self, proxies: dict = None) -> None:
@@ -23,27 +25,27 @@ class Mail:
23
  "accept-encoding": "gzip, deflate, br",
24
  "accept-language": "en-GB,en-US;q=0.9,en;q=0.8"
25
  }
26
-
27
  def get_mail(self) -> str:
28
  token = ''.join(choices(ascii_letters, k=14)).lower()
29
- init = self.client.post("https://api.mail.tm/accounts", json={
30
- "address" : f"{token}@bugfoo.com",
31
  "password": token
32
  })
33
-
34
  if init.status_code == 201:
35
- resp = self.client.post("https://api.mail.tm/token", json = {
36
  **init.json(),
37
  "password": token
38
  })
39
-
40
  self.client.headers['authorization'] = 'Bearer ' + resp.json()['token']
41
-
42
  return f"{token}@bugfoo.com"
43
-
44
  else:
45
  raise Exception("Failed to create email")
46
-
47
  def fetch_inbox(self):
48
  return self.client.get(f"https://api.mail.tm/messages").json()["hydra:member"]
49
 
@@ -52,4 +54,3 @@ class Mail:
52
 
53
  def get_message_content(self, message_id: str):
54
  return self.get_message(message_id)["text"]
55
-
1
  from random import choices
2
+ from string import ascii_letters
3
+
4
+ from requests import Session
5
+
6
 
7
  class Mail:
8
  def __init__(self, proxies: dict = None) -> None:
 
25
  "accept-encoding": "gzip, deflate, br",
26
  "accept-language": "en-GB,en-US;q=0.9,en;q=0.8"
27
  }
28
+
29
  def get_mail(self) -> str:
30
  token = ''.join(choices(ascii_letters, k=14)).lower()
31
+ init = self.client.post("https://api.mail.tm/accounts", json={
32
+ "address": f"{token}@bugfoo.com",
33
  "password": token
34
  })
35
+
36
  if init.status_code == 201:
37
+ resp = self.client.post("https://api.mail.tm/token", json={
38
  **init.json(),
39
  "password": token
40
  })
41
+
42
  self.client.headers['authorization'] = 'Bearer ' + resp.json()['token']
43
+
44
  return f"{token}@bugfoo.com"
45
+
46
  else:
47
  raise Exception("Failed to create email")
48
+
49
  def fetch_inbox(self):
50
  return self.client.get(f"https://api.mail.tm/messages").json()["hydra:member"]
51
 
 
54
 
55
  def get_message_content(self, message_id: str):
56
  return self.get_message(message_id)["text"]
 
forefront/typing.py CHANGED
@@ -2,12 +2,12 @@ class ForeFrontResponse:
2
  class Completion:
3
  class Choices:
4
  def __init__(self, choice: dict) -> None:
5
- self.text = choice['text']
6
- self.content = self.text.encode()
7
- self.index = choice['index']
8
- self.logprobs = choice['logprobs']
9
- self.finish_reason = choice['finish_reason']
10
-
11
  def __repr__(self) -> str:
12
  return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
13
 
@@ -16,22 +16,21 @@ class ForeFrontResponse:
16
 
17
  class Usage:
18
  def __init__(self, usage_dict: dict) -> None:
19
- self.prompt_tokens = usage_dict['prompt_tokens']
20
- self.completion_tokens = usage_dict['completion_tokens']
21
- self.total_tokens = usage_dict['total_tokens']
22
 
23
  def __repr__(self):
24
  return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
25
-
26
  def __init__(self, response_dict: dict) -> None:
27
-
28
- self.response_dict = response_dict
29
- self.id = response_dict['id']
30
- self.object = response_dict['object']
31
- self.created = response_dict['created']
32
- self.model = response_dict['model']
33
- self.completion = self.Completion(response_dict['choices'])
34
- self.usage = self.Usage(response_dict['usage'])
35
 
36
  def json(self) -> dict:
37
- return self.response_dict
 
2
  class Completion:
3
  class Choices:
4
  def __init__(self, choice: dict) -> None:
5
+ self.text = choice['text']
6
+ self.content = self.text.encode()
7
+ self.index = choice['index']
8
+ self.logprobs = choice['logprobs']
9
+ self.finish_reason = choice['finish_reason']
10
+
11
  def __repr__(self) -> str:
12
  return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
13
 
 
16
 
17
  class Usage:
18
  def __init__(self, usage_dict: dict) -> None:
19
+ self.prompt_tokens = usage_dict['prompt_tokens']
20
+ self.completion_tokens = usage_dict['completion_tokens']
21
+ self.total_tokens = usage_dict['total_tokens']
22
 
23
  def __repr__(self):
24
  return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
25
+
26
  def __init__(self, response_dict: dict) -> None:
27
+ self.response_dict = response_dict
28
+ self.id = response_dict['id']
29
+ self.object = response_dict['object']
30
+ self.created = response_dict['created']
31
+ self.model = response_dict['model']
32
+ self.completion = self.Completion(response_dict['choices'])
33
+ self.usage = self.Usage(response_dict['usage'])
 
34
 
35
  def json(self) -> dict:
36
+ return self.response_dict
phind/__init__.py CHANGED
@@ -1,27 +1,25 @@
1
  from urllib.parse import quote
2
- from time import time
3
- from datetime import datetime
4
- from queue import Queue, Empty
5
- from threading import Thread
6
- from re import findall
7
 
8
  from curl_cffi.requests import post
9
 
10
  cf_clearance = ''
11
- user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
 
12
 
13
  class PhindResponse:
14
-
15
  class Completion:
16
-
17
  class Choices:
18
  def __init__(self, choice: dict) -> None:
19
- self.text = choice['text']
20
- self.content = self.text.encode()
21
- self.index = choice['index']
22
- self.logprobs = choice['logprobs']
23
- self.finish_reason = choice['finish_reason']
24
-
25
  def __repr__(self) -> str:
26
  return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
27
 
@@ -30,34 +28,33 @@ class PhindResponse:
30
 
31
  class Usage:
32
  def __init__(self, usage_dict: dict) -> None:
33
- self.prompt_tokens = usage_dict['prompt_tokens']
34
- self.completion_tokens = usage_dict['completion_tokens']
35
- self.total_tokens = usage_dict['total_tokens']
36
 
37
  def __repr__(self):
38
  return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
39
-
40
  def __init__(self, response_dict: dict) -> None:
41
-
42
- self.response_dict = response_dict
43
- self.id = response_dict['id']
44
- self.object = response_dict['object']
45
- self.created = response_dict['created']
46
- self.model = response_dict['model']
47
- self.completion = self.Completion(response_dict['choices'])
48
- self.usage = self.Usage(response_dict['usage'])
49
 
50
  def json(self) -> dict:
51
  return self.response_dict
52
 
53
 
54
  class Search:
55
- def create(prompt: str, actualSearch: bool = True, language: str = 'en') -> dict: # None = no search
56
  if user_agent == '':
57
  raise ValueError('user_agent must be set, refer to documentation')
58
- if cf_clearance == '' :
59
  raise ValueError('cf_clearance must be set, refer to documentation')
60
-
61
  if not actualSearch:
62
  return {
63
  '_type': 'SearchResponse',
@@ -75,7 +72,7 @@ class Search:
75
  }
76
  }
77
  }
78
-
79
  headers = {
80
  'authority': 'www.phind.com',
81
  'accept': '*/*',
@@ -91,8 +88,8 @@ class Search:
91
  'sec-fetch-site': 'same-origin',
92
  'user-agent': user_agent
93
  }
94
-
95
- return post('https://www.phind.com/api/bing/search', headers = headers, json = {
96
  'q': prompt,
97
  'userRankList': {},
98
  'browserLanguage': language}).json()['rawBingResults']
@@ -100,45 +97,45 @@ class Search:
100
 
101
  class Completion:
102
  def create(
103
- model = 'gpt-4',
104
- prompt: str = '',
105
- results: dict = None,
106
- creative: bool = False,
107
- detailed: bool = False,
108
- codeContext: str = '',
109
- language: str = 'en') -> PhindResponse:
110
-
111
- if user_agent == '' :
112
  raise ValueError('user_agent must be set, refer to documentation')
113
 
114
- if cf_clearance == '' :
115
  raise ValueError('cf_clearance must be set, refer to documentation')
116
-
117
  if results is None:
118
- results = Search.create(prompt, actualSearch = True)
119
-
120
  if len(codeContext) > 2999:
121
  raise ValueError('codeContext must be less than 3000 characters')
122
-
123
  models = {
124
- 'gpt-4' : 'expert',
125
- 'gpt-3.5-turbo' : 'intermediate',
126
  'gpt-3.5': 'intermediate',
127
  }
128
-
129
  json_data = {
130
- 'question' : prompt,
131
- 'bingResults' : results, #response.json()['rawBingResults'],
132
- 'codeContext' : codeContext,
133
  'options': {
134
- 'skill' : models[model],
135
- 'date' : datetime.now().strftime("%d/%m/%Y"),
136
  'language': language,
137
  'detailed': detailed,
138
  'creative': creative
139
  }
140
  }
141
-
142
  headers = {
143
  'authority': 'www.phind.com',
144
  'accept': '*/*',
@@ -155,50 +152,51 @@ class Completion:
155
  'sec-fetch-site': 'same-origin',
156
  'user-agent': user_agent
157
  }
158
-
159
  completion = ''
160
- response = post('https://www.phind.com/api/infer/answer', headers = headers, json = json_data, timeout=99999, impersonate='chrome110')
 
161
  for line in response.text.split('\r\n\r\n'):
162
  completion += (line.replace('data: ', ''))
163
-
164
  return PhindResponse({
165
- 'id' : f'cmpl-1337-{int(time())}',
166
- 'object' : 'text_completion',
167
- 'created': int(time()),
168
- 'model' : models[model],
169
  'choices': [{
170
- 'text' : completion,
171
- 'index' : 0,
172
- 'logprobs' : None,
173
- 'finish_reason' : 'stop'
174
- }],
175
  'usage': {
176
- 'prompt_tokens' : len(prompt),
177
- 'completion_tokens' : len(completion),
178
- 'total_tokens' : len(prompt) + len(completion)
179
  }
180
  })
181
-
182
 
183
  class StreamingCompletion:
184
- message_queue = Queue()
185
  stream_completed = False
186
-
187
  def request(model, prompt, results, creative, detailed, codeContext, language) -> None:
188
-
189
  models = {
190
- 'gpt-4' : 'expert',
191
- 'gpt-3.5-turbo' : 'intermediate',
192
  'gpt-3.5': 'intermediate',
193
  }
194
 
195
  json_data = {
196
- 'question' : prompt,
197
- 'bingResults' : results,
198
- 'codeContext' : codeContext,
199
  'options': {
200
- 'skill' : models[model],
201
- 'date' : datetime.now().strftime("%d/%m/%Y"),
202
  'language': language,
203
  'detailed': detailed,
204
  'creative': creative
@@ -221,65 +219,65 @@ class StreamingCompletion:
221
  'sec-fetch-site': 'same-origin',
222
  'user-agent': user_agent
223
  }
224
-
225
- response = post('https://www.phind.com/api/infer/answer',
226
- headers = headers, json = json_data, timeout=99999, impersonate='chrome110', content_callback=StreamingCompletion.handle_stream_response)
227
228
 
229
  StreamingCompletion.stream_completed = True
230
 
231
  @staticmethod
232
  def create(
233
- model : str = 'gpt-4',
234
- prompt : str = '',
235
- results : dict = None,
236
- creative : bool = False,
237
- detailed : bool = False,
238
- codeContext : str = '',
239
- language : str = 'en'):
240
-
241
  if user_agent == '':
242
  raise ValueError('user_agent must be set, refer to documentation')
243
- if cf_clearance == '' :
244
  raise ValueError('cf_clearance must be set, refer to documentation')
245
-
246
  if results is None:
247
- results = Search.create(prompt, actualSearch = True)
248
-
249
  if len(codeContext) > 2999:
250
  raise ValueError('codeContext must be less than 3000 characters')
251
-
252
- Thread(target = StreamingCompletion.request, args = [
253
  model, prompt, results, creative, detailed, codeContext, language]).start()
254
-
255
  while StreamingCompletion.stream_completed != True or not StreamingCompletion.message_queue.empty():
256
  try:
257
  chunk = StreamingCompletion.message_queue.get(timeout=0)
258
 
259
  if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
260
  chunk = b'data: \n\n\r\n\r\n'
261
-
262
  chunk = chunk.decode()
263
-
264
  chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n')
265
  chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\n\r\n\r\n')
266
  chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '')
267
-
268
  yield PhindResponse({
269
- 'id' : f'cmpl-1337-{int(time())}',
270
- 'object' : 'text_completion',
271
- 'created': int(time()),
272
- 'model' : model,
273
  'choices': [{
274
- 'text' : chunk,
275
- 'index' : 0,
276
- 'logprobs' : None,
277
- 'finish_reason' : 'stop'
278
- }],
279
  'usage': {
280
- 'prompt_tokens' : len(prompt),
281
- 'completion_tokens' : len(chunk),
282
- 'total_tokens' : len(prompt) + len(chunk)
283
  }
284
  })
285
 
 
1
+ from datetime import datetime
2
+ from queue import Queue, Empty
3
+ from threading import Thread
4
+ from time import time
5
  from urllib.parse import quote
6
 
7
  from curl_cffi.requests import post
8
 
9
  cf_clearance = ''
10
+ user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
11
+
12
 
13
  class PhindResponse:
 
14
  class Completion:
 
15
  class Choices:
16
  def __init__(self, choice: dict) -> None:
17
+ self.text = choice['text']
18
+ self.content = self.text.encode()
19
+ self.index = choice['index']
20
+ self.logprobs = choice['logprobs']
21
+ self.finish_reason = choice['finish_reason']
22
+
23
  def __repr__(self) -> str:
24
  return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
25
 
 
28
 
29
  class Usage:
30
  def __init__(self, usage_dict: dict) -> None:
31
+ self.prompt_tokens = usage_dict['prompt_tokens']
32
+ self.completion_tokens = usage_dict['completion_tokens']
33
+ self.total_tokens = usage_dict['total_tokens']
34
 
35
  def __repr__(self):
36
  return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
37
+
38
  def __init__(self, response_dict: dict) -> None:
39
+ self.response_dict = response_dict
40
+ self.id = response_dict['id']
41
+ self.object = response_dict['object']
42
+ self.created = response_dict['created']
43
+ self.model = response_dict['model']
44
+ self.completion = self.Completion(response_dict['choices'])
45
+ self.usage = self.Usage(response_dict['usage'])
 
46
 
47
  def json(self) -> dict:
48
  return self.response_dict
49
 
50
 
51
  class Search:
52
+ def create(prompt: str, actualSearch: bool = True, language: str = 'en') -> dict: # None = no search
53
  if user_agent == '':
54
  raise ValueError('user_agent must be set, refer to documentation')
55
+ if cf_clearance == '':
56
  raise ValueError('cf_clearance must be set, refer to documentation')
57
+
58
  if not actualSearch:
59
  return {
60
  '_type': 'SearchResponse',
 
72
  }
73
  }
74
  }
75
+
76
  headers = {
77
  'authority': 'www.phind.com',
78
  'accept': '*/*',
 
88
  'sec-fetch-site': 'same-origin',
89
  'user-agent': user_agent
90
  }
91
+
92
+ return post('https://www.phind.com/api/bing/search', headers=headers, json={
93
  'q': prompt,
94
  'userRankList': {},
95
  'browserLanguage': language}).json()['rawBingResults']
 
97
 
98
  class Completion:
99
  def create(
100
+ model='gpt-4',
101
+ prompt: str = '',
102
+ results: dict = None,
103
+ creative: bool = False,
104
+ detailed: bool = False,
105
+ codeContext: str = '',
106
+ language: str = 'en') -> PhindResponse:
107
+
108
+ if user_agent == '':
109
  raise ValueError('user_agent must be set, refer to documentation')
110
 
111
+ if cf_clearance == '':
112
  raise ValueError('cf_clearance must be set, refer to documentation')
113
+
114
  if results is None:
115
+ results = Search.create(prompt, actualSearch=True)
116
+
117
  if len(codeContext) > 2999:
118
  raise ValueError('codeContext must be less than 3000 characters')
119
+
120
  models = {
121
+ 'gpt-4': 'expert',
122
+ 'gpt-3.5-turbo': 'intermediate',
123
  'gpt-3.5': 'intermediate',
124
  }
125
+
126
  json_data = {
127
+ 'question': prompt,
128
+ 'bingResults': results, # response.json()['rawBingResults'],
129
+ 'codeContext': codeContext,
130
  'options': {
131
+ 'skill': models[model],
132
+ 'date': datetime.now().strftime("%d/%m/%Y"),
133
  'language': language,
134
  'detailed': detailed,
135
  'creative': creative
136
  }
137
  }
138
+
139
  headers = {
140
  'authority': 'www.phind.com',
141
  'accept': '*/*',
 
152
  'sec-fetch-site': 'same-origin',
153
  'user-agent': user_agent
154
  }
155
+
156
  completion = ''
157
+ response = post('https://www.phind.com/api/infer/answer', headers=headers, json=json_data, timeout=99999,
158
+ impersonate='chrome110')
159
  for line in response.text.split('\r\n\r\n'):
160
  completion += (line.replace('data: ', ''))
161
+
162
  return PhindResponse({
163
+ 'id': f'cmpl-1337-{int(time())}',
164
+ 'object': 'text_completion',
165
+ 'created': int(time()),
166
+ 'model': models[model],
167
  'choices': [{
168
+ 'text': completion,
169
+ 'index': 0,
170
+ 'logprobs': None,
171
+ 'finish_reason': 'stop'
172
+ }],
173
  'usage': {
174
+ 'prompt_tokens': len(prompt),
175
+ 'completion_tokens': len(completion),
176
+ 'total_tokens': len(prompt) + len(completion)
177
  }
178
  })
179
+
180
 
181
  class StreamingCompletion:
182
+ message_queue = Queue()
183
  stream_completed = False
184
+
185
  def request(model, prompt, results, creative, detailed, codeContext, language) -> None:
186
+
187
  models = {
188
+ 'gpt-4': 'expert',
189
+ 'gpt-3.5-turbo': 'intermediate',
190
  'gpt-3.5': 'intermediate',
191
  }
192
 
193
  json_data = {
194
+ 'question': prompt,
195
+ 'bingResults': results,
196
+ 'codeContext': codeContext,
197
  'options': {
198
+ 'skill': models[model],
199
+ 'date': datetime.now().strftime("%d/%m/%Y"),
200
  'language': language,
201
  'detailed': detailed,
202
  'creative': creative
 
219
  'sec-fetch-site': 'same-origin',
220
  'user-agent': user_agent
221
  }
222
 
223
+ response = post('https://www.phind.com/api/infer/answer',
224
+ headers=headers, json=json_data, timeout=99999, impersonate='chrome110',
225
+ content_callback=StreamingCompletion.handle_stream_response)
226
 
227
  StreamingCompletion.stream_completed = True
228
 
229
  @staticmethod
230
  def create(
231
+ model: str = 'gpt-4',
232
+ prompt: str = '',
233
+ results: dict = None,
234
+ creative: bool = False,
235
+ detailed: bool = False,
236
+ codeContext: str = '',
237
+ language: str = 'en'):
238
+
239
  if user_agent == '':
240
  raise ValueError('user_agent must be set, refer to documentation')
241
+ if cf_clearance == '':
242
  raise ValueError('cf_clearance must be set, refer to documentation')
243
+
244
  if results is None:
245
+ results = Search.create(prompt, actualSearch=True)
246
+
247
  if len(codeContext) > 2999:
248
  raise ValueError('codeContext must be less than 3000 characters')
249
+
250
+ Thread(target=StreamingCompletion.request, args=[
251
  model, prompt, results, creative, detailed, codeContext, language]).start()
252
+
253
  while StreamingCompletion.stream_completed != True or not StreamingCompletion.message_queue.empty():
254
  try:
255
  chunk = StreamingCompletion.message_queue.get(timeout=0)
256
 
257
  if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
258
  chunk = b'data: \n\n\r\n\r\n'
259
+
260
  chunk = chunk.decode()
261
+
262
  chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n')
263
  chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\n\r\n\r\n')
264
  chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '')
265
+
266
  yield PhindResponse({
267
+ 'id': f'cmpl-1337-{int(time())}',
268
+ 'object': 'text_completion',
269
+ 'created': int(time()),
270
+ 'model': model,
271
  'choices': [{
272
+ 'text': chunk,
273
+ 'index': 0,
274
+ 'logprobs': None,
275
+ 'finish_reason': 'stop'
276
+ }],
277
  'usage': {
278
+ 'prompt_tokens': len(prompt),
279
+ 'completion_tokens': len(chunk),
280
+ 'total_tokens': len(prompt) + len(chunk)
281
  }
282
  })
283
 
quora/__init__.py CHANGED
@@ -116,11 +116,11 @@ class ModelResponse:
116
  class Model:
117
  @staticmethod
118
  def create(
119
- token: str,
120
- model: str = 'gpt-3.5-turbo', # claude-instant
121
- system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible',
122
- description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
123
- handle: str = None,
124
  ) -> ModelResponse:
125
  models = {
126
  'gpt-3.5-turbo': 'chinchilla',
@@ -202,9 +202,9 @@ class Model:
202
  class Account:
203
  @staticmethod
204
  def create(
205
- proxy: Optional[str] = None,
206
- logging: bool = False,
207
- enable_bot_creation: bool = False,
208
  ):
209
  client = TLS(client_identifier='chrome110')
210
  client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else None
@@ -309,10 +309,10 @@ class Account:
309
  class StreamingCompletion:
310
  @staticmethod
311
  def create(
312
- model: str = 'gpt-4',
313
- custom_model: bool = None,
314
- prompt: str = 'hello world',
315
- token: str = '',
316
  ):
317
  _model = MODELS[model] if not custom_model else custom_model
318
 
@@ -344,10 +344,10 @@ class StreamingCompletion:
344
 
345
  class Completion:
346
  def create(
347
- model: str = 'gpt-4',
348
- custom_model: str = None,
349
- prompt: str = 'hello world',
350
- token: str = '',
351
  ):
352
  models = {
353
  'sage': 'capybara',
@@ -389,12 +389,12 @@ class Completion:
389
 
390
  class Poe:
391
  def __init__(
392
- self,
393
- model: str = 'ChatGPT',
394
- driver: str = 'firefox',
395
- download_driver: bool = False,
396
- driver_path: Optional[str] = None,
397
- cookie_path: str = './quora/cookie.json',
398
  ):
399
  # validating the model
400
  if model and model not in MODELS:
@@ -473,12 +473,12 @@ class Poe:
473
  return response
474
 
475
  def create_bot(
476
- self,
477
- name: str,
478
- /,
479
- prompt: str = '',
480
- base_model: str = 'ChatGPT',
481
- description: str = '',
482
  ) -> None:
483
  if base_model not in MODELS:
484
  raise RuntimeError('Sorry, the base_model you provided does not exist. Please check and try again.')
 
116
  class Model:
117
  @staticmethod
118
  def create(
119
+ token: str,
120
+ model: str = 'gpt-3.5-turbo', # claude-instant
121
+ system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible',
122
+ description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
123
+ handle: str = None,
124
  ) -> ModelResponse:
125
  models = {
126
  'gpt-3.5-turbo': 'chinchilla',
 
202
  class Account:
203
  @staticmethod
204
  def create(
205
+ proxy: Optional[str] = None,
206
+ logging: bool = False,
207
+ enable_bot_creation: bool = False,
208
  ):
209
  client = TLS(client_identifier='chrome110')
210
  client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else None
 
309
  class StreamingCompletion:
310
  @staticmethod
311
  def create(
312
+ model: str = 'gpt-4',
313
+ custom_model: bool = None,
314
+ prompt: str = 'hello world',
315
+ token: str = '',
316
  ):
317
  _model = MODELS[model] if not custom_model else custom_model
318
 
 
344
 
345
  class Completion:
346
  def create(
347
+ model: str = 'gpt-4',
348
+ custom_model: str = None,
349
+ prompt: str = 'hello world',
350
+ token: str = '',
351
  ):
352
  models = {
353
  'sage': 'capybara',
 
389
 
390
  class Poe:
391
  def __init__(
392
+ self,
393
+ model: str = 'ChatGPT',
394
+ driver: str = 'firefox',
395
+ download_driver: bool = False,
396
+ driver_path: Optional[str] = None,
397
+ cookie_path: str = './quora/cookie.json',
398
  ):
399
  # validating the model
400
  if model and model not in MODELS:
 
473
  return response
474
 
475
  def create_bot(
476
+ self,
477
+ name: str,
478
+ /,
479
+ prompt: str = '',
480
+ base_model: str = 'ChatGPT',
481
+ description: str = '',
482
  ) -> None:
483
  if base_model not in MODELS:
484
  raise RuntimeError('Sorry, the base_model you provided does not exist. Please check and try again.')
quora/api.py CHANGED
@@ -384,7 +384,7 @@ class Client:
384
  continue
385
 
386
  # update info about response
387
- message["text_new"] = message["text"][len(last_text) :]
388
  last_text = message["text"]
389
  message_id = message["messageId"]
390
 
@@ -456,21 +456,21 @@ class Client:
456
  logger.info(f"No more messages left to delete.")
457
 
458
  def create_bot(
459
- self,
460
- handle,
461
- prompt="",
462
- base_model="chinchilla",
463
- description="",
464
- intro_message="",
465
- api_key=None,
466
- api_bot=False,
467
- api_url=None,
468
- prompt_public=True,
469
- pfp_url=None,
470
- linkification=False,
471
- markdown_rendering=True,
472
- suggested_replies=False,
473
- private=False,
474
  ):
475
  result = self.send_query(
476
  "PoeBotCreateMutation",
@@ -499,21 +499,21 @@ class Client:
499
  return data
500
 
501
  def edit_bot(
502
- self,
503
- bot_id,
504
- handle,
505
- prompt="",
506
- base_model="chinchilla",
507
- description="",
508
- intro_message="",
509
- api_key=None,
510
- api_url=None,
511
- private=False,
512
- prompt_public=True,
513
- pfp_url=None,
514
- linkification=False,
515
- markdown_rendering=True,
516
- suggested_replies=False,
517
  ):
518
  result = self.send_query(
519
  "PoeBotEditMutation",
 
384
  continue
385
 
386
  # update info about response
387
+ message["text_new"] = message["text"][len(last_text):]
388
  last_text = message["text"]
389
  message_id = message["messageId"]
390
 
 
456
  logger.info(f"No more messages left to delete.")
457
 
458
  def create_bot(
459
+ self,
460
+ handle,
461
+ prompt="",
462
+ base_model="chinchilla",
463
+ description="",
464
+ intro_message="",
465
+ api_key=None,
466
+ api_bot=False,
467
+ api_url=None,
468
+ prompt_public=True,
469
+ pfp_url=None,
470
+ linkification=False,
471
+ markdown_rendering=True,
472
+ suggested_replies=False,
473
+ private=False,
474
  ):
475
  result = self.send_query(
476
  "PoeBotCreateMutation",
 
499
  return data
500
 
501
  def edit_bot(
502
+ self,
503
+ bot_id,
504
+ handle,
505
+ prompt="",
506
+ base_model="chinchilla",
507
+ description="",
508
+ intro_message="",
509
+ api_key=None,
510
+ api_url=None,
511
+ private=False,
512
+ prompt_public=True,
513
+ pfp_url=None,
514
+ linkification=False,
515
+ markdown_rendering=True,
516
+ suggested_replies=False,
517
  ):
518
  result = self.send_query(
519
  "PoeBotEditMutation",
testing/forefront_test.py CHANGED
@@ -5,7 +5,6 @@ token = forefront.Account.create(logging=True)
5
  print(token)
6
 
7
  # get a response
8
- for response in forefront.StreamingCompletion.create(token = token,
9
- prompt = 'hello world', model='gpt-4'):
10
-
11
- print(response.completion.choices[0].text, end = '')
 
5
  print(token)
6
 
7
  # get a response
8
+ for response in forefront.StreamingCompletion.create(token=token,
9
+ prompt='hello world', model='gpt-4'):
10
+ print(response.completion.choices[0].text, end='')
 
testing/phind_test.py CHANGED
@@ -2,18 +2,19 @@ import phind
2
 
3
  # set cf_clearance cookie ( not needed at the moment)
4
  phind.cf_clearance = 'MDzwnr3ZWk_ap8u.iwwMR5F3WccfOkhUy_zGNDpcF3s-1682497341-0-160'
5
- phind.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
6
 
7
  prompt = 'hello world'
8
 
9
  # normal completion
10
  result = phind.Completion.create(
11
- model = 'gpt-4',
12
- prompt = prompt,
13
- results = phind.Search.create(prompt, actualSearch = False), # create search (set actualSearch to False to disable internet)
14
- creative = False,
15
- detailed = False,
16
- codeContext = '') # up to 3000 chars of code
 
17
 
18
  print(result.completion.choices[0].text)
19
 
@@ -22,11 +23,12 @@ prompt = 'who won the quatar world cup'
22
  # help needed: not getting newlines from the stream, please submit a PR if you know how to fix this
23
  # stream completion
24
  for result in phind.StreamingCompletion.create(
25
- model = 'gpt-4',
26
- prompt = prompt,
27
- results = phind.Search.create(prompt, actualSearch = True), # create search (set actualSearch to False to disable internet)
28
- creative = False,
29
- detailed = False,
30
- codeContext = ''): # up to 3000 chars of code
 
31
 
32
- print(result.completion.choices[0].text, end='', flush=True)
 
2
 
3
  # set cf_clearance cookie ( not needed at the moment)
4
  phind.cf_clearance = 'MDzwnr3ZWk_ap8u.iwwMR5F3WccfOkhUy_zGNDpcF3s-1682497341-0-160'
5
+ phind.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
6
 
7
  prompt = 'hello world'
8
 
9
  # normal completion
10
  result = phind.Completion.create(
11
+ model='gpt-4',
12
+ prompt=prompt,
13
+ results=phind.Search.create(prompt, actualSearch=False),
14
+ # create search (set actualSearch to False to disable internet)
15
+ creative=False,
16
+ detailed=False,
17
+ codeContext='') # up to 3000 chars of code
18
 
19
  print(result.completion.choices[0].text)
20
 
 
23
  # help needed: not getting newlines from the stream, please submit a PR if you know how to fix this
24
  # stream completion
25
  for result in phind.StreamingCompletion.create(
26
+ model='gpt-4',
27
+ prompt=prompt,
28
+ results=phind.Search.create(prompt, actualSearch=True),
29
+ # create search (set actualSearch to False to disable internet)
30
+ creative=False,
31
+ detailed=False,
32
+ codeContext=''): # up to 3000 chars of code
33
 
34
+ print(result.completion.choices[0].text, end='', flush=True)
testing/poe_account_create_test.py CHANGED
@@ -1,90 +1,90 @@
1
- from requests import Session
2
- from tls_client import Session as TLS
3
- from json import dumps
4
  from hashlib import md5
5
- from time import sleep
6
  from re import findall
7
- from pypasser import reCaptchaV3
8
  from quora import extract_formkey
9
  from quora.mail import Emailnator
10
- from twocaptcha import TwoCaptcha
11
 
12
  solver = TwoCaptcha('72747bf24a9d89b4dcc1b24875efd358')
13
 
 
14
  class Account:
15
  def create(proxy: None or str = None, logging: bool = False, enable_bot_creation: bool = False):
16
- client = TLS(client_identifier='chrome110')
17
  client.proxies = {
18
  'http': f'http://{proxy}',
19
  'https': f'http://{proxy}'} if proxy else None
20
 
21
- mail_client = Emailnator()
22
- mail_address = mail_client.get_mail()
23
 
24
  if logging: print('email', mail_address)
25
 
26
  client.headers = {
27
- 'authority' : 'poe.com',
28
- 'accept' : '*/*',
29
  'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
30
- 'content-type' : 'application/json',
31
- 'origin' : 'https://poe.com',
32
- 'poe-formkey' : 'null',
33
- 'poe-tag-id' : 'null',
34
- 'poe-tchannel' : 'null',
35
- 'referer' : 'https://poe.com/login',
36
- 'sec-ch-ua' : '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
37
- 'sec-ch-ua-mobile' : '?0',
38
  'sec-ch-ua-platform': '"macOS"',
39
  'sec-fetch-dest': 'empty',
40
  'sec-fetch-mode': 'cors',
41
  'sec-fetch-site': 'same-origin',
42
- 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
43
  }
44
 
45
- client.headers["poe-formkey"] = extract_formkey(client.get('https://poe.com/login').text)
46
  client.headers["poe-tchannel"] = client.get('https://poe.com/api/settings').json()['tchannelData']['channel']
47
 
48
- #token = reCaptchaV3('https://www.recaptcha.net/recaptcha/enterprise/anchor?ar=1&k=6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG&co=aHR0cHM6Ly9wb2UuY29tOjQ0Mw..&hl=en&v=4PnKmGB9wRHh1i04o7YUICeI&size=invisible&cb=bi6ivxoskyal')
49
  token = solver.recaptcha(sitekey='6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG',
50
- url = 'https://poe.com/login?redirect_url=%2F',
51
- version = 'v3',
52
- enterprise = 1,
53
- invisible = 1,
54
- action = 'login',)['code']
55
 
56
- payload = dumps(separators = (',', ':'), obj = {
57
  'queryName': 'MainSignupLoginSection_sendVerificationCodeMutation_Mutation',
58
  'variables': {
59
- 'emailAddress' : mail_address,
60
- 'phoneNumber' : None,
61
  'recaptchaToken': token
62
  },
63
  'query': 'mutation MainSignupLoginSection_sendVerificationCodeMutation_Mutation(\n $emailAddress: String\n $phoneNumber: String\n $recaptchaToken: String\n) {\n sendVerificationCode(verificationReason: login, emailAddress: $emailAddress, phoneNumber: $phoneNumber, recaptchaToken: $recaptchaToken) {\n status\n errorMessage\n }\n}\n',
64
  })
65
 
66
  base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
67
- client.headers["poe-tag-id"] = md5(base_string.encode()).hexdigest()
68
-
69
  print(dumps(client.headers, indent=4))
70
-
71
  response = client.post('https://poe.com/api/gql_POST', data=payload)
72
-
73
  if 'automated_request_detected' in response.text:
74
  print('please try using a proxy / wait for fix')
75
-
76
  if 'Bad Request' in response.text:
77
- if logging: print('bad request, retrying...' , response.json())
78
  quit()
79
 
80
- if logging: print('send_code' ,response.json())
81
-
82
  mail_content = mail_client.get_message()
83
- mail_token = findall(r';">(\d{6,7})</div>', mail_content)[0]
84
 
85
  if logging: print('code', mail_token)
86
 
87
- payload = dumps(separators = (',', ':'), obj={
88
  "queryName": "SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation",
89
  "variables": {
90
  "verificationCode": str(mail_token),
@@ -95,10 +95,10 @@ class Account:
95
  })
96
 
97
  base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
98
- client.headers["poe-tag-id"] = md5(base_string.encode()).hexdigest()
99
 
100
- response = client.post('https://poe.com/api/gql_POST', data = payload)
101
  if logging: print('verify_code', response.json())
102
-
103
 
104
- Account.create(proxy = 'xtekky:[email protected]:12321', logging = True)
1
  from hashlib import md5
2
+ from json import dumps
3
  from re import findall
4
+
5
+ from tls_client import Session as TLS
6
+ from twocaptcha import TwoCaptcha
7
+
8
  from quora import extract_formkey
9
  from quora.mail import Emailnator
 
10
 
11
  solver = TwoCaptcha('72747bf24a9d89b4dcc1b24875efd358')
12
 
13
+
14
  class Account:
15
  def create(proxy: None or str = None, logging: bool = False, enable_bot_creation: bool = False):
16
+ client = TLS(client_identifier='chrome110')
17
  client.proxies = {
18
  'http': f'http://{proxy}',
19
  'https': f'http://{proxy}'} if proxy else None
20
 
21
+ mail_client = Emailnator()
22
+ mail_address = mail_client.get_mail()
23
 
24
  if logging: print('email', mail_address)
25
 
26
  client.headers = {
27
+ 'authority': 'poe.com',
28
+ 'accept': '*/*',
29
  'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
30
+ 'content-type': 'application/json',
31
+ 'origin': 'https://poe.com',
32
+ 'poe-formkey': 'null',
33
+ 'poe-tag-id': 'null',
34
+ 'poe-tchannel': 'null',
35
+ 'referer': 'https://poe.com/login',
36
+ 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
37
+ 'sec-ch-ua-mobile': '?0',
38
  'sec-ch-ua-platform': '"macOS"',
39
  'sec-fetch-dest': 'empty',
40
  'sec-fetch-mode': 'cors',
41
  'sec-fetch-site': 'same-origin',
42
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
43
  }
44
 
45
+ client.headers["poe-formkey"] = extract_formkey(client.get('https://poe.com/login').text)
46
  client.headers["poe-tchannel"] = client.get('https://poe.com/api/settings').json()['tchannelData']['channel']
47
 
48
+ # token = reCaptchaV3('https://www.recaptcha.net/recaptcha/enterprise/anchor?ar=1&k=6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG&co=aHR0cHM6Ly9wb2UuY29tOjQ0Mw..&hl=en&v=4PnKmGB9wRHh1i04o7YUICeI&size=invisible&cb=bi6ivxoskyal')
49
  token = solver.recaptcha(sitekey='6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG',
50
+ url='https://poe.com/login?redirect_url=%2F',
51
+ version='v3',
52
+ enterprise=1,
53
+ invisible=1,
54
+ action='login', )['code']
55
 
56
+ payload = dumps(separators=(',', ':'), obj={
57
  'queryName': 'MainSignupLoginSection_sendVerificationCodeMutation_Mutation',
58
  'variables': {
59
+ 'emailAddress': mail_address,
60
+ 'phoneNumber': None,
61
  'recaptchaToken': token
62
  },
63
  'query': 'mutation MainSignupLoginSection_sendVerificationCodeMutation_Mutation(\n $emailAddress: String\n $phoneNumber: String\n $recaptchaToken: String\n) {\n sendVerificationCode(verificationReason: login, emailAddress: $emailAddress, phoneNumber: $phoneNumber, recaptchaToken: $recaptchaToken) {\n status\n errorMessage\n }\n}\n',
64
  })
65
 
66
  base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
67
+ client.headers["poe-tag-id"] = md5(base_string.encode()).hexdigest()
68
+
69
  print(dumps(client.headers, indent=4))
70
+
71
  response = client.post('https://poe.com/api/gql_POST', data=payload)
72
+
73
  if 'automated_request_detected' in response.text:
74
  print('please try using a proxy / wait for fix')
75
+
76
  if 'Bad Request' in response.text:
77
+ if logging: print('bad request, retrying...', response.json())
78
  quit()
79
 
80
+ if logging: print('send_code', response.json())
81
+
82
  mail_content = mail_client.get_message()
83
+ mail_token = findall(r';">(\d{6,7})</div>', mail_content)[0]
84
 
85
  if logging: print('code', mail_token)
86
 
87
+ payload = dumps(separators=(',', ':'), obj={
88
  "queryName": "SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation",
89
  "variables": {
90
  "verificationCode": str(mail_token),
 
95
  })
96
 
97
  base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
98
+ client.headers["poe-tag-id"] = md5(base_string.encode()).hexdigest()
99
 
100
+ response = client.post('https://poe.com/api/gql_POST', data=payload)
101
  if logging: print('verify_code', response.json())
 
102
 
103
+
104
+ Account.create(proxy='xtekky:[email protected]:12321', logging=True)
testing/poe_test.py CHANGED
@@ -1,13 +1,13 @@
1
- import quora
2
  from time import sleep
3
 
4
- token = quora.Account.create(proxy = None,logging = True)
 
 
5
  print('token', token)
6
 
7
  sleep(2)
8
 
9
- for response in quora.StreamingCompletion.create(model = 'gpt-3.5-turbo',
10
- prompt = 'hello world',
11
- token = token):
12
-
13
- print(response.completion.choices[0].text, end="", flush=True)
 
 
1
  from time import sleep
2
 
3
+ import quora
4
+
5
+ token = quora.Account.create(proxy=None, logging=True)
6
  print('token', token)
7
 
8
  sleep(2)
9
 
10
+ for response in quora.StreamingCompletion.create(model='gpt-3.5-turbo',
11
+ prompt='hello world',
12
+ token=token):
13
+ print(response.completion.choices[0].text, end="", flush=True)
 
testing/quora_test_2.py CHANGED
@@ -1,18 +1,17 @@
1
  import quora
2
 
3
- token = quora.Account.create(logging = True, enable_bot_creation=True)
4
 
5
  model = quora.Model.create(
6
- token = token,
7
- model = 'gpt-3.5-turbo', # or claude-instant-v1.0
8
- system_prompt = 'you are ChatGPT a large language model ...'
9
  )
10
 
11
  print(model.name)
12
 
13
  for response in quora.StreamingCompletion.create(
14
- custom_model = model.name,
15
- prompt ='hello world',
16
- token = token):
17
-
18
- print(response.completion.choices[0].text)
 
1
  import quora
2
 
3
+ token = quora.Account.create(logging=True, enable_bot_creation=True)
4
 
5
  model = quora.Model.create(
6
+ token=token,
7
+ model='gpt-3.5-turbo', # or claude-instant-v1.0
8
+ system_prompt='you are ChatGPT a large language model ...'
9
  )
10
 
11
  print(model.name)
12
 
13
  for response in quora.StreamingCompletion.create(
14
+ custom_model=model.name,
15
+ prompt='hello world',
16
+ token=token):
17
+ print(response.completion.choices[0].text)
 
testing/sqlchat_test.py CHANGED
@@ -1,7 +1,6 @@
1
  import sqlchat
2
 
3
  for response in sqlchat.StreamCompletion.create(
4
- prompt = 'write python code to reverse a string',
5
- messages = []):
6
-
7
- print(response.completion.choices[0].text, end='')
 
1
  import sqlchat
2
 
3
  for response in sqlchat.StreamCompletion.create(
4
+ prompt='write python code to reverse a string',
5
+ messages=[]):
6
+ print(response.completion.choices[0].text, end='')
 
testing/t3nsor_test.py CHANGED
@@ -1,7 +1,6 @@
1
  import t3nsor
2
 
3
  for response in t3nsor.StreamCompletion.create(
4
- prompt = 'write python code to reverse a string',
5
- messages = []):
6
-
7
  print(response.completion.choices[0].text)
 
1
  import t3nsor
2
 
3
  for response in t3nsor.StreamCompletion.create(
4
+ prompt='write python code to reverse a string',
5
+ messages=[]):
 
6
  print(response.completion.choices[0].text)
testing/writesonic_test.py CHANGED
@@ -2,29 +2,29 @@
2
  import writesonic
3
 
4
  # create account (3-4s)
5
- account = writesonic.Account.create(logging = True)
6
 
7
  # with loging:
8
- # 2023-04-06 21:50:25 INFO __main__ -> register success : '{"id":"51aa0809-3053-44f7-922a...' (2s)
9
- # 2023-04-06 21:50:25 INFO __main__ -> id : '51aa0809-3053-44f7-922a-2b85d8d07edf'
10
- # 2023-04-06 21:50:25 INFO __main__ -> token : 'eyJhbGciOiJIUzI1NiIsInR5cCI6Ik...'
11
- # 2023-04-06 21:50:28 INFO __main__ -> got key : '194158c4-d249-4be0-82c6-5049e869533c' (2s)
12
 
13
  # simple completion
14
  response = writesonic.Completion.create(
15
- api_key = account.key,
16
- prompt = 'hello world'
17
  )
18
 
19
- print(response.completion.choices[0].text) # Hello! How may I assist you today?
20
 
21
  # conversation
22
 
23
  response = writesonic.Completion.create(
24
- api_key = account.key,
25
- prompt = 'what is my name ?',
26
- enable_memory = True,
27
- history_data = [
28
  {
29
  'is_sent': True,
30
  'message': 'my name is Tekky'
@@ -36,14 +36,14 @@ response = writesonic.Completion.create(
36
  ]
37
  )
38
 
39
- print(response.completion.choices[0].text) # Your name is Tekky.
40
 
41
  # enable internet
42
 
43
  response = writesonic.Completion.create(
44
- api_key = account.key,
45
- prompt = 'who won the quatar world cup ?',
46
- enable_google_results = True
47
  )
48
 
49
- print(response.completion.choices[0].text) # Argentina won the 2022 FIFA World Cup tournament held in Qatar ...
 
2
  import writesonic
3
 
4
  # create account (3-4s)
5
+ account = writesonic.Account.create(logging=True)
6
 
7
  # with loging:
8
+ # 2023-04-06 21:50:25 INFO __main__ -> register success : '{"id":"51aa0809-3053-44f7-922a...' (2s)
9
+ # 2023-04-06 21:50:25 INFO __main__ -> id : '51aa0809-3053-44f7-922a-2b85d8d07edf'
10
+ # 2023-04-06 21:50:25 INFO __main__ -> token : 'eyJhbGciOiJIUzI1NiIsInR5cCI6Ik...'
11
+ # 2023-04-06 21:50:28 INFO __main__ -> got key : '194158c4-d249-4be0-82c6-5049e869533c' (2s)
12
 
13
  # simple completion
14
  response = writesonic.Completion.create(
15
+ api_key=account.key,
16
+ prompt='hello world'
17
  )
18
 
19
+ print(response.completion.choices[0].text) # Hello! How may I assist you today?
20
 
21
  # conversation
22
 
23
  response = writesonic.Completion.create(
24
+ api_key=account.key,
25
+ prompt='what is my name ?',
26
+ enable_memory=True,
27
+ history_data=[
28
  {
29
  'is_sent': True,
30
  'message': 'my name is Tekky'
 
36
  ]
37
  )
38
 
39
+ print(response.completion.choices[0].text) # Your name is Tekky.
40
 
41
  # enable internet
42
 
43
  response = writesonic.Completion.create(
44
+ api_key=account.key,
45
+ prompt='who won the quatar world cup ?',
46
+ enable_google_results=True
47
  )
48
 
49
+ print(response.completion.choices[0].text) # Argentina won the 2022 FIFA World Cup tournament held in Qatar ...
unfinished/bard/__init__.py CHANGED
@@ -1,12 +1,12 @@
1
- from requests import Session
2
- from re import search
3
- from random import randint
4
  from json import dumps, loads
5
- from urllib.parse import urlencode
6
- from dotenv import load_dotenv
7
  from os import getenv
8
 
9
  from bard.typings import BardResponse
 
 
10
 
11
  load_dotenv()
12
  token = getenv('1psid')
@@ -62,16 +62,17 @@ class Completion:
62
  'rt': 'c',
63
  })
64
 
65
- response = client.post(f'https://bard.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate?{params}',
66
- data={
67
- 'at': snlm0e,
68
- 'f.req': dumps([None, dumps([
69
- [prompt],
70
- None,
71
- [conversation_id, response_id, choice_id],
72
- ])])
73
- }
74
- )
 
75
 
76
  chat_data = loads(response.content.splitlines()[3])[0][2]
77
  if not chat_data:
1
  from json import dumps, loads
 
 
2
  from os import getenv
3
+ from random import randint
4
+ from re import search
5
+ from urllib.parse import urlencode
6
 
7
  from bard.typings import BardResponse
8
+ from dotenv import load_dotenv
9
+ from requests import Session
10
 
11
  load_dotenv()
12
  token = getenv('1psid')
 
62
  'rt': 'c',
63
  })
64
 
65
+ response = client.post(
66
+ f'https://bard.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate?{params}',
67
+ data={
68
+ 'at': snlm0e,
69
+ 'f.req': dumps([None, dumps([
70
+ [prompt],
71
+ None,
72
+ [conversation_id, response_id, choice_id],
73
+ ])])
74
+ }
75
+ )
76
 
77
  chat_data = loads(response.content.splitlines()[3])[0][2]
78
  if not chat_data:
unfinished/bard/typings.py CHANGED
@@ -1,4 +1,4 @@
1
- from typing import Dict, List, Optional, Union
2
 
3
 
4
  class BardResponse:
 
1
+ from typing import Dict, List, Union
2
 
3
 
4
  class BardResponse:
unfinished/bing/__ini__.py CHANGED
@@ -1,14 +1,12 @@
1
  # Import necessary libraries
2
- from requests import get
3
- from browser_cookie3 import edge, chrome
4
- from ssl import create_default_context
5
- from certifi import where
6
- from uuid import uuid4
7
- from random import randint
8
  from json import dumps, loads
 
9
 
10
- import asyncio
11
  import websockets
 
 
 
12
 
13
  # Set up SSL context
14
  ssl_context = create_default_context()
@@ -28,14 +26,14 @@ def get_token():
28
 
29
  class AsyncCompletion:
30
  async def create(
31
- prompt: str = 'hello world',
32
- optionSets: list = [
33
- 'deepleo',
34
- 'enable_debug_commands',
35
- 'disable_emoji_spoken_text',
36
- 'enablemm',
37
- 'h3relaxedimg'
38
- ],
39
  token: str = get_token()):
40
  """Create a connection to Bing AI and send the prompt."""
41
 
@@ -83,7 +81,7 @@ class AsyncCompletion:
83
  continue
84
 
85
  response = loads(obj)
86
- if response.get('type') == 1 and response['arguments'][0].get('messages',):
87
  response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get(
88
  'text')
89
 
@@ -99,11 +97,12 @@ class AsyncCompletion:
99
  async def run():
100
  """Run the async completion and print the result."""
101
  async for value in AsyncCompletion.create(
102
- prompt='summarize cinderella with each word beginning with a consecutive letter of the alphabet, a-z',
103
- optionSets=[
104
- "galileo",
105
- ]
106
  ):
107
  print(value, end='', flush=True)
108
 
 
109
  asyncio.run(run())
 
1
  # Import necessary libraries
2
+ import asyncio
3
  from json import dumps, loads
4
+ from ssl import create_default_context
5
 
 
6
  import websockets
7
+ from browser_cookie3 import edge
8
+ from certifi import where
9
+ from requests import get
10
 
11
  # Set up SSL context
12
  ssl_context = create_default_context()
 
26
 
27
  class AsyncCompletion:
28
  async def create(
29
+ prompt: str = 'hello world',
30
+ optionSets: list = [
31
+ 'deepleo',
32
+ 'enable_debug_commands',
33
+ 'disable_emoji_spoken_text',
34
+ 'enablemm',
35
+ 'h3relaxedimg'
36
+ ],
37
  token: str = get_token()):
38
  """Create a connection to Bing AI and send the prompt."""
39
 
 
81
  continue
82
 
83
  response = loads(obj)
84
+ if response.get('type') == 1 and response['arguments'][0].get('messages', ):
85
  response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get(
86
  'text')
87
 
 
97
  async def run():
98
  """Run the async completion and print the result."""
99
  async for value in AsyncCompletion.create(
100
+ prompt='summarize cinderella with each word beginning with a consecutive letter of the alphabet, a-z',
101
+ optionSets=[
102
+ "galileo",
103
+ ]
104
  ):
105
  print(value, end='', flush=True)
106
 
107
+
108
  asyncio.run(run())
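
A usage aside, not part of this diff: since `AsyncCompletion.create` is an async generator, it can also be consumed programmatically rather than printed, for example to collect the streamed answer into one string. A minimal sketch reusing only names visible above:

    import asyncio


    async def collect(prompt: str) -> str:
        parts = []
        # same generator the run() helper iterates; default token applies
        async for piece in AsyncCompletion.create(prompt=prompt, optionSets=['galileo']):
            parts.append(piece)
        return ''.join(parts)


    answer = asyncio.run(collect('explain websockets in one sentence'))
    print(answer)
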
unfinished/cocalc/__init__.py CHANGED
@@ -6,7 +6,6 @@ class Completion:
6
  system_prompt=("ASSUME I HAVE FULL ACCESS TO COCALC. ENCLOSE MATH IN $. "
7
  "INCLUDE THE LANGUAGE DIRECTLY AFTER THE TRIPLE BACKTICKS "
8
  "IN ALL MARKDOWN CODE BLOCKS. How can I do the following using CoCalc?")) -> str:
9
-
10
  # Initialize a session with custom headers
11
  session = self._initialize_session()
12
 
 
6
  system_prompt=("ASSUME I HAVE FULL ACCESS TO COCALC. ENCLOSE MATH IN $. "
7
  "INCLUDE THE LANGUAGE DIRECTLY AFTER THE TRIPLE BACKTICKS "
8
  "IN ALL MARKDOWN CODE BLOCKS. How can I do the following using CoCalc?")) -> str:
 
9
  # Initialize a session with custom headers
10
  session = self._initialize_session()
11
 
unfinished/cocalc/cocalc_test.py CHANGED
@@ -1,8 +1,7 @@
1
  import cocalc
2
 
3
-
4
  response = cocalc.Completion.create(
5
- prompt = 'hello world'
6
  )
7
 
8
- print(response)
 
1
  import cocalc
2
 
 
3
  response = cocalc.Completion.create(
4
+ prompt='hello world'
5
  )
6
 
7
+ print(response)
unfinished/easyai/main.py CHANGED
@@ -1,7 +1,8 @@
1
  # Import necessary libraries
2
- from requests import get
3
- from os import urandom
4
  from json import loads
 
 
 
5
 
6
  # Generate a random session ID
7
  sessionId = urandom(10).hex()
 
1
  # Import necessary libraries
 
 
2
  from json import loads
3
+ from os import urandom
4
+
5
+ from requests import get
6
 
7
  # Generate a random session ID
8
  sessionId = urandom(10).hex()
unfinished/gptbz/__init__.py CHANGED
@@ -1,6 +1,8 @@
1
- import websockets
2
  from json import dumps, loads
3
 
 
 
 
4
  # Define the asynchronous function to test the WebSocket connection
5
 
6
 
 
 
1
  from json import dumps, loads
2
 
3
+ import websockets
4
+
5
+
6
  # Define the asynchronous function to test the WebSocket connection
7
 
8
 
unfinished/openai/__ini__.py CHANGED
@@ -1,7 +1,8 @@
1
  # Import required libraries
2
- from tls_client import Session
3
  from uuid import uuid4
 
4
  from browser_cookie3 import chrome
 
5
 
6
 
7
  class OpenAIChat:
 
1
  # Import required libraries
 
2
  from uuid import uuid4
3
+
4
  from browser_cookie3 import chrome
5
+ from tls_client import Session
6
 
7
 
8
  class OpenAIChat:
unfinished/openaihosted/__init__.py CHANGED
@@ -1,7 +1,8 @@
1
- import requests
2
  import json
3
  import re
4
 
 
 
5
  headers = {
6
  'authority': 'openai.a2hosted.com',
7
  'accept': 'text/event-stream',
@@ -13,10 +14,12 @@ headers = {
13
  'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.0.0',
14
  }
15
 
 
16
  def create_query_param(conversation):
17
  encoded_conversation = json.dumps(conversation)
18
  return encoded_conversation.replace(" ", "%20").replace('"', '%22').replace("'", "%27")
19
 
 
20
  user_input = input("Enter your message: ")
21
 
22
  data = [
 
 
1
  import json
2
  import re
3
 
4
+ import requests
5
+
6
  headers = {
7
  'authority': 'openai.a2hosted.com',
8
  'accept': 'text/event-stream',
 
14
  'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.0.0',
15
  }
16
 
17
+
18
  def create_query_param(conversation):
19
  encoded_conversation = json.dumps(conversation)
20
  return encoded_conversation.replace(" ", "%20").replace('"', '%22').replace("'", "%27")
21
 
22
+
23
  user_input = input("Enter your message: ")
24
 
25
  data = [
unfinished/openprompt/create.py CHANGED
@@ -1,9 +1,9 @@
1
- from requests import post, get
2
  from json import dumps
3
- #from mail import MailClient
4
- from time import sleep
5
  from re import findall
6
 
 
 
7
  html = get('https://developermail.com/mail/')
8
  print(html.cookies.get('mailboxId'))
9
  email = findall(r'mailto:(.*)">', html.text)[0]
@@ -15,9 +15,9 @@ headers = {
15
  }
16
 
17
  json_data = {
18
- 'email' : email,
19
  'password': 'T4xyt4Yn6WWQ4NC',
20
- 'data' : {},
21
  'gotrue_meta_security': {},
22
  }
23
 
@@ -27,20 +27,20 @@ print(response.json())
27
  # email_link = None
28
  # while not email_link:
29
  # sleep(1)
30
-
31
  # mails = mailbox.getmails()
32
  # print(mails)
33
 
34
 
35
  quit()
36
 
37
- url = input("Enter the url: ")
38
  response = get(url, allow_redirects=False)
39
 
40
  # https://openprompt.co/#access_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8&expires_in=604800&refresh_token=_Zp8uXIA2InTDKYgo8TCqA&token_type=bearer&type=signup
41
 
42
- redirect = response.headers.get('location')
43
- access_token = redirect.split('&')[0].split('=')[1]
44
  refresh_token = redirect.split('&')[2].split('=')[1]
45
 
46
  supabase_auth_token = dumps([access_token, refresh_token, None, None, None], separators=(',', ':'))
@@ -61,4 +61,4 @@ json_data = {
61
 
62
  response = post('https://openprompt.co/api/chat2', cookies=cookies, json=json_data, stream=True)
63
  for chunk in response.iter_content(chunk_size=1024):
64
- print(chunk)
 
 
1
  from json import dumps
2
+ # from mail import MailClient
 
3
  from re import findall
4
 
5
+ from requests import post, get
6
+
7
  html = get('https://developermail.com/mail/')
8
  print(html.cookies.get('mailboxId'))
9
  email = findall(r'mailto:(.*)">', html.text)[0]
 
15
  }
16
 
17
  json_data = {
18
+ 'email': email,
19
  'password': 'T4xyt4Yn6WWQ4NC',
20
+ 'data': {},
21
  'gotrue_meta_security': {},
22
  }
23
 
 
27
  # email_link = None
28
  # while not email_link:
29
  # sleep(1)
30
+
31
  # mails = mailbox.getmails()
32
  # print(mails)
33
 
34
 
35
  quit()
36
 
37
+ url = input("Enter the url: ")
38
  response = get(url, allow_redirects=False)
39
 
40
  # https://openprompt.co/#access_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8&expires_in=604800&refresh_token=_Zp8uXIA2InTDKYgo8TCqA&token_type=bearer&type=signup
41
 
42
+ redirect = response.headers.get('location')
43
+ access_token = redirect.split('&')[0].split('=')[1]
44
  refresh_token = redirect.split('&')[2].split('=')[1]
45
 
46
  supabase_auth_token = dumps([access_token, refresh_token, None, None, None], separators=(',', ':'))
 
61
 
62
  response = post('https://openprompt.co/api/chat2', cookies=cookies, json=json_data, stream=True)
63
  for chunk in response.iter_content(chunk_size=1024):
64
+ print(chunk)
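
A side note on the token extraction above: it splits the redirect fragment by position, so it breaks silently if the parameter order ever changes. A slightly more defensive sketch (same fields, parsed by name; illustrative only, not part of this commit):

    from urllib.parse import urlparse, parse_qs

    redirect = response.headers.get('location')
    # everything after '#' is an ordinary query string:
    #   access_token=...&expires_in=...&refresh_token=...&token_type=bearer&type=signup
    params = parse_qs(urlparse(redirect).fragment)

    access_token = params['access_token'][0]
    refresh_token = params['refresh_token'][0]
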
unfinished/openprompt/mail.py CHANGED
@@ -1,6 +1,8 @@
1
- import requests
2
  import email
3
 
 
 
 
4
  class MailClient:
5
 
6
  def __init__(self):
@@ -106,4 +108,4 @@ class MailClient:
106
 
107
  client = MailClient()
108
  client.newtoken()
109
- print(client.getmails())
 
 
1
  import email
2
 
3
+ import requests
4
+
5
+
6
  class MailClient:
7
 
8
  def __init__(self):
 
108
 
109
  client = MailClient()
110
  client.newtoken()
111
+ print(client.getmails())
unfinished/openprompt/main.py CHANGED
@@ -30,8 +30,7 @@ json_data = {
30
  ],
31
  }
32
 
33
- response = requests.post('https://openprompt.co/api/chat2', cookies=cookies, headers=headers, json=json_data, stream=True)
 
34
  for chunk in response.iter_content(chunk_size=1024):
35
  print(chunk)
36
-
37
-
 
30
  ],
31
  }
32
 
33
+ response = requests.post('https://openprompt.co/api/chat2', cookies=cookies, headers=headers, json=json_data,
34
+ stream=True)
35
  for chunk in response.iter_content(chunk_size=1024):
36
  print(chunk)
 
 
unfinished/openprompt/test.py CHANGED
@@ -1,7 +1,6 @@
1
  access_token = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV'
2
- supabase_auth_token= '%5B%22eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8%22%2C%22_Zp8uXIA2InTDKYgo8TCqA%22%2Cnull%2Cnull%2Cnull%5D'
3
-
4
 
5
  idk = [
6
  "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8",
7
- "_Zp8uXIA2InTDKYgo8TCqA",None,None,None]
 
1
  access_token = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV'
2
+ supabase_auth_token = '%5B%22eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8%22%2C%22_Zp8uXIA2InTDKYgo8TCqA%22%2Cnull%2Cnull%2Cnull%5D'
 
3
 
4
  idk = [
5
  "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8",
6
+ "_Zp8uXIA2InTDKYgo8TCqA", None, None, None]
unfinished/t3nsor/__init__.py CHANGED
@@ -1,5 +1,6 @@
 
 
1
  from requests import post
2
- from time import time
3
 
4
  headers = {
5
  'authority': 'www.t3nsor.tech',
@@ -19,18 +20,17 @@ headers = {
19
  'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
20
  }
21
 
 
22
  class T3nsorResponse:
23
-
24
  class Completion:
25
-
26
  class Choices:
27
  def __init__(self, choice: dict) -> None:
28
- self.text = choice['text']
29
- self.content = self.text.encode()
30
- self.index = choice['index']
31
- self.logprobs = choice['logprobs']
32
- self.finish_reason = choice['finish_reason']
33
-
34
  def __repr__(self) -> str:
35
  return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
36
 
@@ -39,99 +39,98 @@ class T3nsorResponse:
39
 
40
  class Usage:
41
  def __init__(self, usage_dict: dict) -> None:
42
- self.prompt_tokens = usage_dict['prompt_chars']
43
- self.completion_tokens = usage_dict['completion_chars']
44
- self.total_tokens = usage_dict['total_chars']
45
 
46
  def __repr__(self):
47
  return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
48
-
49
  def __init__(self, response_dict: dict) -> None:
50
-
51
- self.response_dict = response_dict
52
- self.id = response_dict['id']
53
- self.object = response_dict['object']
54
- self.created = response_dict['created']
55
- self.model = response_dict['model']
56
- self.completion = self.Completion(response_dict['choices'])
57
- self.usage = self.Usage(response_dict['usage'])
58
 
59
  def json(self) -> dict:
60
  return self.response_dict
61
 
 
62
  class Completion:
63
  model = {
64
  'model': {
65
- 'id' : 'gpt-3.5-turbo',
66
- 'name' : 'Default (GPT-3.5)'
67
  }
68
  }
69
 
70
  def create(
71
- prompt: str = 'hello world',
72
- messages: list = []) -> T3nsorResponse:
73
-
74
- response = post('https://www.t3nsor.tech/api/chat', headers = headers, json = Completion.model | {
75
- 'messages' : messages,
76
- 'key' : '',
77
- 'prompt' : prompt
78
  })
79
 
80
  return T3nsorResponse({
81
- 'id' : f'cmpl-1337-{int(time())}',
82
- 'object' : 'text_completion',
83
- 'created': int(time()),
84
- 'model' : Completion.model,
85
  'choices': [{
86
- 'text' : response.text,
87
- 'index' : 0,
88
- 'logprobs' : None,
89
- 'finish_reason' : 'stop'
90
- }],
91
  'usage': {
92
- 'prompt_chars' : len(prompt),
93
- 'completion_chars' : len(response.text),
94
- 'total_chars' : len(prompt) + len(response.text)
95
  }
96
  })
97
 
 
98
  class StreamCompletion:
99
  model = {
100
  'model': {
101
- 'id' : 'gpt-3.5-turbo',
102
- 'name' : 'Default (GPT-3.5)'
103
  }
104
  }
105
 
106
  def create(
107
- prompt: str = 'hello world',
108
- messages: list = []) -> T3nsorResponse:
109
-
110
  print('t3nsor api is down, this may not work, refer to another module')
111
 
112
- response = post('https://www.t3nsor.tech/api/chat', headers = headers, stream = True, json = Completion.model | {
113
- 'messages' : messages,
114
- 'key' : '',
115
- 'prompt' : prompt
116
  })
117
-
118
- for chunk in response.iter_content(chunk_size = 2046):
119
  yield T3nsorResponse({
120
- 'id' : f'cmpl-1337-{int(time())}',
121
- 'object' : 'text_completion',
122
- 'created': int(time()),
123
- 'model' : Completion.model,
124
-
125
  'choices': [{
126
- 'text' : chunk.decode(),
127
- 'index' : 0,
128
- 'logprobs' : None,
129
- 'finish_reason' : 'stop'
130
  }],
131
-
132
  'usage': {
133
- 'prompt_chars' : len(prompt),
134
- 'completion_chars' : len(chunk.decode()),
135
- 'total_chars' : len(prompt) + len(chunk.decode())
136
  }
137
  })
 
1
+ from time import time
2
+
3
  from requests import post
 
4
 
5
  headers = {
6
  'authority': 'www.t3nsor.tech',
 
20
  'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
21
  }
22
 
23
+
24
  class T3nsorResponse:
 
25
  class Completion:
 
26
  class Choices:
27
  def __init__(self, choice: dict) -> None:
28
+ self.text = choice['text']
29
+ self.content = self.text.encode()
30
+ self.index = choice['index']
31
+ self.logprobs = choice['logprobs']
32
+ self.finish_reason = choice['finish_reason']
33
+
34
  def __repr__(self) -> str:
35
  return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
36
 
 
39
 
40
  class Usage:
41
  def __init__(self, usage_dict: dict) -> None:
42
+ self.prompt_tokens = usage_dict['prompt_chars']
43
+ self.completion_tokens = usage_dict['completion_chars']
44
+ self.total_tokens = usage_dict['total_chars']
45
 
46
  def __repr__(self):
47
  return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
48
+
49
  def __init__(self, response_dict: dict) -> None:
50
+ self.response_dict = response_dict
51
+ self.id = response_dict['id']
52
+ self.object = response_dict['object']
53
+ self.created = response_dict['created']
54
+ self.model = response_dict['model']
55
+ self.completion = self.Completion(response_dict['choices'])
56
+ self.usage = self.Usage(response_dict['usage'])
 
57
 
58
  def json(self) -> dict:
59
  return self.response_dict
60
 
61
+
62
  class Completion:
63
  model = {
64
  'model': {
65
+ 'id': 'gpt-3.5-turbo',
66
+ 'name': 'Default (GPT-3.5)'
67
  }
68
  }
69
 
70
  def create(
71
+ prompt: str = 'hello world',
72
+ messages: list = []) -> T3nsorResponse:
73
+ response = post('https://www.t3nsor.tech/api/chat', headers=headers, json=Completion.model | {
74
+ 'messages': messages,
75
+ 'key': '',
76
+ 'prompt': prompt
 
77
  })
78
 
79
  return T3nsorResponse({
80
+ 'id': f'cmpl-1337-{int(time())}',
81
+ 'object': 'text_completion',
82
+ 'created': int(time()),
83
+ 'model': Completion.model,
84
  'choices': [{
85
+ 'text': response.text,
86
+ 'index': 0,
87
+ 'logprobs': None,
88
+ 'finish_reason': 'stop'
89
+ }],
90
  'usage': {
91
+ 'prompt_chars': len(prompt),
92
+ 'completion_chars': len(response.text),
93
+ 'total_chars': len(prompt) + len(response.text)
94
  }
95
  })
96
 
97
+
98
  class StreamCompletion:
99
  model = {
100
  'model': {
101
+ 'id': 'gpt-3.5-turbo',
102
+ 'name': 'Default (GPT-3.5)'
103
  }
104
  }
105
 
106
  def create(
107
+ prompt: str = 'hello world',
108
+ messages: list = []) -> T3nsorResponse:
 
109
  print('t3nsor api is down, this may not work, refer to another module')
110
 
111
+ response = post('https://www.t3nsor.tech/api/chat', headers=headers, stream=True, json=Completion.model | {
112
+ 'messages': messages,
113
+ 'key': '',
114
+ 'prompt': prompt
115
  })
116
+
117
+ for chunk in response.iter_content(chunk_size=2046):
118
  yield T3nsorResponse({
119
+ 'id': f'cmpl-1337-{int(time())}',
120
+ 'object': 'text_completion',
121
+ 'created': int(time()),
122
+ 'model': Completion.model,
123
+
124
  'choices': [{
125
+ 'text': chunk.decode(),
126
+ 'index': 0,
127
+ 'logprobs': None,
128
+ 'finish_reason': 'stop'
129
  }],
130
+
131
  'usage': {
132
+ 'prompt_chars': len(prompt),
133
+ 'completion_chars': len(chunk.decode()),
134
+ 'total_chars': len(prompt) + len(chunk.decode())
135
  }
136
  })
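
For completeness, a minimal usage sketch of the module above (assuming the `t3nsor` package is importable; the module itself warns the upstream API may be down, so this only illustrates the call shape). The raw dict is read back through the `json()` accessor shown in the diff:

    import t3nsor

    # one-shot completion
    response = t3nsor.Completion.create(prompt='hello world', messages=[])
    print(response.json()['choices'][0]['text'])

    # streamed completion, chunk by chunk
    for chunk in t3nsor.StreamCompletion.create(prompt='hello world', messages=[]):
        print(chunk.json()['choices'][0]['text'], end='')
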
unfinished/test.py CHANGED
@@ -1,12 +1,8 @@
1
- import gptbz
2
- import asyncio
3
-
4
-
5
  # asyncio.run(gptbz.test())
6
 
7
  import requests
8
 
9
  image = '/9j/4AAQSkZJRgABAQEAYABgAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAAoALQDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigDkZP+EhS4W0k1S+VntQPtEWmRsgkNwBu4ZsHYQNvTbls5BA6DS7uW6S6E0VwjQ3UsQM0Pl71DZUrydy4IAbvg8CsTx3DbHQLi4uVs9scWzdd+dsAaWI4PlfNjKjpzkDtmpoNSgbWYpLR7Ty5bq5trw/vd3nIowBxtzti53Y6fKT3z2djra56fNbv07HR1z13ZRX/jDyby0+02f9nfdmsEeHd5o/5anndwPkxjjPWuhrh9Mvra88RLqccmnOHtvLEqfaN+1r1lUcjbg4PbO4H+Cqk+hnRi9ZI29E0uC2N1eG3Am+13DITZRwuqlsYG0ZYEKCGJywwT2AtWTapcW1vcPPCiyrE5ils2SRQV+dW/ecMT/3zgj5utZtpdwL4e190e02W9xeb9vm7FOWY78/NnnJ28f3ahkgtptD8JRlbMos9s8QPnbcrEzDy/4sgDjzOMdeaSZbi23f8vmbfn6hBFuktmuWWPJWCNELNuxgbpcDj1Pbr2qJ9bMVyIZNK1JVLyr5qwB1AjUNu+Uk4bovGSRjAqCTwdoElv5B02MReT5G1HZfk8zzMcEfx81YlsJ7NJX0tolZzNK8dyZJA8jDIwd3yjcBkAHjOAM09SP3b/q36mkjiSNXAYBgCNykH8QeRWdfaw1ldSW66XqN0UgE++3iBRsvt2BiQN/8WPQZqharF9oN5osVml1NLbLqUbmUFY/L4CrgYYKy4yoGM5xjhlnc2OoeMrfULV7aQXGkExyYlErJ5oPQ/Jtye/zZ9qLgqaTba0NyzvPtizH7NcQeVM8OJ49u/acbl9VPY96s1geFjF/xOhF9m41Wfd9n8z73BO7f/Fzzt+X0q7c6mWvRY2DwSXcUsQuUff8Auo2ySflB+YqrYyQOmTyARPQmVP32kLqF1cbmsrJZkuni3rcfZ98UfzKvJJUE4JOM5wpODwDl3Meuf2rHbRatcBJXuj5iachjhUovlBmZudrNkEZ3HIOMGlhREhbS9He2a8MO6a4fzmGDMQ3zAk5yZ8DzMgj0yRuWdha2CzLawrEJpnnkx/G7HLMfc0bl3VNf5pff/kVLS8uxFHHJZ3s5Xyo2mZI4y2VBZyN44B6gDrwAcVZ069Go2EV2Le5t/MBPlXMZjkXnGGU9OlULSdbfTt8LWy5mt0JAkK4YRLjnnODx26Z71TXULEWn/CUWDwmxeDbM4WbkCXJbaB23SnlM5PUDNF7CcObZf12OlpCcDoTz2oVlcZVgRkjIPccGo7hgsSk7ceYg+bP94elUYpamda64915GdH1SESxiTM0KjZmTZtbDHB53Y/u89eK1qw4xD9l0mIC3wLdCg/eYwHh+73x0+9znb71uUkXUSWyCiiimZhRRRQBieL5Hj8LXjxySxuNmGivFtWHzr0lbhfx69O9MvHdZpbKKWYnUluNji+VGikVFULHnkdGbjO05JHPEviyF5/DF7HGkjuQpCx2i3THDA8RNw3Tv069qR0kk0i4uFilF3bSXTwE2a+YGzIAUQnnIPByN46kbjUPc6YNKC9X+SLtjeB9Mt5ZyqzbI1lQzK5R2C/KWGAT8w6dcjHUVzemSyxeCba9e5uWfzIgxl1aOTgXPebGw5BwR3ACdalna8+0R3Kx3nk6jc2MvkjTI2MH97zDnI+4uWOSny4z2Lqxmt/hytvHHIZhFHJsj0yJnyXDEfZ87M9cjPB56ik2y4xSsu7XcnjMsejeJszXBZZrgozaihZAYwQFfGIQM8Bvu9ehrTKuJtOg3y5gKs/8ApAy2Y5B846uMj8Tz/CaqzROH1C3EchW6uHGRZIVx9nHXs4yPvN1PydBV2Lc+u3eUkCJBDtZoAFJzJna/VjgjI/h/4EaaM5PS/wDXRF+iiirOcy7RZE8RanukmKPFA6q9yHVfvg7Y+qfd5J4Y9OhrJ8Nm4FxYJNNdORaXCsJtTS4yVnAyQoG5sfxfw/dPJrUslmGt6rcymQxM0MMStahMALk4cfM65c9cBSGA7mqmi2k9t/ZZuDJJKbSdpHNjHEdzyRvhtv3G5PyjIbBJOVqDpurP5d+zGWtzeLdahZQLNK895PiV7+N/IURKQQMEqNzKAm1tucnggG4Fkhs4INNuJL145oEuHa7BcIAuWOQRkrhiAFzkkEE8rNDJPczWtnG1rG7yfapvsqESsY1AIJPP3hztbPllTjHKvpv2CWKbTUSHdJCk8cVtH+8jUFOSNpGAynOTgJgL1BNRNxf9fmWNGa3fR7U2ty9zDswJZJxMzHvlwSCc5BwccVerBZ3tLf8Atqyguvsxt/n02OyUSsxk3FsHa24bnyM4ycgE9d1WDDIz1I5BHQ471SM6i1uY8cjjSIWLyFjLbDJu1J5Mefn6HryP4snH3hRdmTS5f7T82aS2WBY5Y5LpVjX94Pn+YYzhmydw4UDB4wio/wDY8K+XLuE1qcfY1B4MWfk6DHOT/Bg4+6K1zGkkHlSoroy7WVlGCCOQRSsU5JGUrPo96EZ5p7O7mmmlubm7XFqQoYIobB2fK3Aztwe3TQvX2QKQSMyxDiQJ1dR1P8u/TvWb5bWty2m3KTXlvqMs7Ky2ieVbqVBKSEcHJL4JB3ZwfeLfcQRnTpY7mT7PLZiOdbJSkillzgA44KMScLsBBAOBkuNxu0/6epcQv9s0+LfJzauxBuVJJDRckdXPJ+YcDJH8QrTrN2sNcsxsk2LZyjd9nXaCWj439VPH
3RwcZ/hFaVNGc+gUUUUyAooooAxfFVxZxeG9RS7ltVQ25ytwzbCCQBkJ82MkD5eeah0G7tYLi/sZJrKO4fUbjy4oncM/SQ5D9Ww4J25Xniiis2/eO2FNOhf1/CxmamsEGp2+nzx2CwxajYyWKN9o3KdpX+Ebd2I2287ePm973i3UdMg0W+0y4mtUkNqJPKuBJ5ewuEBYx8gbiBxz+FFFS3ZM1p01OdNN/wBaFfVtU0qHxHplx9qsSkEl2853SvIjxwjdtCZXIX7wbt05q7YJdS6nc6vYxWEtpfi2KS+bKsjQhCSWBBG4bhtAAyCcmiinF3k0RWgqdKMl1VvxZfM2s+VkWFh5nl5x9tfG/djGfK6bec468Y/irN1CeUCeHXbrTItPc3O6GN5PNltxHx0I+YKXLYB42455ooqpaIwo2lO1rE1rZjUYrcCO2Giw/Zp7BYzKrkKu4bh8oAB2EA56HIz0u3uxL+1kbygQpQFt2fmki4GOOuOvfHbNFFPpcTu6nKFpsTU75V8oNJKXIXduOI4hk54zjHTjGO+a0KKKaM59PQxLqNNBMuoQpDFYJEfPQLISp8zcWAXIxh5CcLnOMnHQaFNKkkvtOFoli0k9xqP32Zn24LIFyM7kwRg98c5yUVL3No6xTfV2/IrxyW0vh21kQ2phaexKn97s5aErj+LPTbnj7u7+KujoopxZNZW+9/oQXdpBfWk1rcxiSGVGjdSSMhgQeRyOCRxWOtvbXU0Ol6mIHksJbea0IMoJYISGy3U5ST+JuB83uUUMVJuz121JnaL/AITOBSYPOGnyEA7/ADdvmJnH8G3IHX5s4xxmtmiihdRVFZR9AoooqjI//9k='
10
 
11
- response = requests.get('https://ocr.holey.cc/ncku?base64_str=%s' % image) #.split('base64,')[1])
12
- print(response.content)
 
 
 
 
 
1
  # asyncio.run(gptbz.test())
2
 
3
  import requests
4
 
5
  image = '/9j/4AAQSkZJRgABAQEAYABgAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAAoALQDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigDkZP+EhS4W0k1S+VntQPtEWmRsgkNwBu4ZsHYQNvTbls5BA6DS7uW6S6E0VwjQ3UsQM0Pl71DZUrydy4IAbvg8CsTx3DbHQLi4uVs9scWzdd+dsAaWI4PlfNjKjpzkDtmpoNSgbWYpLR7Ty5bq5trw/vd3nIowBxtzti53Y6fKT3z2djra56fNbv07HR1z13ZRX/jDyby0+02f9nfdmsEeHd5o/5anndwPkxjjPWuhrh9Mvra88RLqccmnOHtvLEqfaN+1r1lUcjbg4PbO4H+Cqk+hnRi9ZI29E0uC2N1eG3Am+13DITZRwuqlsYG0ZYEKCGJywwT2AtWTapcW1vcPPCiyrE5ils2SRQV+dW/ecMT/3zgj5utZtpdwL4e190e02W9xeb9vm7FOWY78/NnnJ28f3ahkgtptD8JRlbMos9s8QPnbcrEzDy/4sgDjzOMdeaSZbi23f8vmbfn6hBFuktmuWWPJWCNELNuxgbpcDj1Pbr2qJ9bMVyIZNK1JVLyr5qwB1AjUNu+Uk4bovGSRjAqCTwdoElv5B02MReT5G1HZfk8zzMcEfx81YlsJ7NJX0tolZzNK8dyZJA8jDIwd3yjcBkAHjOAM09SP3b/q36mkjiSNXAYBgCNykH8QeRWdfaw1ldSW66XqN0UgE++3iBRsvt2BiQN/8WPQZqharF9oN5osVml1NLbLqUbmUFY/L4CrgYYKy4yoGM5xjhlnc2OoeMrfULV7aQXGkExyYlErJ5oPQ/Jtye/zZ9qLgqaTba0NyzvPtizH7NcQeVM8OJ49u/acbl9VPY96s1geFjF/xOhF9m41Wfd9n8z73BO7f/Fzzt+X0q7c6mWvRY2DwSXcUsQuUff8Auo2ySflB+YqrYyQOmTyARPQmVP32kLqF1cbmsrJZkuni3rcfZ98UfzKvJJUE4JOM5wpODwDl3Meuf2rHbRatcBJXuj5iachjhUovlBmZudrNkEZ3HIOMGlhREhbS9He2a8MO6a4fzmGDMQ3zAk5yZ8DzMgj0yRuWdha2CzLawrEJpnnkx/G7HLMfc0bl3VNf5pff/kVLS8uxFHHJZ3s5Xyo2mZI4y2VBZyN44B6gDrwAcVZ069Go2EV2Le5t/MBPlXMZjkXnGGU9OlULSdbfTt8LWy5mt0JAkK4YRLjnnODx26Z71TXULEWn/CUWDwmxeDbM4WbkCXJbaB23SnlM5PUDNF7CcObZf12OlpCcDoTz2oVlcZVgRkjIPccGo7hgsSk7ceYg+bP94elUYpamda64915GdH1SESxiTM0KjZmTZtbDHB53Y/u89eK1qw4xD9l0mIC3wLdCg/eYwHh+73x0+9znb71uUkXUSWyCiiimZhRRRQBieL5Hj8LXjxySxuNmGivFtWHzr0lbhfx69O9MvHdZpbKKWYnUluNji+VGikVFULHnkdGbjO05JHPEviyF5/DF7HGkjuQpCx2i3THDA8RNw3Tv069qR0kk0i4uFilF3bSXTwE2a+YGzIAUQnnIPByN46kbjUPc6YNKC9X+SLtjeB9Mt5ZyqzbI1lQzK5R2C/KWGAT8w6dcjHUVzemSyxeCba9e5uWfzIgxl1aOTgXPebGw5BwR3ACdalna8+0R3Kx3nk6jc2MvkjTI2MH97zDnI+4uWOSny4z2Lqxmt/hytvHHIZhFHJsj0yJnyXDEfZ87M9cjPB56ik2y4xSsu7XcnjMsejeJszXBZZrgozaihZAYwQFfGIQM8Bvu9ehrTKuJtOg3y5gKs/8ApAy2Y5B846uMj8Tz/CaqzROH1C3EchW6uHGRZIVx9nHXs4yPvN1PydBV2Lc+u3eUkCJBDtZoAFJzJna/VjgjI/h/4EaaM5PS/wDXRF+iiirOcy7RZE8RanukmKPFA6q9yHVfvg7Y+qfd5J4Y9OhrJ8Nm4FxYJNNdORaXCsJtTS4yVnAyQoG5sfxfw/dPJrUslmGt6rcymQxM0MMStahMALk4cfM65c9cBSGA7mqmi2k9t/ZZuDJJKbSdpHNjHEdzyRvhtv3G5PyjIbBJOVqDpurP5d+zGWtzeLdahZQLNK895PiV7+N/IURKQQMEqNzKAm1tucnggG4Fkhs4INNuJL145oEuHa7BcIAuWOQRkrhiAFzkkEE8rNDJPczWtnG1rG7yfapvsqESsY1AIJPP3hztbPllTjHKvpv2CWKbTUSHdJCk8cVtH+8jUFOSNpGAynOTgJgL1BNRNxf9fmWNGa3fR7U2ty9zDswJZJxMzHvlwSCc5BwccVerBZ3tLf8Atqyguvsxt/n02OyUSsxk3FsHa24bnyM4ycgE9d1WDDIz1I5BHQ471SM6i1uY8cjjSIWLyFjLbDJu1J5Mefn6HryP4snH3hRdmTS5f7T82aS2WBY5Y5LpVjX94Pn+YYzhmydw4UDB4wio/wDY8K+XLuE1qcfY1B4MWfk6DHOT/Bg4+6K1zGkkHlSoroy7WVlGCCOQRSsU5JGUrPo96EZ5p7O7mmmlubm7XFqQoYIobB2fK3Aztwe3TQvX2QKQSMyxDiQJ1dR1P8u/TvWb5bWty2m3KTXlvqMs7Ky2ieVbqVBKSEcHJL4JB3ZwfeLfcQRnTpY7mT7PLZiOdbJSkillzgA44KMScLsBBAOBkuNxu0/6epcQv9s0+LfJzauxBuVJJDRckdXPJ+YcDJH8QrTrN2sNcsxsk2LZyjd9nXaCWj439VPH
3RwcZ/hFaVNGc+gUUUUyAooooAxfFVxZxeG9RS7ltVQ25ytwzbCCQBkJ82MkD5eeah0G7tYLi/sZJrKO4fUbjy4oncM/SQ5D9Ww4J25Xniiis2/eO2FNOhf1/CxmamsEGp2+nzx2CwxajYyWKN9o3KdpX+Ebd2I2287ePm973i3UdMg0W+0y4mtUkNqJPKuBJ5ewuEBYx8gbiBxz+FFFS3ZM1p01OdNN/wBaFfVtU0qHxHplx9qsSkEl2853SvIjxwjdtCZXIX7wbt05q7YJdS6nc6vYxWEtpfi2KS+bKsjQhCSWBBG4bhtAAyCcmiinF3k0RWgqdKMl1VvxZfM2s+VkWFh5nl5x9tfG/djGfK6bec468Y/irN1CeUCeHXbrTItPc3O6GN5PNltxHx0I+YKXLYB42455ooqpaIwo2lO1rE1rZjUYrcCO2Giw/Zp7BYzKrkKu4bh8oAB2EA56HIz0u3uxL+1kbygQpQFt2fmki4GOOuOvfHbNFFPpcTu6nKFpsTU75V8oNJKXIXduOI4hk54zjHTjGO+a0KKKaM59PQxLqNNBMuoQpDFYJEfPQLISp8zcWAXIxh5CcLnOMnHQaFNKkkvtOFoli0k9xqP32Zn24LIFyM7kwRg98c5yUVL3No6xTfV2/IrxyW0vh21kQ2phaexKn97s5aErj+LPTbnj7u7+KujoopxZNZW+9/oQXdpBfWk1rcxiSGVGjdSSMhgQeRyOCRxWOtvbXU0Ol6mIHksJbea0IMoJYISGy3U5ST+JuB83uUUMVJuz121JnaL/AITOBSYPOGnyEA7/ADdvmJnH8G3IHX5s4xxmtmiihdRVFZR9AoooqjI//9k='
6
 
7
+ response = requests.get('https://ocr.holey.cc/ncku?base64_str=%s' % image) # .split('base64,')[1])
8
+ print(response.content)
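
The base64 blob above is hard-coded; a short sketch of building the same kind of payload from a local image file (the file name is illustrative), percent-encoding it since raw base64 contains `+` and `/`:

    import requests
    from base64 import b64encode
    from urllib.parse import quote

    with open('captcha.jpg', 'rb') as f:  # any small JPEG/PNG
        image = b64encode(f.read()).decode()

    # for a data-URL input, strip the prefix first, as the comment above hints:
    #   image = data_url.split('base64,')[1]

    response = requests.get('https://ocr.holey.cc/ncku?base64_str=' + quote(image))
    print(response.content)
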
unfinished/theb.ai/__init__.py CHANGED
@@ -1,46 +1,49 @@
1
- from curl_cffi import requests
2
- from json import loads
3
- from re import findall
4
- from threading import Thread
5
- from queue import Queue, Empty
 
 
6
 
7
  class Completion:
8
  # experimental
9
  part1 = '{"role":"assistant","id":"chatcmpl'
10
  part2 = '"},"index":0,"finish_reason":null}]}}'
11
  regex = rf'{part1}(.*){part2}'
12
-
13
- timer = None
14
- message_queue = Queue()
15
  stream_completed = False
16
-
17
  def request():
18
  headers = {
19
- 'authority' : 'chatbot.theb.ai',
20
  'content-type': 'application/json',
21
- 'origin' : 'https://chatbot.theb.ai',
22
- 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
23
  }
24
 
25
- requests.post('https://chatbot.theb.ai/api/chat-process', headers=headers, content_callback=Completion.handle_stream_response,
26
- json = {
27
- 'prompt' : 'hello world',
28
- 'options': {}
29
- }
30
- )
 
31
 
32
  Completion.stream_completed = True
33
 
34
  @staticmethod
35
  def create():
36
  Thread(target=Completion.request).start()
37
-
38
  while Completion.stream_completed != True or not Completion.message_queue.empty():
39
  try:
40
  message = Completion.message_queue.get(timeout=0.01)
41
  for message in findall(Completion.regex, message):
42
  yield loads(Completion.part1 + message + Completion.part2)
43
-
44
  except Empty:
45
  pass
46
 
@@ -48,10 +51,12 @@ class Completion:
48
  def handle_stream_response(response):
49
  Completion.message_queue.put(response.decode())
50
 
 
51
  def start():
52
  for message in Completion.create():
53
  yield message['delta']
54
 
 
55
  if __name__ == '__main__':
56
  for message in start():
57
  print(message)
 
1
+ from json import loads
2
+ from queue import Queue, Empty
3
+ from re import findall
4
+ from threading import Thread
5
+
6
+ from curl_cffi import requests
7
+
8
 
9
  class Completion:
10
  # experimental
11
  part1 = '{"role":"assistant","id":"chatcmpl'
12
  part2 = '"},"index":0,"finish_reason":null}]}}'
13
  regex = rf'{part1}(.*){part2}'
14
+
15
+ timer = None
16
+ message_queue = Queue()
17
  stream_completed = False
18
+
19
  def request():
20
  headers = {
21
+ 'authority': 'chatbot.theb.ai',
22
  'content-type': 'application/json',
23
+ 'origin': 'https://chatbot.theb.ai',
24
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
25
  }
26
 
27
+ requests.post('https://chatbot.theb.ai/api/chat-process', headers=headers,
28
+ content_callback=Completion.handle_stream_response,
29
+ json={
30
+ 'prompt': 'hello world',
31
+ 'options': {}
32
+ }
33
+ )
34
 
35
  Completion.stream_completed = True
36
 
37
  @staticmethod
38
  def create():
39
  Thread(target=Completion.request).start()
40
+
41
  while Completion.stream_completed != True or not Completion.message_queue.empty():
42
  try:
43
  message = Completion.message_queue.get(timeout=0.01)
44
  for message in findall(Completion.regex, message):
45
  yield loads(Completion.part1 + message + Completion.part2)
46
+
47
  except Empty:
48
  pass
49
 
 
51
  def handle_stream_response(response):
52
  Completion.message_queue.put(response.decode())
53
 
54
+
55
  def start():
56
  for message in Completion.create():
57
  yield message['delta']
58
 
59
+
60
  if __name__ == '__main__':
61
  for message in start():
62
  print(message)
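
A small usage aside for the module above: since `start()` yields incremental text deltas (as the print loop suggests), the full reply is just their concatenation. A minimal sketch:

    # collect the streamed deltas into one string instead of printing them as they arrive
    full_reply = ''.join(delta for delta in start())
    print(full_reply)
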
unfinished/vercelai/v2.py CHANGED
@@ -1,6 +1,5 @@
1
  import requests
2
 
3
-
4
  token = requests.get('https://play.vercel.ai/openai.jpeg', headers={
5
  'authority': 'play.vercel.ai',
6
  'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
@@ -15,7 +14,7 @@ headers = {
15
  'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
16
  }
17
 
18
- for chunk in requests.post('https://play.vercel.ai/api/generate', headers=headers, stream = True, json = {
19
  'prompt': 'hi',
20
  'model': 'openai:gpt-3.5-turbo',
21
  'temperature': 0.7,
@@ -25,5 +24,4 @@ for chunk in requests.post('https://play.vercel.ai/api/generate', headers=header
25
  'frequencyPenalty': 1,
26
  'presencePenalty': 1,
27
  'stopSequences': []}).iter_lines():
28
-
29
- print(chunk)
 
1
  import requests
2
 
 
3
  token = requests.get('https://play.vercel.ai/openai.jpeg', headers={
4
  'authority': 'play.vercel.ai',
5
  'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
 
14
  'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
15
  }
16
 
17
+ for chunk in requests.post('https://play.vercel.ai/api/generate', headers=headers, stream=True, json={
18
  'prompt': 'hi',
19
  'model': 'openai:gpt-3.5-turbo',
20
  'temperature': 0.7,
 
24
  'frequencyPenalty': 1,
25
  'presencePenalty': 1,
26
  'stopSequences': []}).iter_lines():
27
+ print(chunk)
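
A follow-up sketch for the streaming loop above, assuming each streamed line is a UTF-8 text fragment of the completion (the name `stream` below is illustrative; the original simply prints the raw bytes):

    # accumulate the decoded fragments instead of printing raw bytes
    completion = ''
    for chunk in stream:  # `stream` stands for the iter_lines() generator used above
        if chunk:         # skip keep-alive blank lines
            completion += chunk.decode('utf-8')
    print(completion)
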
 
unfinished/writesonic/__init__.py CHANGED
@@ -1,29 +1,33 @@
1
- from requests import Session
2
- from names import get_first_name, get_last_name
3
- from random import choice
4
- from requests import post
5
- from time import time
6
- from colorama import Fore, init; init()
 
 
 
 
7
 
8
  class logger:
9
  @staticmethod
10
  def info(string) -> print:
11
  import datetime
12
  now = datetime.datetime.now()
13
- return print(f"{Fore.CYAN}{now.strftime('%Y-%m-%d %H:%M:%S')} {Fore.BLUE}INFO {Fore.MAGENTA}__main__ -> {Fore.RESET}{string}")
 
 
14
 
15
  class SonicResponse:
16
-
17
  class Completion:
18
-
19
  class Choices:
20
  def __init__(self, choice: dict) -> None:
21
- self.text = choice['text']
22
- self.content = self.text.encode()
23
- self.index = choice['index']
24
- self.logprobs = choice['logprobs']
25
- self.finish_reason = choice['finish_reason']
26
-
27
  def __repr__(self) -> str:
28
  return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
29
 
@@ -32,127 +36,128 @@ class SonicResponse:
32
 
33
  class Usage:
34
  def __init__(self, usage_dict: dict) -> None:
35
- self.prompt_tokens = usage_dict['prompt_chars']
36
- self.completion_tokens = usage_dict['completion_chars']
37
- self.total_tokens = usage_dict['total_chars']
38
 
39
  def __repr__(self):
40
  return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
41
-
42
  def __init__(self, response_dict: dict) -> None:
43
-
44
- self.response_dict = response_dict
45
- self.id = response_dict['id']
46
- self.object = response_dict['object']
47
- self.created = response_dict['created']
48
- self.model = response_dict['model']
49
- self.completion = self.Completion(response_dict['choices'])
50
- self.usage = self.Usage(response_dict['usage'])
51
 
52
  def json(self) -> dict:
53
  return self.response_dict
54
-
 
55
  class Account:
56
  session = Session()
57
  session.headers = {
58
- "connection" : "keep-alive",
59
- "sec-ch-ua" : "\"Not_A Brand\";v=\"99\", \"Google Chrome\";v=\"109\", \"Chromium\";v=\"109\"",
60
- "accept" : "application/json, text/plain, */*",
61
- "content-type" : "application/json",
62
- "sec-ch-ua-mobile" : "?0",
63
- "user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36",
64
  "sec-ch-ua-platform": "\"Windows\"",
65
- "sec-fetch-site" : "same-origin",
66
- "sec-fetch-mode" : "cors",
67
- "sec-fetch-dest" : "empty",
68
  # "accept-encoding" : "gzip, deflate, br",
69
- "accept-language" : "en-GB,en-US;q=0.9,en;q=0.8",
70
- "cookie" : ""
71
  }
72
-
73
  @staticmethod
74
  def get_user():
75
  password = f'0opsYouGoTme@1234'
76
- f_name = get_first_name()
77
- l_name = get_last_name()
78
- hosts = ['gmail.com', 'protonmail.com', 'proton.me', 'outlook.com']
79
-
80
  return {
81
- "email" : f"{f_name.lower()}.{l_name.lower()}@{choice(hosts)}",
82
- "password" : password,
83
- "confirm_password" : password,
84
- "full_name" : f'{f_name} {l_name}'
85
  }
86
 
87
  @staticmethod
88
  def create(logging: bool = False):
89
  while True:
90
  try:
91
- user = Account.get_user()
92
- start = time()
93
- response = Account.session.post("https://app.writesonic.com/api/session-login", json = user | {
94
- "utmParams" : "{}",
95
- "visitorId" : "0",
96
- "locale" : "en",
97
- "userAgent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36",
98
- "signInWith" : "password",
99
- "request_type" : "signup",
100
  })
101
-
102
  if logging:
103
  logger.info(f"\x1b[31mregister success\x1b[0m : '{response.text[:30]}...' ({int(time() - start)}s)")
104
  logger.info(f"\x1b[31mid\x1b[0m : '{response.json()['id']}'")
105
  logger.info(f"\x1b[31mtoken\x1b[0m : '{response.json()['token'][:30]}...'")
106
-
107
  start = time()
108
- response = Account.session.post("https://api.writesonic.com/v1/business/set-business-active", headers={"authorization": "Bearer " + response.json()['token']})
 
109
  key = response.json()["business"]["api_key"]
110
  if logging: logger.info(f"\x1b[31mgot key\x1b[0m : '{key}' ({int(time() - start)}s)")
111
 
112
  return Account.AccountResponse(user['email'], user['password'], key)
113
-
114
  except Exception as e:
115
  if logging: logger.info(f"\x1b[31merror\x1b[0m : '{e}'")
116
  continue
117
-
118
  class AccountResponse:
119
  def __init__(self, email, password, key):
120
- self.email = email
121
  self.password = password
122
- self.key = key
123
-
124
 
125
  class Completion:
126
  def create(
127
- api_key: str,
128
- prompt: str,
129
- enable_memory: bool = False,
130
- enable_google_results: bool = False,
131
- history_data: list = []) -> SonicResponse:
132
-
133
- response = post('https://api.writesonic.com/v2/business/content/chatsonic?engine=premium', headers = {"X-API-KEY": api_key},
134
- json = {
135
- "enable_memory" : enable_memory,
136
- "enable_google_results" : enable_google_results,
137
- "input_text" : prompt,
138
- "history_data" : history_data}).json()
139
 
140
  return SonicResponse({
141
- 'id' : f'cmpl-premium-{int(time())}',
142
- 'object' : 'text_completion',
143
- 'created': int(time()),
144
- 'model' : 'premium',
145
-
146
- 'choices': [{
147
- 'text' : response['message'],
148
- 'index' : 0,
149
- 'logprobs' : None,
150
- 'finish_reason' : 'stop'
151
- }],
152
-
153
- 'usage': {
154
- 'prompt_chars' : len(prompt),
155
- 'completion_chars' : len(response['message']),
156
- 'total_chars' : len(prompt) + len(response['message'])
157
- }
158
- })
 
1
+ from random import choice
2
+ from time import time
3
+
4
+ from colorama import Fore, init;
5
+ from names import get_first_name, get_last_name
6
+ from requests import Session
7
+ from requests import post
8
+
9
+ init()
10
+
11
 
12
  class logger:
13
  @staticmethod
14
  def info(string) -> print:
15
  import datetime
16
  now = datetime.datetime.now()
17
+ return print(
18
+ f"{Fore.CYAN}{now.strftime('%Y-%m-%d %H:%M:%S')} {Fore.BLUE}INFO {Fore.MAGENTA}__main__ -> {Fore.RESET}{string}")
19
+
20
 
21
  class SonicResponse:
 
22
  class Completion:
 
23
  class Choices:
24
  def __init__(self, choice: dict) -> None:
25
+ self.text = choice['text']
26
+ self.content = self.text.encode()
27
+ self.index = choice['index']
28
+ self.logprobs = choice['logprobs']
29
+ self.finish_reason = choice['finish_reason']
30
+
31
  def __repr__(self) -> str:
32
  return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
33
 
 
36
 
37
  class Usage:
38
  def __init__(self, usage_dict: dict) -> None:
39
+ self.prompt_tokens = usage_dict['prompt_chars']
40
+ self.completion_tokens = usage_dict['completion_chars']
41
+ self.total_tokens = usage_dict['total_chars']
42
 
43
  def __repr__(self):
44
  return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
45
+
46
  def __init__(self, response_dict: dict) -> None:
47
+ self.response_dict = response_dict
48
+ self.id = response_dict['id']
49
+ self.object = response_dict['object']
50
+ self.created = response_dict['created']
51
+ self.model = response_dict['model']
52
+ self.completion = self.Completion(response_dict['choices'])
53
+ self.usage = self.Usage(response_dict['usage'])
 
54
 
55
  def json(self) -> dict:
56
  return self.response_dict
57
+
58
+
59
  class Account:
60
  session = Session()
61
  session.headers = {
62
+ "connection": "keep-alive",
63
+ "sec-ch-ua": "\"Not_A Brand\";v=\"99\", \"Google Chrome\";v=\"109\", \"Chromium\";v=\"109\"",
64
+ "accept": "application/json, text/plain, */*",
65
+ "content-type": "application/json",
66
+ "sec-ch-ua-mobile": "?0",
67
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36",
68
  "sec-ch-ua-platform": "\"Windows\"",
69
+ "sec-fetch-site": "same-origin",
70
+ "sec-fetch-mode": "cors",
71
+ "sec-fetch-dest": "empty",
72
  # "accept-encoding" : "gzip, deflate, br",
73
+ "accept-language": "en-GB,en-US;q=0.9,en;q=0.8",
74
+ "cookie": ""
75
  }
76
+
77
  @staticmethod
78
  def get_user():
79
  password = f'0opsYouGoTme@1234'
80
+ f_name = get_first_name()
81
+ l_name = get_last_name()
82
+ hosts = ['gmail.com', 'protonmail.com', 'proton.me', 'outlook.com']
83
+
84
  return {
85
+ "email": f"{f_name.lower()}.{l_name.lower()}@{choice(hosts)}",
86
+ "password": password,
87
+ "confirm_password": password,
88
+ "full_name": f'{f_name} {l_name}'
89
  }
90
 
91
  @staticmethod
92
  def create(logging: bool = False):
93
  while True:
94
  try:
95
+ user = Account.get_user()
96
+ start = time()
97
+ response = Account.session.post("https://app.writesonic.com/api/session-login", json=user | {
98
+ "utmParams": "{}",
99
+ "visitorId": "0",
100
+ "locale": "en",
101
+ "userAgent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36",
102
+ "signInWith": "password",
103
+ "request_type": "signup",
104
  })
105
+
106
  if logging:
107
  logger.info(f"\x1b[31mregister success\x1b[0m : '{response.text[:30]}...' ({int(time() - start)}s)")
108
  logger.info(f"\x1b[31mid\x1b[0m : '{response.json()['id']}'")
109
  logger.info(f"\x1b[31mtoken\x1b[0m : '{response.json()['token'][:30]}...'")
110
+
111
  start = time()
112
+ response = Account.session.post("https://api.writesonic.com/v1/business/set-business-active",
113
+ headers={"authorization": "Bearer " + response.json()['token']})
114
  key = response.json()["business"]["api_key"]
115
  if logging: logger.info(f"\x1b[31mgot key\x1b[0m : '{key}' ({int(time() - start)}s)")
116
 
117
  return Account.AccountResponse(user['email'], user['password'], key)
118
+
119
  except Exception as e:
120
  if logging: logger.info(f"\x1b[31merror\x1b[0m : '{e}'")
121
  continue
122
+
123
  class AccountResponse:
124
  def __init__(self, email, password, key):
125
+ self.email = email
126
  self.password = password
127
+ self.key = key
128
+
129
 
130
  class Completion:
131
  def create(
132
+ api_key: str,
133
+ prompt: str,
134
+ enable_memory: bool = False,
135
+ enable_google_results: bool = False,
136
+ history_data: list = []) -> SonicResponse:
137
+ response = post('https://api.writesonic.com/v2/business/content/chatsonic?engine=premium',
138
+ headers={"X-API-KEY": api_key},
139
+ json={
140
+ "enable_memory": enable_memory,
141
+ "enable_google_results": enable_google_results,
142
+ "input_text": prompt,
143
+ "history_data": history_data}).json()
144
 
145
  return SonicResponse({
146
+ 'id': f'cmpl-premium-{int(time())}',
147
+ 'object': 'text_completion',
148
+ 'created': int(time()),
149
+ 'model': 'premium',
150
+
151
+ 'choices': [{
152
+ 'text': response['message'],
153
+ 'index': 0,
154
+ 'logprobs': None,
155
+ 'finish_reason': 'stop'
156
+ }],
157
+
158
+ 'usage': {
159
+ 'prompt_chars': len(prompt),
160
+ 'completion_chars': len(response['message']),
161
+ 'total_chars': len(prompt) + len(response['message'])
162
+ }
163
+ })
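
For completeness, a minimal end-to-end sketch of the module above: register a throwaway account, then request a chatsonic completion with its key (assumes the `writesonic` package is importable; the response dict is read back through the `json()` accessor shown in the diff):

    import writesonic

    # create a disposable account and grab its API key
    account = writesonic.Account.create(logging=True)

    # ask a question with live google results enabled
    response = writesonic.Completion.create(
        api_key=account.key,
        prompt='what is the capital of Morocco?',
        enable_google_results=True,
    )
    print(response.json()['choices'][0]['text'])
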
you/__init__.py CHANGED
@@ -9,19 +9,19 @@ from tls_client import Session
9
  class Completion:
10
  @staticmethod
11
  def create(
12
- prompt: str,
13
- page: int = 1,
14
- count: int = 10,
15
- safe_search: str = 'Moderate',
16
- on_shopping_page: bool = False,
17
- mkt: str = '',
18
- response_filter: str = 'WebPages,Translations,TimeZone,Computation,RelatedSearches',
19
- domain: str = 'youchat',
20
- query_trace_id: str = None,
21
- chat: list = None,
22
- include_links: bool = False,
23
- detailed: bool = False,
24
- debug: bool = False,
25
  ) -> dict:
26
  if chat is None:
27
  chat = []
 
9
  class Completion:
10
  @staticmethod
11
  def create(
12
+ prompt: str,
13
+ page: int = 1,
14
+ count: int = 10,
15
+ safe_search: str = 'Moderate',
16
+ on_shopping_page: bool = False,
17
+ mkt: str = '',
18
+ response_filter: str = 'WebPages,Translations,TimeZone,Computation,RelatedSearches',
19
+ domain: str = 'youchat',
20
+ query_trace_id: str = None,
21
+ chat: list = None,
22
+ include_links: bool = False,
23
+ detailed: bool = False,
24
+ debug: bool = False,
25
  ) -> dict:
26
  if chat is None:
27
  chat = []
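
Finally, a minimal usage sketch for the signature above (assuming the `you` package is importable; only the prompt is overridden, and the shape of the returned dict depends on the rest of the module, which lies outside this hunk):

    import you

    # simplest call: every argument except the prompt keeps its default
    result = you.Completion.create(prompt='what is the meaning of life?')
    print(result)
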