Niansuh committed on
Commit
f48dc4d
·
verified ·
1 Parent(s): 426c357

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +248 -227
api/utils.py CHANGED
@@ -1,227 +1,248 @@
1
- from datetime import datetime
2
- import json
3
- import uuid
4
- import asyncio
5
- import random
6
- import string
7
- from typing import Any, Dict, Optional
8
-
9
- import httpx
10
- from fastapi import HTTPException
11
- from api.config import (
12
- MODEL_MAPPING,
13
- get_headers_api_chat,
14
- get_headers_chat,
15
- BASE_URL,
16
- AGENT_MODE,
17
- TRENDING_AGENT_MODE,
18
- MODEL_PREFIXES,
19
- MODEL_REFERERS
20
- )
21
- from api.models import ChatRequest
22
- from api.logger import setup_logger
23
-
24
- logger = setup_logger(__name__)
25
-
26
- # Helper function to create a random alphanumeric chat ID
27
- def generate_chat_id(length: int = 7) -> str:
28
- characters = string.ascii_letters + string.digits
29
- return ''.join(random.choices(characters, k=length))
30
-
31
- # Helper function to create chat completion data
32
- def create_chat_completion_data(
33
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
34
- ) -> Dict[str, Any]:
35
- return {
36
- "id": f"chatcmpl-{uuid.uuid4()}",
37
- "object": "chat.completion.chunk",
38
- "created": timestamp,
39
- "model": model,
40
- "choices": [
41
- {
42
- "index": 0,
43
- "delta": {"content": content, "role": "assistant"},
44
- "finish_reason": finish_reason,
45
- }
46
- ],
47
- "usage": None,
48
- }
49
-
50
- # Function to convert message to dictionary format, ensuring base64 data and optional model prefix
51
- def message_to_dict(message, model_prefix: Optional[str] = None):
52
- content = message.content if isinstance(message.content, str) else message.content[0]["text"]
53
- if model_prefix:
54
- content = f"{model_prefix} {content}"
55
- if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
56
- # Ensure base64 images are always included for all models
57
- return {
58
- "role": message.role,
59
- "content": content,
60
- "data": {
61
- "imageBase64": message.content[1]["image_url"]["url"],
62
- "fileText": "",
63
- "title": "snapshot",
64
- },
65
- }
66
- return {"role": message.role, "content": content}
67
-
68
- # Function to strip model prefix from content if present
69
- def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
70
- """Remove the model prefix from the response content if present."""
71
- if model_prefix and content.startswith(model_prefix):
72
- logger.debug(f"Stripping prefix '{model_prefix}' from content.")
73
- return content[len(model_prefix):].strip()
74
- return content
75
-
76
- # Function to get the correct referer URL for logging
77
- def get_referer_url(chat_id: str, model: str) -> str:
78
- """Generate the referer URL based on specific models listed in MODEL_REFERERS."""
79
- if model in MODEL_REFERERS:
80
- return f"{BASE_URL}/chat/{chat_id}?model={model}"
81
- return BASE_URL
82
-
83
- # Process streaming response with headers from config.py
84
- async def process_streaming_response(request: ChatRequest):
85
- chat_id = generate_chat_id()
86
- referer_url = get_referer_url(chat_id, request.model)
87
- logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
88
-
89
- agent_mode = AGENT_MODE.get(request.model, {})
90
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
91
- model_prefix = MODEL_PREFIXES.get(request.model, "")
92
-
93
- headers_api_chat = get_headers_api_chat(referer_url)
94
-
95
- if request.model == 'o1-preview':
96
- delay_seconds = random.randint(1, 60)
97
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
98
- await asyncio.sleep(delay_seconds)
99
-
100
- json_data = {
101
- "agentMode": agent_mode,
102
- "clickedAnswer2": False,
103
- "clickedAnswer3": False,
104
- "clickedForceWebSearch": False,
105
- "codeModelMode": True,
106
- "githubToken": None,
107
- "id": chat_id,
108
- "isChromeExt": False,
109
- "isMicMode": False,
110
- "maxTokens": request.max_tokens,
111
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
112
- "mobileClient": False,
113
- "playgroundTemperature": request.temperature,
114
- "playgroundTopP": request.top_p,
115
- "previewToken": None,
116
- "trendingAgentMode": trending_agent_mode,
117
- "userId": None,
118
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
119
- "userSystemPrompt": None,
120
- "validated": "00f37b34-a166-4efb-bce5-1312d87f2f94",
121
- "visitFromDelta": False,
122
- }
123
-
124
- async with httpx.AsyncClient() as client:
125
- try:
126
- async with client.stream(
127
- "POST",
128
- f"{BASE_URL}/api/chat",
129
- headers=headers_api_chat,
130
- json=json_data,
131
- timeout=100,
132
- ) as response:
133
- response.raise_for_status()
134
- async for line in response.aiter_lines():
135
- timestamp = int(datetime.now().timestamp())
136
- if line:
137
- content = line
138
- if content.startswith("$@$v=undefined-rv1$@$"):
139
- content = content[21:]
140
- cleaned_content = strip_model_prefix(content, model_prefix)
141
- yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
142
-
143
- yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
144
- yield "data: [DONE]\n\n"
145
- except httpx.HTTPStatusError as e:
146
- logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
147
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
148
- except httpx.RequestError as e:
149
- logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
150
- raise HTTPException(status_code=500, detail=str(e))
151
-
152
- # Process non-streaming response with headers from config.py
153
- async def process_non_streaming_response(request: ChatRequest):
154
- chat_id = generate_chat_id()
155
- referer_url = get_referer_url(chat_id, request.model)
156
- logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
157
-
158
- agent_mode = AGENT_MODE.get(request.model, {})
159
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
160
- model_prefix = MODEL_PREFIXES.get(request.model, "")
161
-
162
- headers_api_chat = get_headers_api_chat(referer_url)
163
- headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
164
-
165
- if request.model == 'o1-preview':
166
- delay_seconds = random.randint(20, 60)
167
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
168
- await asyncio.sleep(delay_seconds)
169
-
170
- json_data = {
171
- "agentMode": agent_mode,
172
- "clickedAnswer2": False,
173
- "clickedAnswer3": False,
174
- "clickedForceWebSearch": False,
175
- "codeModelMode": True,
176
- "githubToken": None,
177
- "id": chat_id,
178
- "isChromeExt": False,
179
- "isMicMode": False,
180
- "maxTokens": request.max_tokens,
181
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
182
- "mobileClient": False,
183
- "playgroundTemperature": request.temperature,
184
- "playgroundTopP": request.top_p,
185
- "previewToken": None,
186
- "trendingAgentMode": trending_agent_mode,
187
- "userId": None,
188
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
189
- "userSystemPrompt": None,
190
- "validated": "00f37b34-a166-4efb-bce5-1312d87f2f94",
191
- "visitFromDelta": False,
192
- }
193
-
194
- full_response = ""
195
- async with httpx.AsyncClient() as client:
196
- try:
197
- async with client.stream(
198
- method="POST", url=f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data
199
- ) as response:
200
- response.raise_for_status()
201
- async for chunk in response.aiter_text():
202
- full_response += chunk
203
- except httpx.HTTPStatusError as e:
204
- logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
205
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
206
- except httpx.RequestError as e:
207
- logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
208
- raise HTTPException(status_code=500, detail=str(e))
209
- if full_response.startswith("$@$v=undefined-rv1$@$"):
210
- full_response = full_response[21:]
211
-
212
- cleaned_full_response = strip_model_prefix(full_response, model_prefix)
213
-
214
- return {
215
- "id": f"chatcmpl-{uuid.uuid4()}",
216
- "object": "chat.completion",
217
- "created": int(datetime.now().timestamp()),
218
- "model": request.model,
219
- "choices": [
220
- {
221
- "index": 0,
222
- "message": {"role": "assistant", "content": cleaned_full_response},
223
- "finish_reason": "stop",
224
- }
225
- ],
226
- "usage": None,
227
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+ import json
3
+ import uuid
4
+ import asyncio
5
+ import random
6
+ import string
7
+ from typing import Any, Dict, Optional
8
+
9
+ import httpx
10
+ from fastapi import HTTPException
11
+ from api.config import (
12
+ MODEL_MAPPING,
13
+ get_headers_api_chat,
14
+ get_headers_chat,
15
+ BASE_URL,
16
+ AGENT_MODE,
17
+ TRENDING_AGENT_MODE,
18
+ MODEL_PREFIXES,
19
+ MODEL_REFERERS,
20
+ )
21
+ from api.models import ChatRequest
22
+ from api.logger import setup_logger
23
+
24
+ # Import the validate module
25
+ from api import validate
26
+
27
+ logger = setup_logger(__name__)
28
+
29
# Helper function to create a random alphanumeric chat ID
def generate_chat_id(length: int = 7) -> str:
    """Return a random alphanumeric chat identifier of *length* characters."""
    alphabet = string.ascii_letters + string.digits
    picks = random.choices(alphabet, k=length)
    return "".join(picks)
33
+
34
# Helper function to create chat completion data
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Build one OpenAI-style ``chat.completion.chunk`` payload.

    Args:
        content: Delta text for this chunk.
        model: Model name echoed back to the client.
        timestamp: Unix timestamp for the ``created`` field.
        finish_reason: ``"stop"`` on the terminal chunk, else ``None``.
    """
    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": None,
    }
52
+
53
# Function to convert message to dictionary format, ensuring base64 data and optional model prefix
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Convert a chat message object into the upstream request dict.

    Text is taken from ``message.content`` directly when it is a string,
    otherwise from the first list element's ``"text"`` entry. When the
    content is a two-element list whose second entry carries ``image_url``,
    the image URL (base64 data URL) is embedded under ``data``.
    """
    raw = message.content
    text = raw if isinstance(raw, str) else raw[0]["text"]
    if model_prefix:
        text = f"{model_prefix} {text}"

    has_image = (
        isinstance(raw, list)
        and len(raw) == 2
        and "image_url" in raw[1]
    )
    if not has_image:
        return {"role": message.role, "content": text}

    # Ensure base64 images are always included for all models
    return {
        "role": message.role,
        "content": text,
        "data": {
            "imageBase64": raw[1]["image_url"]["url"],
            "fileText": "",
            "title": "snapshot",
        },
    }
70
+
71
# Function to strip model prefix from content if present
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Remove the model prefix from the response content if present."""
    if not model_prefix or not content.startswith(model_prefix):
        return content
    logger.debug(f"Stripping prefix '{model_prefix}' from content.")
    return content[len(model_prefix):].strip()
78
+
79
# Function to get the correct referer URL for logging
def get_referer_url(chat_id: str, model: str) -> str:
    """Generate the referer URL based on specific models listed in MODEL_REFERERS."""
    if model not in MODEL_REFERERS:
        return BASE_URL
    return f"{BASE_URL}/chat/{chat_id}?model={model}"
85
+
86
# Process streaming response with headers from config.py
async def process_streaming_response(request: ChatRequest):
    """Stream chat completion chunks from the upstream /api/chat endpoint.

    Yields OpenAI-style ``data: {...}`` SSE lines, followed by a terminal
    ``stop`` chunk and ``data: [DONE]``.

    Raises:
        HTTPException: mirroring the upstream status on HTTP errors, or
            500 on transport failures.
    """
    chat_id = generate_chat_id()
    referer_url = get_referer_url(chat_id, request.model)
    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")

    headers_api_chat = get_headers_api_chat(referer_url)

    if request.model == 'o1-preview':
        # Artificial random delay before contacting upstream for this model.
        delay_seconds = random.randint(1, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
        await asyncio.sleep(delay_seconds)

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        "id": chat_id,
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        # Use validate.getHid() for the 'validated' field
        "validated": validate.getHid(),
        "visitFromDelta": False,
    }

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                # Fix: pre-initialize so the final 'stop' chunk below cannot hit
                # an UnboundLocalError when upstream returns zero lines.
                timestamp = int(datetime.now().timestamp())
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        content = line + "\n"
                        if "https://www.blackbox.ai" in content:
                            # Upstream answered with its site URL: the validation
                            # token went stale. Refresh it and ask the caller to retry.
                            validate.getHid(True)
                            content = "The HID has been refreshed; please try again.\n"
                            yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
                            break
                        if content.startswith("$@$v=undefined-rv1$@$"):
                            # Drop the upstream's 21-char version sentinel.
                            content = content[21:]
                        cleaned_content = strip_model_prefix(content, model_prefix)
                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"

                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))
161
+
162
# Process non-streaming response with headers from config.py
async def process_non_streaming_response(request: ChatRequest):
    """Collect the full upstream /api/chat reply and return a single
    OpenAI-style ``chat.completion`` response dict.

    Raises:
        HTTPException: mirroring the upstream status on HTTP errors, or
            500 on transport failures.
    """
    chat_id = generate_chat_id()
    referer_url = get_referer_url(chat_id, request.model)
    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")

    headers_api_chat = get_headers_api_chat(referer_url)
    # NOTE(review): headers_chat is never used below — confirm whether the
    # get_headers_chat call is still needed before removing it.
    headers_chat = get_headers_chat(
        referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""])
    )

    if request.model == 'o1-preview':
        # Artificial random delay before contacting upstream for this model.
        delay_seconds = random.randint(20, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
        await asyncio.sleep(delay_seconds)

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        "id": chat_id,
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        # Use validate.getHid() for the 'validated' field
        "validated": validate.getHid(),
        "visitFromDelta": False,
    }

    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                # Fix: match the streaming path's timeout; httpx's 5-second
                # default is too short for long generations.
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    if "https://www.blackbox.ai" in full_response:
        # Upstream answered with its site URL: the validation token went
        # stale. Refresh it and ask the caller to retry.
        validate.getHid(True)
        full_response = "The HID has been refreshed; please try again."
    if full_response.startswith("$@$v=undefined-rv1$@$"):
        # Drop the upstream's 21-char version sentinel.
        full_response = full_response[21:]

    cleaned_full_response = strip_model_prefix(full_response, model_prefix)

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }