Update api/utils.py
Browse files- api/utils.py +86 -146
api/utils.py
CHANGED
@@ -1,29 +1,28 @@
|
|
1 |
-
# utils.py
|
2 |
-
|
3 |
from datetime import datetime
|
4 |
import json
|
5 |
import uuid
|
6 |
-
import random
|
7 |
from typing import Any, Dict, Optional
|
8 |
|
9 |
import httpx
|
10 |
-
from fastapi import HTTPException
|
11 |
-
from
|
12 |
-
|
13 |
-
|
|
|
|
|
14 |
AGENT_MODE,
|
15 |
TRENDING_AGENT_MODE,
|
|
|
16 |
BASE_URL,
|
17 |
-
|
18 |
-
USER_SELECTED_MODELS,
|
19 |
)
|
20 |
-
from
|
21 |
-
from
|
22 |
-
from api.validate import getHid
|
23 |
|
24 |
logger = setup_logger(__name__)
|
|
|
|
|
25 |
|
26 |
-
# Helper function to create chat completion data
|
27 |
def create_chat_completion_data(
|
28 |
content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
|
29 |
) -> Dict[str, Any]:
|
@@ -42,28 +41,18 @@ def create_chat_completion_data(
|
|
42 |
"usage": None,
|
43 |
}
|
44 |
|
45 |
-
# Function to strip model prefix from content if present
|
46 |
-
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
|
47 |
-
"""Remove the model prefix from the response content if present."""
|
48 |
-
if model_prefix and content.startswith(model_prefix):
|
49 |
-
logger.debug(f"Stripping prefix '{model_prefix}' from content.")
|
50 |
-
return content[len(model_prefix):].strip()
|
51 |
-
return content
|
52 |
|
53 |
-
|
|
|
|
|
|
|
|
|
|
|
54 |
def message_to_dict(message, model_prefix: Optional[str] = None):
|
55 |
-
content =
|
56 |
-
message.content if isinstance(message.content, str)
|
57 |
-
else message.content[0]["text"]
|
58 |
-
)
|
59 |
if model_prefix:
|
60 |
content = f"{model_prefix} {content}"
|
61 |
-
if (
|
62 |
-
isinstance(message.content, list)
|
63 |
-
and len(message.content) == 2
|
64 |
-
and "image_url" in message.content[1]
|
65 |
-
):
|
66 |
-
# Ensure base64 images are always included for all models
|
67 |
return {
|
68 |
"role": message.role,
|
69 |
"content": content,
|
@@ -75,58 +64,39 @@ def message_to_dict(message, model_prefix: Optional[str] = None):
|
|
75 |
}
|
76 |
return {"role": message.role, "content": content}
|
77 |
|
78 |
-
# Process streaming response
|
79 |
-
async def process_streaming_response(request: ChatRequest):
|
80 |
-
chat_id = generate_id()
|
81 |
-
logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model}")
|
82 |
-
|
83 |
-
model = get_model(request.model)
|
84 |
-
agent_mode = AGENT_MODE.get(model, {})
|
85 |
-
trending_agent_mode = TRENDING_AGENT_MODE.get(model, {})
|
86 |
-
model_prefix = MODEL_PREFIXES.get(model, "")
|
87 |
-
|
88 |
-
headers_api_chat = {
|
89 |
-
'accept': '*/*',
|
90 |
-
'accept-language': 'en-US,en;q=0.9',
|
91 |
-
'cache-control': 'no-cache',
|
92 |
-
'content-type': 'application/json',
|
93 |
-
'origin': BASE_URL,
|
94 |
-
'pragma': 'no-cache',
|
95 |
-
'priority': 'u=1, i',
|
96 |
-
'referer': f'{BASE_URL}/',
|
97 |
-
'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
|
98 |
-
'sec-ch-ua-mobile': '?0',
|
99 |
-
'sec-ch-ua-platform': '"Linux"',
|
100 |
-
'sec-fetch-dest': 'empty',
|
101 |
-
'sec-fetch-mode': 'cors',
|
102 |
-
'sec-fetch-site': 'same-origin',
|
103 |
-
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
|
104 |
-
}
|
105 |
|
|
|
|
|
106 |
json_data = {
|
107 |
-
"
|
108 |
-
"
|
109 |
-
"
|
110 |
-
"clickedForceWebSearch": False,
|
111 |
"codeModelMode": True,
|
112 |
-
"
|
113 |
-
"
|
114 |
-
"isChromeExt": False,
|
115 |
"isMicMode": False,
|
|
|
116 |
"maxTokens": request.max_tokens,
|
117 |
-
"messages": [
|
118 |
-
message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
|
119 |
-
],
|
120 |
-
"mobileClient": False,
|
121 |
-
"playgroundTemperature": request.temperature,
|
122 |
"playgroundTopP": request.top_p,
|
123 |
-
"
|
124 |
-
"
|
125 |
-
"
|
126 |
-
"
|
127 |
-
"
|
128 |
-
"
|
129 |
"visitFromDelta": False,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
130 |
}
|
131 |
|
132 |
async with httpx.AsyncClient() as client:
|
@@ -134,7 +104,7 @@ async def process_streaming_response(request: ChatRequest):
|
|
134 |
async with client.stream(
|
135 |
"POST",
|
136 |
f"{BASE_URL}/api/chat",
|
137 |
-
headers=
|
138 |
json=json_data,
|
139 |
timeout=100,
|
140 |
) as response:
|
@@ -144,107 +114,77 @@ async def process_streaming_response(request: ChatRequest):
|
|
144 |
if line:
|
145 |
content = line
|
146 |
if "https://www.blackbox.ai" in content:
|
147 |
-
|
148 |
-
content =
|
149 |
-
"HID has been refreshed, please start a new conversation.\n"
|
150 |
-
)
|
151 |
yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
|
152 |
break
|
153 |
if content.startswith("$@$v=undefined-rv1$@$"):
|
154 |
content = content[21:]
|
155 |
-
|
156 |
-
yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
|
157 |
|
158 |
yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
|
159 |
yield "data: [DONE]\n\n"
|
160 |
except httpx.HTTPStatusError as e:
|
161 |
-
logger.error(f"HTTP error occurred
|
162 |
raise HTTPException(status_code=e.response.status_code, detail=str(e))
|
163 |
except httpx.RequestError as e:
|
164 |
-
logger.error(f"Error occurred during request
|
165 |
raise HTTPException(status_code=500, detail=str(e))
|
166 |
|
167 |
-
# Process non-streaming response
|
168 |
-
async def process_non_streaming_response(request: ChatRequest):
|
169 |
-
chat_id = generate_id()
|
170 |
-
logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model}")
|
171 |
-
|
172 |
-
model = get_model(request.model)
|
173 |
-
agent_mode = AGENT_MODE.get(model, {})
|
174 |
-
trending_agent_mode = TRENDING_AGENT_MODE.get(model, {})
|
175 |
-
model_prefix = MODEL_PREFIXES.get(model, "")
|
176 |
-
|
177 |
-
headers_api_chat = {
|
178 |
-
'accept': '*/*',
|
179 |
-
'accept-language': 'en-US,en;q=0.9',
|
180 |
-
'cache-control': 'no-cache',
|
181 |
-
'content-type': 'application/json',
|
182 |
-
'origin': BASE_URL,
|
183 |
-
'pragma': 'no-cache',
|
184 |
-
'priority': 'u=1, i',
|
185 |
-
'referer': f'{BASE_URL}/',
|
186 |
-
'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
|
187 |
-
'sec-ch-ua-mobile': '?0',
|
188 |
-
'sec-ch-ua-platform': '"Linux"',
|
189 |
-
'sec-fetch-dest': 'empty',
|
190 |
-
'sec-fetch-mode': 'cors',
|
191 |
-
'sec-fetch-site': 'same-origin',
|
192 |
-
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
|
193 |
-
}
|
194 |
|
|
|
|
|
195 |
json_data = {
|
196 |
-
"
|
197 |
-
"
|
198 |
-
"
|
199 |
-
"clickedForceWebSearch": False,
|
200 |
"codeModelMode": True,
|
201 |
-
"
|
202 |
-
"
|
203 |
-
"isChromeExt": False,
|
204 |
"isMicMode": False,
|
|
|
205 |
"maxTokens": request.max_tokens,
|
206 |
-
"messages": [
|
207 |
-
message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
|
208 |
-
],
|
209 |
-
"mobileClient": False,
|
210 |
-
"playgroundTemperature": request.temperature,
|
211 |
"playgroundTopP": request.top_p,
|
212 |
-
"
|
213 |
-
"
|
214 |
-
"
|
215 |
-
"
|
216 |
-
"
|
217 |
-
"
|
218 |
"visitFromDelta": False,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
219 |
}
|
220 |
|
221 |
full_response = ""
|
222 |
async with httpx.AsyncClient() as client:
|
223 |
try:
|
224 |
-
|
225 |
-
|
226 |
-
|
227 |
-
headers=headers_api_chat,
|
228 |
-
json=json_data,
|
229 |
-
) as response:
|
230 |
-
response.raise_for_status()
|
231 |
-
async for chunk in response.aiter_text():
|
232 |
-
full_response += chunk
|
233 |
except httpx.HTTPStatusError as e:
|
234 |
-
logger.error(f"HTTP error occurred
|
235 |
raise HTTPException(status_code=e.response.status_code, detail=str(e))
|
236 |
except httpx.RequestError as e:
|
237 |
-
logger.error(f"Error occurred during request
|
238 |
raise HTTPException(status_code=500, detail=str(e))
|
239 |
|
240 |
if "https://www.blackbox.ai" in full_response:
|
241 |
-
|
242 |
-
full_response = "HID
|
243 |
if full_response.startswith("$@$v=undefined-rv1$@$"):
|
244 |
full_response = full_response[21:]
|
245 |
|
246 |
-
cleaned_full_response = strip_model_prefix(full_response, model_prefix)
|
247 |
-
|
248 |
return {
|
249 |
"id": f"chatcmpl-{uuid.uuid4()}",
|
250 |
"object": "chat.completion",
|
@@ -253,7 +193,7 @@ async def process_non_streaming_response(request: ChatRequest):
|
|
253 |
"choices": [
|
254 |
{
|
255 |
"index": 0,
|
256 |
-
"message": {"role": "assistant", "content":
|
257 |
"finish_reason": "stop",
|
258 |
}
|
259 |
],
|
|
|
|
|
|
|
1 |
from datetime import datetime
|
2 |
import json
|
3 |
import uuid
|
|
|
4 |
from typing import Any, Dict, Optional
|
5 |
|
6 |
import httpx
|
7 |
+
from fastapi import HTTPException, Depends
|
8 |
+
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
|
9 |
+
|
10 |
+
from . import validate
|
11 |
+
from .config import (
|
12 |
+
MODEL_MAPPING,
|
13 |
AGENT_MODE,
|
14 |
TRENDING_AGENT_MODE,
|
15 |
+
MODEL_PREFIXES,
|
16 |
BASE_URL,
|
17 |
+
APP_SECRET,
|
|
|
18 |
)
|
19 |
+
from .models import ChatRequest
|
20 |
+
from .logger import setup_logger
|
|
|
21 |
|
22 |
logger = setup_logger(__name__)
|
23 |
+
security = HTTPBearer()
|
24 |
+
|
25 |
|
|
|
26 |
def create_chat_completion_data(
|
27 |
content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
|
28 |
) -> Dict[str, Any]:
|
|
|
41 |
"usage": None,
|
42 |
}
|
43 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
44 |
|
45 |
+
def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(security)):
    """Validate the request's bearer token against the configured APP_SECRET.

    Intended as a FastAPI dependency: the ``security`` (HTTPBearer) scheme
    extracts the Authorization header, and this check rejects any token that
    does not exactly match ``APP_SECRET``.

    Returns:
        The raw credential string when it matches ``APP_SECRET``.

    Raises:
        HTTPException: 403 when the presented token is not the app secret.
    """
    token = credentials.credentials
    if token == APP_SECRET:
        return token
    raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
|
49 |
+
|
50 |
+
|
51 |
def message_to_dict(message, model_prefix: Optional[str] = None):
|
52 |
+
content = message.content
|
|
|
|
|
|
|
53 |
if model_prefix:
|
54 |
content = f"{model_prefix} {content}"
|
55 |
+
if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
|
|
|
|
|
|
|
|
|
|
|
56 |
return {
|
57 |
"role": message.role,
|
58 |
"content": content,
|
|
|
64 |
}
|
65 |
return {"role": message.role, "content": content}
|
66 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
67 |
|
68 |
+
async def process_streaming_response(request: ChatRequest):
|
69 |
+
model_prefix = MODEL_PREFIXES.get(request.model, "")
|
70 |
json_data = {
|
71 |
+
"messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
|
72 |
+
"previewToken": None,
|
73 |
+
"userId": None,
|
|
|
74 |
"codeModelMode": True,
|
75 |
+
"agentMode": AGENT_MODE.get(request.model, {}),
|
76 |
+
"trendingAgentMode": TRENDING_AGENT_MODE.get(request.model, {}),
|
|
|
77 |
"isMicMode": False,
|
78 |
+
"userSystemPrompt": None,
|
79 |
"maxTokens": request.max_tokens,
|
|
|
|
|
|
|
|
|
|
|
80 |
"playgroundTopP": request.top_p,
|
81 |
+
"playgroundTemperature": request.temperature,
|
82 |
+
"isChromeExt": False,
|
83 |
+
"githubToken": None,
|
84 |
+
"clickedAnswer2": False,
|
85 |
+
"clickedAnswer3": False,
|
86 |
+
"clickedForceWebSearch": False,
|
87 |
"visitFromDelta": False,
|
88 |
+
"mobileClient": False,
|
89 |
+
"userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
|
90 |
+
"validated": validate.get_hid(),
|
91 |
+
}
|
92 |
+
|
93 |
+
headers = {
|
94 |
+
"Content-Type": "application/json",
|
95 |
+
"User-Agent": (
|
96 |
+
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
|
97 |
+
"AppleWebKit/537.36 (KHTML, like Gecko) "
|
98 |
+
"Chrome/91.0.4472.124 Safari/537.36"
|
99 |
+
),
|
100 |
}
|
101 |
|
102 |
async with httpx.AsyncClient() as client:
|
|
|
104 |
async with client.stream(
|
105 |
"POST",
|
106 |
f"{BASE_URL}/api/chat",
|
107 |
+
headers=headers,
|
108 |
json=json_data,
|
109 |
timeout=100,
|
110 |
) as response:
|
|
|
114 |
if line:
|
115 |
content = line
|
116 |
if "https://www.blackbox.ai" in content:
|
117 |
+
validate.get_hid(True)
|
118 |
+
content = "HID refreshed, please start a new conversation.\n"
|
|
|
|
|
119 |
yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
|
120 |
break
|
121 |
if content.startswith("$@$v=undefined-rv1$@$"):
|
122 |
content = content[21:]
|
123 |
+
yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
|
|
|
124 |
|
125 |
yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
|
126 |
yield "data: [DONE]\n\n"
|
127 |
except httpx.HTTPStatusError as e:
|
128 |
+
logger.error(f"HTTP error occurred: {e}")
|
129 |
raise HTTPException(status_code=e.response.status_code, detail=str(e))
|
130 |
except httpx.RequestError as e:
|
131 |
+
logger.error(f"Error occurred during request: {e}")
|
132 |
raise HTTPException(status_code=500, detail=str(e))
|
133 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
134 |
|
135 |
+
async def process_non_streaming_response(request: ChatRequest):
|
136 |
+
model_prefix = MODEL_PREFIXES.get(request.model, "")
|
137 |
json_data = {
|
138 |
+
"messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
|
139 |
+
"previewToken": None,
|
140 |
+
"userId": None,
|
|
|
141 |
"codeModelMode": True,
|
142 |
+
"agentMode": AGENT_MODE.get(request.model, {}),
|
143 |
+
"trendingAgentMode": TRENDING_AGENT_MODE.get(request.model, {}),
|
|
|
144 |
"isMicMode": False,
|
145 |
+
"userSystemPrompt": None,
|
146 |
"maxTokens": request.max_tokens,
|
|
|
|
|
|
|
|
|
|
|
147 |
"playgroundTopP": request.top_p,
|
148 |
+
"playgroundTemperature": request.temperature,
|
149 |
+
"isChromeExt": False,
|
150 |
+
"githubToken": None,
|
151 |
+
"clickedAnswer2": False,
|
152 |
+
"clickedAnswer3": False,
|
153 |
+
"clickedForceWebSearch": False,
|
154 |
"visitFromDelta": False,
|
155 |
+
"mobileClient": False,
|
156 |
+
"userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
|
157 |
+
"validated": validate.get_hid(),
|
158 |
+
}
|
159 |
+
|
160 |
+
headers = {
|
161 |
+
"Content-Type": "application/json",
|
162 |
+
"User-Agent": (
|
163 |
+
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
|
164 |
+
"AppleWebKit/537.36 (KHTML, like Gecko) "
|
165 |
+
"Chrome/91.0.4472.124 Safari/537.36"
|
166 |
+
),
|
167 |
}
|
168 |
|
169 |
full_response = ""
|
170 |
async with httpx.AsyncClient() as client:
|
171 |
try:
|
172 |
+
response = await client.post(f"{BASE_URL}/api/chat", headers=headers, json=json_data)
|
173 |
+
response.raise_for_status()
|
174 |
+
full_response = response.text
|
|
|
|
|
|
|
|
|
|
|
|
|
175 |
except httpx.HTTPStatusError as e:
|
176 |
+
logger.error(f"HTTP error occurred: {e}")
|
177 |
raise HTTPException(status_code=e.response.status_code, detail=str(e))
|
178 |
except httpx.RequestError as e:
|
179 |
+
logger.error(f"Error occurred during request: {e}")
|
180 |
raise HTTPException(status_code=500, detail=str(e))
|
181 |
|
182 |
if "https://www.blackbox.ai" in full_response:
|
183 |
+
validate.get_hid(True)
|
184 |
+
full_response = "HID refreshed, please start a new conversation."
|
185 |
if full_response.startswith("$@$v=undefined-rv1$@$"):
|
186 |
full_response = full_response[21:]
|
187 |
|
|
|
|
|
188 |
return {
|
189 |
"id": f"chatcmpl-{uuid.uuid4()}",
|
190 |
"object": "chat.completion",
|
|
|
193 |
"choices": [
|
194 |
{
|
195 |
"index": 0,
|
196 |
+
"message": {"role": "assistant", "content": full_response},
|
197 |
"finish_reason": "stop",
|
198 |
}
|
199 |
],
|