Update api/utils.py
Browse files- api/utils.py +125 -102
api/utils.py
CHANGED
@@ -1,28 +1,27 @@
|
|
1 |
from datetime import datetime
|
2 |
import json
|
3 |
import uuid
|
|
|
|
|
4 |
from typing import Any, Dict, Optional
|
5 |
|
6 |
import httpx
|
7 |
-
from fastapi import
|
8 |
-
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
|
9 |
-
|
10 |
-
from api import validate
|
11 |
from api.config import (
|
12 |
-
|
13 |
-
|
14 |
-
MODEL_MAPPING,
|
15 |
AGENT_MODE,
|
16 |
TRENDING_AGENT_MODE,
|
17 |
-
|
18 |
-
|
19 |
)
|
20 |
from api.models import ChatRequest
|
21 |
from api.logger import setup_logger
|
|
|
22 |
|
23 |
logger = setup_logger(__name__)
|
24 |
-
security = HTTPBearer()
|
25 |
|
|
|
26 |
def create_chat_completion_data(
|
27 |
content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
|
28 |
) -> Dict[str, Any]:
|
@@ -41,71 +40,82 @@ def create_chat_completion_data(
|
|
41 |
"usage": None,
|
42 |
}
|
43 |
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
|
|
|
|
|
|
48 |
|
|
|
49 |
def message_to_dict(message, model_prefix: Optional[str] = None):
|
50 |
-
content = message.content
|
51 |
-
if
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
content_text = f"{model_prefix} {content_text}"
|
56 |
return {
|
57 |
"role": message.role,
|
58 |
-
"content":
|
59 |
"data": {
|
60 |
-
"imageBase64": content[1]["image_url"]["url"],
|
61 |
"fileText": "",
|
62 |
"title": "snapshot",
|
63 |
},
|
64 |
}
|
65 |
-
|
66 |
-
if model_prefix:
|
67 |
-
content = f"{model_prefix} {content}"
|
68 |
-
return {"role": message.role, "content": content}
|
69 |
-
|
70 |
-
def generate_chat_id() -> str:
|
71 |
-
return str(uuid.uuid4())
|
72 |
|
|
|
73 |
async def process_streaming_response(request: ChatRequest):
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
86 |
|
87 |
json_data = {
|
88 |
-
"messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
|
89 |
-
"previewToken": None,
|
90 |
-
"userId": None,
|
91 |
-
"codeModelMode": True,
|
92 |
"agentMode": agent_mode,
|
93 |
-
"trendingAgentMode": trending_agent_mode,
|
94 |
-
"isMicMode": False,
|
95 |
-
"userSystemPrompt": None,
|
96 |
-
"maxTokens": request.max_tokens,
|
97 |
-
"playgroundTopP": request.top_p,
|
98 |
-
"playgroundTemperature": request.temperature,
|
99 |
-
"isChromeExt": False,
|
100 |
-
"githubToken": None,
|
101 |
"clickedAnswer2": False,
|
102 |
"clickedAnswer3": False,
|
103 |
"clickedForceWebSearch": False,
|
104 |
-
"
|
105 |
-
"
|
106 |
"id": chat_id,
|
107 |
-
"
|
108 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
109 |
}
|
110 |
|
111 |
async with httpx.AsyncClient() as client:
|
@@ -113,7 +123,7 @@ async def process_streaming_response(request: ChatRequest):
|
|
113 |
async with client.stream(
|
114 |
"POST",
|
115 |
f"{BASE_URL}/api/chat",
|
116 |
-
headers=
|
117 |
json=json_data,
|
118 |
timeout=100,
|
119 |
) as response:
|
@@ -123,87 +133,100 @@ async def process_streaming_response(request: ChatRequest):
|
|
123 |
if line:
|
124 |
content = line
|
125 |
if "https://www.blackbox.ai" in content:
|
126 |
-
|
127 |
-
|
128 |
-
content = "hid has been refreshed, please retry"
|
129 |
-
logger.info(f"hid refreshed due to content: {content}")
|
130 |
yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
|
131 |
break
|
132 |
if content.startswith("$@$v=undefined-rv1$@$"):
|
133 |
content = content[21:]
|
134 |
-
|
|
|
135 |
|
136 |
yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
|
137 |
yield "data: [DONE]\n\n"
|
138 |
except httpx.HTTPStatusError as e:
|
139 |
-
logger.error(f"HTTP error occurred: {e}")
|
140 |
raise HTTPException(status_code=e.response.status_code, detail=str(e))
|
141 |
except httpx.RequestError as e:
|
142 |
-
logger.error(f"Error occurred during request: {e}")
|
143 |
raise HTTPException(status_code=500, detail=str(e))
|
144 |
|
|
|
145 |
async def process_non_streaming_response(request: ChatRequest):
|
146 |
-
|
147 |
-
|
148 |
-
|
149 |
-
|
150 |
-
|
151 |
-
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
158 |
|
159 |
json_data = {
|
160 |
-
"messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
|
161 |
-
"previewToken": None,
|
162 |
-
"userId": None,
|
163 |
-
"codeModelMode": True,
|
164 |
"agentMode": agent_mode,
|
165 |
-
"trendingAgentMode": trending_agent_mode,
|
166 |
-
"isMicMode": False,
|
167 |
-
"userSystemPrompt": None,
|
168 |
-
"maxTokens": request.max_tokens,
|
169 |
-
"playgroundTopP": request.top_p,
|
170 |
-
"playgroundTemperature": request.temperature,
|
171 |
-
"isChromeExt": False,
|
172 |
-
"githubToken": None,
|
173 |
"clickedAnswer2": False,
|
174 |
"clickedAnswer3": False,
|
175 |
"clickedForceWebSearch": False,
|
176 |
-
"
|
177 |
-
"
|
178 |
"id": chat_id,
|
179 |
-
"
|
180 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
181 |
}
|
|
|
182 |
full_response = ""
|
183 |
async with httpx.AsyncClient() as client:
|
184 |
try:
|
185 |
async with client.stream(
|
186 |
-
method="POST", url=f"{BASE_URL}/api/chat", headers=
|
187 |
) as response:
|
188 |
response.raise_for_status()
|
189 |
async for chunk in response.aiter_text():
|
190 |
full_response += chunk
|
191 |
except httpx.HTTPStatusError as e:
|
192 |
-
logger.error(f"HTTP error occurred: {e}")
|
193 |
raise HTTPException(status_code=e.response.status_code, detail=str(e))
|
194 |
except httpx.RequestError as e:
|
195 |
-
logger.error(f"Error occurred during request: {e}")
|
196 |
raise HTTPException(status_code=500, detail=str(e))
|
197 |
|
198 |
if "https://www.blackbox.ai" in full_response:
|
199 |
-
|
200 |
-
|
201 |
-
full_response = "hid has been refreshed, please retry"
|
202 |
-
logger.info("hid refreshed due to response content")
|
203 |
-
|
204 |
if full_response.startswith("$@$v=undefined-rv1$@$"):
|
205 |
full_response = full_response[21:]
|
206 |
|
|
|
|
|
207 |
return {
|
208 |
"id": f"chatcmpl-{uuid.uuid4()}",
|
209 |
"object": "chat.completion",
|
@@ -212,7 +235,7 @@ async def process_non_streaming_response(request: ChatRequest):
|
|
212 |
"choices": [
|
213 |
{
|
214 |
"index": 0,
|
215 |
-
"message": {"role": "assistant", "content":
|
216 |
"finish_reason": "stop",
|
217 |
}
|
218 |
],
|
|
|
1 |
from datetime import datetime
|
2 |
import json
|
3 |
import uuid
|
4 |
+
import asyncio
|
5 |
+
import random
|
6 |
from typing import Any, Dict, Optional
|
7 |
|
8 |
import httpx
|
9 |
+
from fastapi import HTTPException
|
|
|
|
|
|
|
10 |
from api.config import (
|
11 |
+
get_model,
|
12 |
+
MODEL_PREFIXES,
|
|
|
13 |
AGENT_MODE,
|
14 |
TRENDING_AGENT_MODE,
|
15 |
+
BASE_URL,
|
16 |
+
generate_id,
|
17 |
)
|
18 |
from api.models import ChatRequest
|
19 |
from api.logger import setup_logger
|
20 |
+
from api.validate import getHid
|
21 |
|
22 |
logger = setup_logger(__name__)
|
|
|
23 |
|
24 |
+
# Helper function to create chat completion data
|
25 |
def create_chat_completion_data(
|
26 |
content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
|
27 |
) -> Dict[str, Any]:
|
|
|
40 |
"usage": None,
|
41 |
}
|
42 |
|
43 |
+
# Helper that removes a leading model prefix from response text when one is configured.
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Return *content* with *model_prefix* removed from its start, if present.

    When no prefix is configured, or the content does not begin with it,
    the content is returned unchanged.
    """
    # Guard clauses: nothing to strip, or the prefix is not actually there.
    if not model_prefix or not content.startswith(model_prefix):
        return content
    logger.debug(f"Stripping prefix '{model_prefix}' from content.")
    # Drop the prefix and any whitespace left behind at either end.
    return content[len(model_prefix):].strip()
|
50 |
|
51 |
+
# Helper that serializes a chat message to the upstream API's dict shape,
# attaching base64 image data when the message carries an image part.
def message_to_dict(message, model_prefix: Optional[str] = None):
    raw = message.content
    # Plain-string messages use the content directly; multi-part messages
    # take their text from the first part.
    if isinstance(raw, str):
        text = raw
    else:
        text = raw[0]["text"]
    if model_prefix:
        text = f"{model_prefix} {text}"
    # An image message is exactly [text_part, image_part] where the second
    # part holds an "image_url" entry.
    has_image = (
        isinstance(raw, list)
        and len(raw) == 2
        and "image_url" in raw[1]
    )
    if not has_image:
        return {"role": message.role, "content": text}
    # Ensure base64 images are always included for all models.
    return {
        "role": message.role,
        "content": text,
        "data": {
            "imageBase64": raw[1]["image_url"]["url"],
            "fileText": "",
            "title": "snapshot",
        },
    }
|
|
|
|
|
|
|
|
|
|
|
|
|
68 |
|
69 |
+
# Process streaming response
|
70 |
async def process_streaming_response(request: ChatRequest):
|
71 |
+
chat_id = generate_id()
|
72 |
+
logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model}")
|
73 |
+
|
74 |
+
model = get_model(request.model)
|
75 |
+
agent_mode = AGENT_MODE.get(model, {})
|
76 |
+
trending_agent_mode = TRENDING_AGENT_MODE.get(model, {})
|
77 |
+
model_prefix = MODEL_PREFIXES.get(model, "")
|
78 |
+
|
79 |
+
headers_api_chat = {
|
80 |
+
'accept': '*/*',
|
81 |
+
'accept-language': 'en-US,en;q=0.9',
|
82 |
+
'cache-control': 'no-cache',
|
83 |
+
'content-type': 'application/json',
|
84 |
+
'origin': BASE_URL,
|
85 |
+
'pragma': 'no-cache',
|
86 |
+
'priority': 'u=1, i',
|
87 |
+
'referer': f'{BASE_URL}/',
|
88 |
+
'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
|
89 |
+
'sec-ch-ua-mobile': '?0',
|
90 |
+
'sec-ch-ua-platform': '"Linux"',
|
91 |
+
'sec-fetch-dest': 'empty',
|
92 |
+
'sec-fetch-mode': 'cors',
|
93 |
+
'sec-fetch-site': 'same-origin',
|
94 |
+
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
|
95 |
+
}
|
96 |
|
97 |
json_data = {
|
|
|
|
|
|
|
|
|
98 |
"agentMode": agent_mode,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
99 |
"clickedAnswer2": False,
|
100 |
"clickedAnswer3": False,
|
101 |
"clickedForceWebSearch": False,
|
102 |
+
"codeModelMode": True,
|
103 |
+
"githubToken": None,
|
104 |
"id": chat_id,
|
105 |
+
"isChromeExt": False,
|
106 |
+
"isMicMode": False,
|
107 |
+
"maxTokens": request.max_tokens,
|
108 |
+
"messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
|
109 |
+
"mobileClient": False,
|
110 |
+
"playgroundTemperature": request.temperature,
|
111 |
+
"playgroundTopP": request.top_p,
|
112 |
+
"previewToken": None,
|
113 |
+
"trendingAgentMode": trending_agent_mode,
|
114 |
+
"userId": None,
|
115 |
+
"userSelectedModel": model if model in USER_SELECTED_MODELS else None,
|
116 |
+
"userSystemPrompt": None,
|
117 |
+
"validated": getHid(),
|
118 |
+
"visitFromDelta": False,
|
119 |
}
|
120 |
|
121 |
async with httpx.AsyncClient() as client:
|
|
|
123 |
async with client.stream(
|
124 |
"POST",
|
125 |
f"{BASE_URL}/api/chat",
|
126 |
+
headers=headers_api_chat,
|
127 |
json=json_data,
|
128 |
timeout=100,
|
129 |
) as response:
|
|
|
133 |
if line:
|
134 |
content = line
|
135 |
if "https://www.blackbox.ai" in content:
|
136 |
+
getHid(True)
|
137 |
+
content = "HID has been refreshed, please start a new conversation.\n"
|
|
|
|
|
138 |
yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
|
139 |
break
|
140 |
if content.startswith("$@$v=undefined-rv1$@$"):
|
141 |
content = content[21:]
|
142 |
+
cleaned_content = strip_model_prefix(content, model_prefix)
|
143 |
+
yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
|
144 |
|
145 |
yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
|
146 |
yield "data: [DONE]\n\n"
|
147 |
except httpx.HTTPStatusError as e:
|
148 |
+
logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
|
149 |
raise HTTPException(status_code=e.response.status_code, detail=str(e))
|
150 |
except httpx.RequestError as e:
|
151 |
+
logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
|
152 |
raise HTTPException(status_code=500, detail=str(e))
|
153 |
|
154 |
+
# Process non-streaming response
|
155 |
async def process_non_streaming_response(request: ChatRequest):
|
156 |
+
chat_id = generate_id()
|
157 |
+
logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model}")
|
158 |
+
|
159 |
+
model = get_model(request.model)
|
160 |
+
agent_mode = AGENT_MODE.get(model, {})
|
161 |
+
trending_agent_mode = TRENDING_AGENT_MODE.get(model, {})
|
162 |
+
model_prefix = MODEL_PREFIXES.get(model, "")
|
163 |
+
|
164 |
+
headers_api_chat = {
|
165 |
+
'accept': '*/*',
|
166 |
+
'accept-language': 'en-US,en;q=0.9',
|
167 |
+
'cache-control': 'no-cache',
|
168 |
+
'content-type': 'application/json',
|
169 |
+
'origin': BASE_URL,
|
170 |
+
'pragma': 'no-cache',
|
171 |
+
'priority': 'u=1, i',
|
172 |
+
'referer': f'{BASE_URL}/',
|
173 |
+
'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
|
174 |
+
'sec-ch-ua-mobile': '?0',
|
175 |
+
'sec-ch-ua-platform': '"Linux"',
|
176 |
+
'sec-fetch-dest': 'empty',
|
177 |
+
'sec-fetch-mode': 'cors',
|
178 |
+
'sec-fetch-site': 'same-origin',
|
179 |
+
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
|
180 |
+
}
|
181 |
|
182 |
json_data = {
|
|
|
|
|
|
|
|
|
183 |
"agentMode": agent_mode,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
184 |
"clickedAnswer2": False,
|
185 |
"clickedAnswer3": False,
|
186 |
"clickedForceWebSearch": False,
|
187 |
+
"codeModelMode": True,
|
188 |
+
"githubToken": None,
|
189 |
"id": chat_id,
|
190 |
+
"isChromeExt": False,
|
191 |
+
"isMicMode": False,
|
192 |
+
"maxTokens": request.max_tokens,
|
193 |
+
"messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
|
194 |
+
"mobileClient": False,
|
195 |
+
"playgroundTemperature": request.temperature,
|
196 |
+
"playgroundTopP": request.top_p,
|
197 |
+
"previewToken": None,
|
198 |
+
"trendingAgentMode": trending_agent_mode,
|
199 |
+
"userId": None,
|
200 |
+
"userSelectedModel": model if model in USER_SELECTED_MODELS else None,
|
201 |
+
"userSystemPrompt": None,
|
202 |
+
"validated": getHid(),
|
203 |
+
"visitFromDelta": False,
|
204 |
}
|
205 |
+
|
206 |
full_response = ""
|
207 |
async with httpx.AsyncClient() as client:
|
208 |
try:
|
209 |
async with client.stream(
|
210 |
+
method="POST", url=f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data
|
211 |
) as response:
|
212 |
response.raise_for_status()
|
213 |
async for chunk in response.aiter_text():
|
214 |
full_response += chunk
|
215 |
except httpx.HTTPStatusError as e:
|
216 |
+
logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
|
217 |
raise HTTPException(status_code=e.response.status_code, detail=str(e))
|
218 |
except httpx.RequestError as e:
|
219 |
+
logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
|
220 |
raise HTTPException(status_code=500, detail=str(e))
|
221 |
|
222 |
if "https://www.blackbox.ai" in full_response:
|
223 |
+
getHid(True)
|
224 |
+
full_response = "HID has been refreshed, please start a new conversation."
|
|
|
|
|
|
|
225 |
if full_response.startswith("$@$v=undefined-rv1$@$"):
|
226 |
full_response = full_response[21:]
|
227 |
|
228 |
+
cleaned_full_response = strip_model_prefix(full_response, model_prefix)
|
229 |
+
|
230 |
return {
|
231 |
"id": f"chatcmpl-{uuid.uuid4()}",
|
232 |
"object": "chat.completion",
|
|
|
235 |
"choices": [
|
236 |
{
|
237 |
"index": 0,
|
238 |
+
"message": {"role": "assistant", "content": cleaned_full_response},
|
239 |
"finish_reason": "stop",
|
240 |
}
|
241 |
],
|