Niansuh committed on
Commit
55c6c36
·
verified ·
1 Parent(s): a897190

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +58 -77
api/utils.py CHANGED
@@ -11,7 +11,6 @@ from fastapi import HTTPException
11
  from api.config import (
12
  MODEL_MAPPING,
13
  get_headers_api_chat,
14
- get_headers_chat,
15
  BASE_URL,
16
  AGENT_MODE,
17
  TRENDING_AGENT_MODE,
@@ -27,12 +26,6 @@ from api import validate
27
  logger = setup_logger(__name__)
28
 
29
 
30
- # Helper function to create a random alphanumeric chat ID
31
- def generate_chat_id(length: int = 7) -> str:
32
- characters = string.ascii_letters + string.digits
33
- return ''.join(random.choices(characters, k=length))
34
-
35
-
36
  # Helper function to create chat completion data
37
  def create_chat_completion_data(
38
  content: str,
@@ -48,7 +41,7 @@ def create_chat_completion_data(
48
  "choices": [
49
  {
50
  "index": 0,
51
- "delta": {"content": content, "role": "assistant"},
52
  "finish_reason": finish_reason,
53
  }
54
  ],
@@ -93,19 +86,18 @@ def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
93
 
94
 
95
  # Function to get the correct referer URL for logging
96
- def get_referer_url(chat_id: str, model: str) -> str:
97
  """Generate the referer URL based on specific models listed in MODEL_REFERERS."""
98
  if model in MODEL_REFERERS:
99
- return f"{BASE_URL}/chat/{chat_id}?model={model}"
100
  return BASE_URL
101
 
102
 
103
  # Process streaming response with headers from config.py
104
  async def process_streaming_response(request: ChatRequest):
105
- chat_id = generate_chat_id()
106
- referer_url = get_referer_url(chat_id, request.model)
107
  logger.info(
108
- f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}"
109
  )
110
 
111
  agent_mode = AGENT_MODE.get(request.model, {})
@@ -117,35 +109,33 @@ async def process_streaming_response(request: ChatRequest):
117
  if request.model == 'o1-preview':
118
  delay_seconds = random.randint(1, 60)
119
  logger.info(
120
- f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})"
121
  )
122
  await asyncio.sleep(delay_seconds)
123
 
124
  json_data = {
125
- "agentMode": agent_mode,
126
- "clickedAnswer2": False,
127
- "clickedAnswer3": False,
128
- "clickedForceWebSearch": False,
129
- "codeModelMode": True,
130
- "githubToken": None,
131
- "id": chat_id,
132
- "isChromeExt": False,
133
- "isMicMode": False,
134
- "maxTokens": request.max_tokens,
135
  "messages": [
136
  message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
137
  ],
138
- "mobileClient": False,
139
- "playgroundTemperature": request.temperature,
140
- "playgroundTopP": request.top_p,
141
  "previewToken": None,
142
- "trendingAgentMode": trending_agent_mode,
143
  "userId": None,
144
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
 
 
 
145
  "userSystemPrompt": None,
146
- # Use validate.getHid() for the 'validated' field
147
- "validated": validate.getHid(),
 
 
 
 
 
 
148
  "visitFromDelta": False,
 
 
 
149
  }
150
 
151
  async with httpx.AsyncClient() as client:
@@ -159,42 +149,42 @@ async def process_streaming_response(request: ChatRequest):
159
  ) as response:
160
  response.raise_for_status()
161
  async for line in response.aiter_lines():
162
- timestamp = int(datetime.now().timestamp())
163
  if line:
164
- content = line + "\n"
165
- if "https://www.blackbox.ai" in content:
166
  # Refresh hid and inform the user
167
  validate.getHid(True)
168
- content = "The HID has been refreshed; please try again.\n"
169
  yield (
170
- f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
171
  )
172
  break
173
- if content.startswith("$@$v=undefined-rv1$@$"):
174
- content = content[21:]
175
- cleaned_content = strip_model_prefix(content, model_prefix)
 
176
  yield (
177
- f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
178
  )
179
 
 
180
  yield (
181
- f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
182
  )
183
  yield "data: [DONE]\n\n"
184
  except httpx.HTTPStatusError as e:
185
- logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
186
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
187
  except httpx.RequestError as e:
188
- logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
189
  raise HTTPException(status_code=500, detail=str(e))
190
 
191
 
192
  # Process non-streaming response with headers from config.py
193
  async def process_non_streaming_response(request: ChatRequest):
194
- chat_id = generate_chat_id()
195
- referer_url = get_referer_url(chat_id, request.model)
196
  logger.info(
197
- f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}"
198
  )
199
 
200
  agent_mode = AGENT_MODE.get(request.model, {})
@@ -202,63 +192,54 @@ async def process_non_streaming_response(request: ChatRequest):
202
  model_prefix = MODEL_PREFIXES.get(request.model, "")
203
 
204
  headers_api_chat = get_headers_api_chat(referer_url)
205
- headers_chat = get_headers_chat(
206
- referer_url,
207
- next_action=str(uuid.uuid4()),
208
- next_router_state_tree=json.dumps([""]),
209
- )
210
 
211
  if request.model == 'o1-preview':
212
  delay_seconds = random.randint(20, 60)
213
  logger.info(
214
- f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})"
215
  )
216
  await asyncio.sleep(delay_seconds)
217
 
218
  json_data = {
219
- "agentMode": agent_mode,
220
- "clickedAnswer2": False,
221
- "clickedAnswer3": False,
222
- "clickedForceWebSearch": False,
223
- "codeModelMode": True,
224
- "githubToken": None,
225
- "id": chat_id,
226
- "isChromeExt": False,
227
- "isMicMode": False,
228
- "maxTokens": request.max_tokens,
229
  "messages": [
230
  message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
231
  ],
232
- "mobileClient": False,
233
- "playgroundTemperature": request.temperature,
234
- "playgroundTopP": request.top_p,
235
  "previewToken": None,
236
- "trendingAgentMode": trending_agent_mode,
237
  "userId": None,
238
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
 
 
 
239
  "userSystemPrompt": None,
240
- # Use validate.getHid() for the 'validated' field
241
- "validated": validate.getHid(),
 
 
 
 
 
 
242
  "visitFromDelta": False,
 
 
 
243
  }
244
 
245
  full_response = ""
246
  async with httpx.AsyncClient() as client:
247
  try:
248
- async with client.stream(
249
- method="POST",
250
  url=f"{BASE_URL}/api/chat",
251
  headers=headers_api_chat,
252
  json=json_data,
253
- ) as response:
254
- response.raise_for_status()
255
- async for chunk in response.aiter_text():
256
- full_response += chunk
257
  except httpx.HTTPStatusError as e:
258
- logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
259
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
260
  except httpx.RequestError as e:
261
- logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
262
  raise HTTPException(status_code=500, detail=str(e))
263
 
264
  if "https://www.blackbox.ai" in full_response:
 
11
  from api.config import (
12
  MODEL_MAPPING,
13
  get_headers_api_chat,
 
14
  BASE_URL,
15
  AGENT_MODE,
16
  TRENDING_AGENT_MODE,
 
26
  logger = setup_logger(__name__)
27
 
28
 
 
 
 
 
 
 
29
  # Helper function to create chat completion data
30
  def create_chat_completion_data(
31
  content: str,
 
41
  "choices": [
42
  {
43
  "index": 0,
44
+ "delta": {"content": content},
45
  "finish_reason": finish_reason,
46
  }
47
  ],
 
86
 
87
 
88
  # Function to get the correct referer URL for logging
89
+ def get_referer_url(model: str) -> str:
90
  """Generate the referer URL based on specific models listed in MODEL_REFERERS."""
91
  if model in MODEL_REFERERS:
92
+ return f"{BASE_URL}/chat?model={model}"
93
  return BASE_URL
94
 
95
 
96
  # Process streaming response with headers from config.py
97
  async def process_streaming_response(request: ChatRequest):
98
+ referer_url = get_referer_url(request.model)
 
99
  logger.info(
100
+ f"Model: {request.model} - URL: {referer_url}"
101
  )
102
 
103
  agent_mode = AGENT_MODE.get(request.model, {})
 
109
  if request.model == 'o1-preview':
110
  delay_seconds = random.randint(1, 60)
111
  logger.info(
112
+ f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'"
113
  )
114
  await asyncio.sleep(delay_seconds)
115
 
116
  json_data = {
 
 
 
 
 
 
 
 
 
 
117
  "messages": [
118
  message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
119
  ],
 
 
 
120
  "previewToken": None,
 
121
  "userId": None,
122
+ "codeModelMode": True,
123
+ "agentMode": agent_mode,
124
+ "trendingAgentMode": trending_agent_mode,
125
+ "isMicMode": False,
126
  "userSystemPrompt": None,
127
+ "maxTokens": request.max_tokens,
128
+ "playgroundTopP": request.top_p,
129
+ "playgroundTemperature": request.temperature,
130
+ "isChromeExt": False,
131
+ "githubToken": None,
132
+ "clickedAnswer2": False,
133
+ "clickedAnswer3": False,
134
+ "clickedForceWebSearch": False,
135
  "visitFromDelta": False,
136
+ "mobileClient": False,
137
+ "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
138
+ "validated": validate.getHid(),
139
  }
140
 
141
  async with httpx.AsyncClient() as client:
 
149
  ) as response:
150
  response.raise_for_status()
151
  async for line in response.aiter_lines():
 
152
  if line:
153
+ # Process the line as per the Blackbox API response format
154
+ if "https://www.blackbox.ai" in line:
155
  # Refresh hid and inform the user
156
  validate.getHid(True)
157
+ content = "The HID has been refreshed; please try again."
158
  yield (
159
+ f"data: {json.dumps(create_chat_completion_data(content, request.model, int(datetime.now().timestamp())))}\n\n"
160
  )
161
  break
162
+ if line.startswith("$@$v=undefined-rv1$@$"):
163
+ line = line[21:]
164
+ cleaned_content = strip_model_prefix(line, model_prefix)
165
+ # Yield the data in the format expected by the client
166
  yield (
167
+ f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, int(datetime.now().timestamp())))}\n\n"
168
  )
169
 
170
+ # Signal that the response is complete
171
  yield (
172
+ f"data: {json.dumps(create_chat_completion_data('', request.model, int(datetime.now().timestamp()), 'stop'))}\n\n"
173
  )
174
  yield "data: [DONE]\n\n"
175
  except httpx.HTTPStatusError as e:
176
+ logger.error(f"HTTP error occurred: {e}")
177
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
178
  except httpx.RequestError as e:
179
+ logger.error(f"Error occurred during request: {e}")
180
  raise HTTPException(status_code=500, detail=str(e))
181
 
182
 
183
  # Process non-streaming response with headers from config.py
184
  async def process_non_streaming_response(request: ChatRequest):
185
+ referer_url = get_referer_url(request.model)
 
186
  logger.info(
187
+ f"Model: {request.model} - URL: {referer_url}"
188
  )
189
 
190
  agent_mode = AGENT_MODE.get(request.model, {})
 
192
  model_prefix = MODEL_PREFIXES.get(request.model, "")
193
 
194
  headers_api_chat = get_headers_api_chat(referer_url)
 
 
 
 
 
195
 
196
  if request.model == 'o1-preview':
197
  delay_seconds = random.randint(20, 60)
198
  logger.info(
199
+ f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'"
200
  )
201
  await asyncio.sleep(delay_seconds)
202
 
203
  json_data = {
 
 
 
 
 
 
 
 
 
 
204
  "messages": [
205
  message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
206
  ],
 
 
 
207
  "previewToken": None,
 
208
  "userId": None,
209
+ "codeModelMode": True,
210
+ "agentMode": agent_mode,
211
+ "trendingAgentMode": trending_agent_mode,
212
+ "isMicMode": False,
213
  "userSystemPrompt": None,
214
+ "maxTokens": request.max_tokens,
215
+ "playgroundTopP": request.top_p,
216
+ "playgroundTemperature": request.temperature,
217
+ "isChromeExt": False,
218
+ "githubToken": None,
219
+ "clickedAnswer2": False,
220
+ "clickedAnswer3": False,
221
+ "clickedForceWebSearch": False,
222
  "visitFromDelta": False,
223
+ "mobileClient": False,
224
+ "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
225
+ "validated": validate.getHid(),
226
  }
227
 
228
  full_response = ""
229
  async with httpx.AsyncClient() as client:
230
  try:
231
+ response = await client.post(
 
232
  url=f"{BASE_URL}/api/chat",
233
  headers=headers_api_chat,
234
  json=json_data,
235
+ )
236
+ response.raise_for_status()
237
+ full_response = response.text
 
238
  except httpx.HTTPStatusError as e:
239
+ logger.error(f"HTTP error occurred: {e}")
240
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
241
  except httpx.RequestError as e:
242
+ logger.error(f"Error occurred during request: {e}")
243
  raise HTTPException(status_code=500, detail=str(e))
244
 
245
  if "https://www.blackbox.ai" in full_response: