Niansuh committed on
Commit bd6345a · verified · 1 Parent(s): fc2a470

Update api/utils.py

Files changed (1): api/utils.py (+39 -24)
api/utils.py CHANGED
@@ -1,19 +1,24 @@
+# main.py or your main application file
+
 from datetime import datetime
 import json
 import uuid
 import asyncio
 import random
+import string
 from typing import Any, Dict, Optional
 
 import httpx
 from fastapi import HTTPException
 from api.config import (
     MODEL_MAPPING,
-    common_headers,  # Import common_headers directly
+    get_headers_api_chat,
+    get_headers_chat,
     BASE_URL,
     AGENT_MODE,
     TRENDING_AGENT_MODE,
     MODEL_PREFIXES,
+    MODEL_REFERERS
 )
 from api.models import ChatRequest
 from api.logger import setup_logger
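
Note: this commit switches from a shared common_headers dict to per-call header builders, but api/config.py itself is not part of the diff. The sketch below is only an assumed shape for the two helpers, inferred from their call sites later in this file; the real header names and values live in config.py.

    # Hypothetical sketch of the helpers imported above (api/config.py is not shown in this commit).
    # All header names and values here are placeholders, not the project's actual ones.
    def get_headers_api_chat(referer_url: str) -> dict:
        # Headers for the JSON chat API endpoint.
        return {
            "accept": "*/*",
            "content-type": "application/json",
            "referer": referer_url,
        }

    def get_headers_chat(referer_url: str, next_action: str, next_router_state_tree: str) -> dict:
        # Headers for a Next.js server-action style endpoint.
        return {
            "accept": "text/x-component",
            "next-action": next_action,
            "next-router-state-tree": next_router_state_tree,
            "referer": referer_url,
        }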
@@ -21,6 +26,9 @@ from api.validate import getHid  # Import the asynchronous getHid function
 
 logger = setup_logger(__name__)
 
+# Define the blocked message
+BLOCKED_MESSAGE = "Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai"
+
 # Helper function to create chat completion data
 def create_chat_completion_data(
     content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
@@ -45,11 +53,7 @@ def message_to_dict(message, model_prefix: Optional[str] = None):
     content = message.content if isinstance(message.content, str) else message.content[0]["text"]
     if model_prefix:
         content = f"{model_prefix} {content}"
-    if (
-        isinstance(message.content, list)
-        and len(message.content) == 2
-        and "image_url" in message.content[1]
-    ):
+    if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
         # Ensure base64 images are always included for all models
         return {
             "role": message.role,
@@ -70,7 +74,7 @@ def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
         return content[len(model_prefix):].strip()
     return content
 
-# Process streaming response without headers
+# Process streaming response with headers from config.py
 async def process_streaming_response(request: ChatRequest):
     # Generate a unique ID for this request
     request_id = f"chatcmpl-{uuid.uuid4()}"
@@ -80,11 +84,12 @@ async def process_streaming_response(request: ChatRequest):
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
     model_prefix = MODEL_PREFIXES.get(request.model, "")
 
+    # Adjust headers_api_chat since referer_url is removed
+    headers_api_chat = get_headers_api_chat(BASE_URL)
+
     if request.model == 'o1-preview':
         delay_seconds = random.randint(1, 60)
-        logger.info(
-            f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})"
-        )
+        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})")
         await asyncio.sleep(delay_seconds)
 
     # Fetch the h-value for the 'validated' field
@@ -104,9 +109,7 @@ async def process_streaming_response(request: ChatRequest):
         "isChromeExt": False,
         "isMicMode": False,
         "maxTokens": request.max_tokens,
-        "messages": [
-            message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
-        ],
+        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
         "mobileClient": False,
         "playgroundTemperature": request.temperature,
         "playgroundTopP": request.top_p,
@@ -124,6 +127,7 @@ async def process_streaming_response(request: ChatRequest):
         async with client.stream(
             "POST",
             f"{BASE_URL}/api/chat",
+            headers=headers_api_chat,
             json=json_data,
             timeout=100,
         ) as response:
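
Note: the only functional change in this hunk is that the request now carries the headers built from config.py. Passing headers= to client.stream() is standard httpx usage; a minimal self-contained sketch:

    # Minimal httpx streaming POST with custom headers (standalone sketch).
    import asyncio
    import httpx

    async def post_stream(url: str, headers: dict, payload: dict) -> None:
        async with httpx.AsyncClient() as client:
            async with client.stream("POST", url, headers=headers, json=payload, timeout=100) as response:
                response.raise_for_status()
                async for line in response.aiter_lines():
                    print(line)

    # Example: asyncio.run(post_stream("https://example.com/api/chat", {"accept": "*/*"}, {"q": "hi"}))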
@@ -134,6 +138,12 @@ async def process_streaming_response(request: ChatRequest):
                     content = line
                     if content.startswith("$@$v=undefined-rv1$@$"):
                         content = content[21:]
+                    # Remove the blocked message if present
+                    if BLOCKED_MESSAGE in content:
+                        logger.info(f"Blocked message detected in response for Request ID {request_id}.")
+                        content = content.replace(BLOCKED_MESSAGE, '').strip()
+                        if not content:
+                            continue  # Skip if content is empty after removal
                     cleaned_content = strip_model_prefix(content, model_prefix)
                     yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
 
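Note: this hunk adds per-line removal of the upstream watermark. A standalone sketch of the same filter; because it runs on each streamed line independently, a watermark split across two chunks would not be matched:

    # Standalone sketch of the per-line filter added above.
    BLOCKED_MESSAGE = "Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai"

    def filter_line(line: str) -> str | None:
        if line.startswith("$@$v=undefined-rv1$@$"):
            line = line[21:]  # the sentinel is exactly 21 characters long
        if BLOCKED_MESSAGE in line:
            line = line.replace(BLOCKED_MESSAGE, "").strip()
            if not line:
                return None  # the line was only the watermark; emit nothing
        return line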
@@ -146,7 +156,7 @@
         logger.error(f"Error occurred during request for Request ID {request_id}: {e}")
         raise HTTPException(status_code=500, detail=str(e))
 
-# Process non-streaming response without headers
+# Process non-streaming response with headers from config.py
 async def process_non_streaming_response(request: ChatRequest):
     # Generate a unique ID for this request
     request_id = f"chatcmpl-{uuid.uuid4()}"
@@ -156,11 +166,13 @@ async def process_non_streaming_response(request: ChatRequest):
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
     model_prefix = MODEL_PREFIXES.get(request.model, "")
 
+    # Adjust headers_api_chat and headers_chat since referer_url is removed
+    headers_api_chat = get_headers_api_chat(BASE_URL)
+    headers_chat = get_headers_chat(BASE_URL, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
+
     if request.model == 'o1-preview':
         delay_seconds = random.randint(20, 60)
-        logger.info(
-            f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})"
-        )
+        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})")
         await asyncio.sleep(delay_seconds)
 
     # Fetch the h-value for the 'validated' field
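
Note: headers_chat is built here with a fresh UUID as next_action and a serialized router-state tree, yet only headers_api_chat is attached to the request visible in this diff; presumably headers_chat is consumed elsewhere in the file. Illustrative values for the two dynamic fields:

    # Illustrative values for the dynamic header fields built above.
    import json
    import uuid

    next_action = str(uuid.uuid4())            # e.g. "550e8400-e29b-41d4-a716-446655440000"
    next_router_state_tree = json.dumps([""])  # produces the string '[""]'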
@@ -180,9 +192,7 @@ async def process_non_streaming_response(request: ChatRequest):
         "isChromeExt": False,
         "isMicMode": False,
         "maxTokens": request.max_tokens,
-        "messages": [
-            message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
-        ],
+        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
         "mobileClient": False,
         "playgroundTemperature": request.temperature,
         "playgroundTopP": request.top_p,
@@ -199,9 +209,7 @@ async def process_non_streaming_response(request: ChatRequest):
     async with httpx.AsyncClient() as client:
         try:
             async with client.stream(
-                method="POST",
-                url=f"{BASE_URL}/api/chat",
-                json=json_data,
+                method="POST", url=f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data
             ) as response:
                 response.raise_for_status()
                 async for chunk in response.aiter_text():
@@ -212,10 +220,17 @@
         except httpx.RequestError as e:
             logger.error(f"Error occurred during request for Request ID {request_id}: {e}")
             raise HTTPException(status_code=500, detail=str(e))
-
+
     if full_response.startswith("$@$v=undefined-rv1$@$"):
         full_response = full_response[21:]
 
+    # Remove the blocked message if present
+    if BLOCKED_MESSAGE in full_response:
+        logger.info(f"Blocked message detected in response for Request ID {request_id}.")
+        full_response = full_response.replace(BLOCKED_MESSAGE, '').strip()
+        if not full_response:
+            raise HTTPException(status_code=500, detail="Blocked message detected in response.")
+
     cleaned_full_response = strip_model_prefix(full_response, model_prefix)
 
     return {
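
Note: the non-streaming path applies the same watermark removal to the accumulated response, raising a 500 instead of skipping when nothing remains. The hard-coded slice works only because "$@$v=undefined-rv1$@$" is exactly 21 characters; on Python 3.9+, str.removeprefix expresses the same strip without the magic number:

    # Offset-free equivalent of the sentinel strip (Python 3.9+).
    SENTINEL = "$@$v=undefined-rv1$@$"  # 21 characters
    full_response = full_response.removeprefix(SENTINEL)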
 