Niansuh committed
Commit 7875e72 · verified · 1 Parent(s): 7737401

Update api/utils.py

Files changed (1)
  1. api/utils.py +66 -151
api/utils.py CHANGED
@@ -1,33 +1,20 @@
 from datetime import datetime
 import json
 import uuid
-import asyncio
-import random
-import string
 from typing import Any, Dict, Optional

 import httpx
-from fastapi import HTTPException
-from api.config import (
-    MODEL_MAPPING,
-    headers,
-    BASE_URL,
-    AGENT_MODE,
-    TRENDING_AGENT_MODE,
-    MODEL_PREFIXES
-)
+from fastapi import Depends, HTTPException
+from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
+
+from api import validate
+from api.config import APP_SECRET, BASE_URL, MODEL_MAPPING, headers
 from api.models import ChatRequest
 from api.logger import setup_logger
-from api import validate

 logger = setup_logger(__name__)
+security = HTTPBearer()

-# Helper function to create a random alphanumeric chat ID
-def generate_chat_id(length: int = 7) -> str:
-    characters = string.ascii_letters + string.digits
-    return ''.join(random.choices(characters, k=length))
-
-# Helper function to create chat completion data
 def create_chat_completion_data(
     content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
 ) -> Dict[str, Any]:
@@ -46,92 +33,57 @@ def create_chat_completion_data(
         "usage": None,
     }

-# Function to convert message to dictionary format, ensuring base64 data and optional model prefix
-def message_to_dict(message, model_prefix: Optional[str] = None):
-    if isinstance(message.content, str):
-        content = message.content
-    elif isinstance(message.content, list):
-        content = message.content[0]["text"]
-    else:
-        content = message.content
+def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(security)):
+    if credentials.credentials != APP_SECRET:
+        raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
+    return credentials.credentials

-    if model_prefix:
-        content = f"{model_prefix} {content}"
-    if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
-        # Ensure base64 images are always included for all models
+def message_to_dict(message):
+    if isinstance(message.content, str):
+        return {"role": message.role, "content": message.content}
+    elif isinstance(message.content, list) and len(message.content) == 2:
         return {
             "role": message.role,
-            "content": content,
+            "content": message.content[0]["text"],
             "data": {
                 "imageBase64": message.content[1]["image_url"]["url"],
                 "fileText": "",
                 "title": "snapshot",
             },
         }
-    return {"role": message.role, "content": content}
-
-# Function to strip model prefix from content if present
-def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
-    """Remove the model prefix from the response content if present."""
-    if model_prefix and content.startswith(model_prefix):
-        logger.debug(f"Stripping prefix '{model_prefix}' from content.")
-        return content[len(model_prefix):].strip()
-    return content
+    else:
+        return {"role": message.role, "content": message.content}

-# Process streaming response
 async def process_streaming_response(request: ChatRequest):
-    chat_id = generate_chat_id()
-    referer_url = BASE_URL
-    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
-
-    agent_mode = AGENT_MODE.get(request.model, {})
-    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
-    model_prefix = MODEL_PREFIXES.get(request.model, "")
-
-    headers_api_chat = headers.copy()
-    headers_api_chat['Referer'] = referer_url
-    headers_api_chat['Cookie'] = f'hid={validate.getHid()}'
-
-    logger.debug(f"Headers being sent: {headers_api_chat}")
-
-    if request.model == 'o1-preview':
-        delay_seconds = random.randint(1, 60)
-        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
-        await asyncio.sleep(delay_seconds)
-
     json_data = {
-        "agentMode": agent_mode,
-        "clickedAnswer2": False,
-        "clickedAnswer3": False,
-        "clickedForceWebSearch": False,
+        "messages": [message_to_dict(msg) for msg in request.messages],
+        "previewToken": None,
+        "userId": None,
         "codeModelMode": True,
-        "githubToken": None,
-        "id": chat_id,
-        "isChromeExt": False,
+        "agentMode": {},
+        "trendingAgentMode": {},
         "isMicMode": False,
+        "userSystemPrompt": None,
         "maxTokens": request.max_tokens,
-        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
-        "mobileClient": False,
-        "playgroundTemperature": request.temperature,
         "playgroundTopP": request.top_p,
-        "previewToken": None,
-        "trendingAgentMode": trending_agent_mode,
-        "userId": None,
-        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
-        "userSystemPrompt": None,
-        # Remove 'validated' if not required
-        # "validated": validate.getHid(),
+        "playgroundTemperature": request.temperature,
+        "isChromeExt": False,
+        "githubToken": None,
+        "clickedAnswer2": False,
+        "clickedAnswer3": False,
+        "clickedForceWebSearch": False,
         "visitFromDelta": False,
+        "mobileClient": False,
+        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
+        "validated": validate.getHid()
     }

-    logger.debug(f"JSON payload being sent: {json.dumps(json_data)}")
-
     async with httpx.AsyncClient() as client:
         try:
             async with client.stream(
                 "POST",
                 f"{BASE_URL}/api/chat",
-                headers=headers_api_chat,
+                headers=headers,
                 json=json_data,
                 timeout=100,
             ) as response:
@@ -139,107 +91,70 @@ async def process_streaming_response(request: ChatRequest):
                 async for line in response.aiter_lines():
                     timestamp = int(datetime.now().timestamp())
                     if line:
-                        content = line
-                        logger.debug(f"Received content: {content}")
-                        # Modify the condition to detect specific error message
-                        if "Invalid or expired 'hid'" in content:
-                            logger.warning("Invalid or expired 'hid' detected. Refreshing 'hid'.")
+                        content = line + "\n"
+                        if "https://www.blackbox.ai" in content:
                             validate.getHid(True)
                             content = "hid已刷新,重新对话即可\n"
                             yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
                             break
-                        elif content.startswith("$@$v=undefined-rv1$@$"):
-                            content = content[21:]
-                        cleaned_content = strip_model_prefix(content, model_prefix)
-                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
+                        if content.startswith("$@$v=undefined-rv1$@$"):
+                            yield f"data: {json.dumps(create_chat_completion_data(content[21:], request.model, timestamp))}\n\n"
+                        else:
+                            yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"

                 yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                 yield "data: [DONE]\n\n"
         except httpx.HTTPStatusError as e:
-            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
+            logger.error(f"HTTP error occurred: {e}")
             raise HTTPException(status_code=e.response.status_code, detail=str(e))
         except httpx.RequestError as e:
-            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
+            logger.error(f"Error occurred during request: {e}")
             raise HTTPException(status_code=500, detail=str(e))

-# Process non-streaming response
 async def process_non_streaming_response(request: ChatRequest):
-    chat_id = generate_chat_id()
-    referer_url = BASE_URL
-    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
-
-    agent_mode = AGENT_MODE.get(request.model, {})
-    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
-    model_prefix = MODEL_PREFIXES.get(request.model, "")
-
-    headers_api_chat = headers.copy()
-    headers_api_chat['Referer'] = referer_url
-    headers_api_chat['Cookie'] = f'hid={validate.getHid()}'
-
-    logger.debug(f"Headers being sent: {headers_api_chat}")
-
-    if request.model == 'o1-preview':
-        delay_seconds = random.randint(20, 60)
-        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
-        await asyncio.sleep(delay_seconds)
-
     json_data = {
-        "agentMode": agent_mode,
-        "clickedAnswer2": False,
-        "clickedAnswer3": False,
-        "clickedForceWebSearch": False,
+        "messages": [message_to_dict(msg) for msg in request.messages],
+        "previewToken": None,
+        "userId": None,
         "codeModelMode": True,
-        "githubToken": None,
-        "id": chat_id,
-        "isChromeExt": False,
+        "agentMode": {},
+        "trendingAgentMode": {},
         "isMicMode": False,
+        "userSystemPrompt": None,
         "maxTokens": request.max_tokens,
-        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
-        "mobileClient": False,
-        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
-        "previewToken": None,
-        "trendingAgentMode": trending_agent_mode,
-        "userId": None,
-        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
-        "userSystemPrompt": None,
-        # Remove 'validated' if not required
-        # "validated": validate.getHid(),
+        "playgroundTemperature": request.temperature,
+        "isChromeExt": False,
+        "githubToken": None,
+        "clickedAnswer2": False,
+        "clickedAnswer3": False,
+        "clickedForceWebSearch": False,
         "visitFromDelta": False,
+        "mobileClient": False,
+        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
+        "validated": validate.getHid()
     }
-
-    logger.debug(f"JSON payload being sent: {json.dumps(json_data)}")
-
     full_response = ""
     async with httpx.AsyncClient() as client:
         try:
-            response = await client.post(
-                f"{BASE_URL}/api/chat",
-                headers=headers_api_chat,
-                json=json_data,
-                timeout=100,
-            )
-            response.raise_for_status()
-            full_response = response.text
-            logger.debug(f"Full response received: {full_response}")
+            async with client.stream(
+                method="POST", url=f"{BASE_URL}/api/chat", headers=headers, json=json_data
+            ) as response:
+                response.raise_for_status()
+                async for chunk in response.aiter_text():
+                    full_response += chunk
         except httpx.HTTPStatusError as e:
-            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
+            logger.error(f"HTTP error occurred: {e}")
             raise HTTPException(status_code=e.response.status_code, detail=str(e))
         except httpx.RequestError as e:
-            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
+            logger.error(f"Error occurred during request: {e}")
             raise HTTPException(status_code=500, detail=str(e))

-    # Modify the condition to detect specific error message
-    if "Invalid or expired 'hid'" in full_response:
-        logger.warning("Invalid or expired 'hid' detected. Refreshing 'hid'.")
+    if "https://www.blackbox.ai" in full_response:
        validate.getHid(True)
        full_response = "hid已刷新,重新对话即可"
-
    if full_response.startswith("$@$v=undefined-rv1$@$"):
        full_response = full_response[21:]
-
-    cleaned_full_response = strip_model_prefix(full_response, model_prefix)
-
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
@@ -248,7 +163,7 @@ async def process_non_streaming_response(request: ChatRequest):
         "choices": [
             {
                 "index": 0,
-                "message": {"role": "assistant", "content": cleaned_full_response},
+                "message": {"role": "assistant", "content": full_response},
                 "finish_reason": "stop",
             }
         ],
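
For reference, a minimal sketch of the dict shapes the simplified message_to_dict now returns. The SimpleNamespace stand-ins and the truncated base64 value are illustrative assumptions, not part of the commit; only the field names (role, content, data, imageBase64, fileText, title) come from the diff above.

    from types import SimpleNamespace

    # Stand-ins that mimic the .role / .content attributes of ChatRequest messages.
    text_msg = SimpleNamespace(role="user", content="Hello")
    image_msg = SimpleNamespace(
        role="user",
        content=[
            {"type": "text", "text": "Describe this image"},
            {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},
        ],
    )

    # message_to_dict(text_msg) returns:
    #   {"role": "user", "content": "Hello"}
    # message_to_dict(image_msg) returns:
    #   {"role": "user",
    #    "content": "Describe this image",
    #    "data": {"imageBase64": "data:image/png;base64,...",
    #             "fileText": "", "title": "snapshot"}}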
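The commit also adds security = HTTPBearer() and verify_app_secret without using them in this file, so they are presumably consumed by the route layer. Below is a hedged sketch of how a route module might attach the dependency; the router, the /v1/chat/completions path, and the request.stream flag are assumptions for illustration, not something this diff shows.

    from fastapi import APIRouter, Depends
    from fastapi.responses import StreamingResponse

    from api.models import ChatRequest
    from api.utils import (
        process_non_streaming_response,
        process_streaming_response,
        verify_app_secret,
    )

    router = APIRouter()

    @router.post("/v1/chat/completions")
    async def chat_completions(
        request: ChatRequest,
        app_secret: str = Depends(verify_app_secret),  # rejects requests whose Bearer token != APP_SECRET
    ):
        # Stream Server-Sent Events for streaming requests, plain JSON otherwise
        # (assumes ChatRequest exposes a boolean `stream` field).
        if request.stream:
            return StreamingResponse(
                process_streaming_response(request), media_type="text/event-stream"
            )
        return await process_non_streaming_response(request)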