Niansuh committed on
Commit
95050a6
·
verified ·
1 Parent(s): 6856679

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +79 -13
api/utils.py CHANGED
@@ -68,16 +68,13 @@ def message_to_dict(message, model_prefix: Optional[str] = None):
68
  return {"role": message.role, "content": content}
69
 
70
  async def process_streaming_response(request: ChatRequest):
71
- agent_mode = AGENT_MODE.get(request.model, {})
72
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
73
- model_prefix = MODEL_PREFIXES.get(request.model, "")
 
74
 
75
  hid = validate.getHid()
76
- logger.info(f"Using hid: {hid} for model: {request.model}")
77
-
78
- user_selected_model = MODEL_MAPPING.get(request.model, request.model)
79
- logger.info(f"Processing request for model: {request.model}")
80
- logger.info(f"Using userSelectedModel: {user_selected_model}")
81
 
82
  json_data = {
83
  "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
@@ -98,7 +95,7 @@ async def process_streaming_response(request: ChatRequest):
98
  "clickedForceWebSearch": False,
99
  "visitFromDelta": False,
100
  "mobileClient": False,
101
- "userSelectedModel": user_selected_model,
102
  "validated": hid
103
  }
104
 
@@ -120,13 +117,14 @@ async def process_streaming_response(request: ChatRequest):
120
  validate.getHid(True)
121
  content = "hid已刷新,重新对话即可"
122
  logger.info(f"hid refreshed due to content: {content}")
123
- yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
124
  break
125
  if content.startswith("$@$v=undefined-rv1$@$"):
126
  content = content[21:]
127
- yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
 
128
 
129
- yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
130
  yield "data: [DONE]\n\n"
131
  except httpx.HTTPStatusError as e:
132
  logger.error(f"HTTP error occurred: {e}")
@@ -135,4 +133,72 @@ async def process_streaming_response(request: ChatRequest):
135
  logger.error(f"Error occurred during request: {e}")
136
  raise HTTPException(status_code=500, detail=str(e))
137
 
138
- # Similar updates for process_non_streaming_response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  return {"role": message.role, "content": content}
69
 
70
  async def process_streaming_response(request: ChatRequest):
71
+ model = request.model # Use the requested model directly
72
+ agent_mode = AGENT_MODE.get(model, {})
73
+ trending_agent_mode = TRENDING_AGENT_MODE.get(model, {})
74
+ model_prefix = MODEL_PREFIXES.get(model, "")
75
 
76
  hid = validate.getHid()
77
+ logger.info(f"Using hid: {hid} for model: {model}")
 
 
 
 
78
 
79
  json_data = {
80
  "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
 
95
  "clickedForceWebSearch": False,
96
  "visitFromDelta": False,
97
  "mobileClient": False,
98
+ "userSelectedModel": model, # Use the requested model
99
  "validated": hid
100
  }
101
 
 
117
  validate.getHid(True)
118
  content = "hid已刷新,重新对话即可"
119
  logger.info(f"hid refreshed due to content: {content}")
120
+ yield f"data: {json.dumps(create_chat_completion_data(content, model, timestamp))}\n\n"
121
  break
122
  if content.startswith("$@$v=undefined-rv1$@$"):
123
  content = content[21:]
124
+ # Do not strip model prefix or modify content
125
+ yield f"data: {json.dumps(create_chat_completion_data(content, model, timestamp))}\n\n"
126
 
127
+ yield f"data: {json.dumps(create_chat_completion_data('', model, timestamp, 'stop'))}\n\n"
128
  yield "data: [DONE]\n\n"
129
  except httpx.HTTPStatusError as e:
130
  logger.error(f"HTTP error occurred: {e}")
 
133
  logger.error(f"Error occurred during request: {e}")
134
  raise HTTPException(status_code=500, detail=str(e))
135
 
136
async def process_non_streaming_response(request: ChatRequest):
    """Send a chat request upstream, collect the full reply, and return it
    as an OpenAI-style chat.completion response dict.

    Raises:
        HTTPException: with the upstream status code on an HTTP error
            response, or 500 on a transport-level request error.
    """
    model = request.model  # Use the requested model directly (no mapping/translation)
    agent_mode = AGENT_MODE.get(model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(model, {})
    model_prefix = MODEL_PREFIXES.get(model, "")

    hid = validate.getHid()
    logger.info(f"Using hid: {hid} for model: {model}")

    json_data = {
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": model,  # Use the requested model
        "validated": hid,
    }

    # Accumulate chunks in a list and join once at the end: repeated
    # `full_response += chunk` is quadratic in the worst case.
    chunks = []
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST", url=f"{BASE_URL}/api/chat", headers=headers, json=json_data
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    chunks.append(chunk)
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
    full_response = "".join(chunks)

    if "https://www.blackbox.ai" in full_response:
        # Upstream returned its landing-page URL: the hid is presumably stale,
        # so force-refresh it and tell the user to retry the conversation.
        validate.getHid(True)
        full_response = "hid已刷新,重新对话即可"
        logger.info("hid refreshed due to response content")

    # Strip the upstream version-marker prefix (exactly 21 chars) if present.
    if full_response.startswith("$@$v=undefined-rv1$@$"):
        full_response = full_response[21:]
    # Do not strip model prefix or modify content

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }