yym68686 committed on
Commit
8eca72e
·
1 Parent(s): 60e7a94

🐛 Bug: 1. Fix the bug where log errors were displayed repeatedly.

Browse files

2. Fix the bug that occurs when the model list is empty.

💻 Code: Remove the redundant redirection code from the GPT model interface

Files changed (3) hide show
  1. main.py +2 -4
  2. response.py +12 -40
  3. utils.py +13 -12
main.py CHANGED
@@ -220,8 +220,6 @@ async def process_request(request: Union[RequestModel, ImageGenerationRequest],
220
 
221
  return response
222
  except (Exception, HTTPException, asyncio.CancelledError, httpx.ReadError) as e:
223
- logger.error(f"Error with provider {provider['provider']}: {str(e)}")
224
-
225
  # 更新失败计数
226
  async with app.middleware_stack.app.lock:
227
  app.middleware_stack.app.channel_failure_counts[provider['provider']] += 1
@@ -340,9 +338,9 @@ class ModelRequestHandler:
340
  if provider['provider'] == provider_name:
341
  new_matching_providers.append(provider)
342
  matching_providers = new_matching_providers
343
- # import json
344
- # print("matching_providers", json.dumps(matching_providers, indent=4, ensure_ascii=False, default=circular_list_encoder))
345
 
 
 
346
  use_round_robin = True
347
  auto_retry = True
348
  if safe_get(config, 'api_keys', api_index, "preferences", "USE_ROUND_ROBIN") == False:
 
220
 
221
  return response
222
  except (Exception, HTTPException, asyncio.CancelledError, httpx.ReadError) as e:
 
 
223
  # 更新失败计数
224
  async with app.middleware_stack.app.lock:
225
  app.middleware_stack.app.channel_failure_counts[provider['provider']] += 1
 
338
  if provider['provider'] == provider_name:
339
  new_matching_providers.append(provider)
340
  matching_providers = new_matching_providers
 
 
341
 
342
+ # import json
343
+ # print("matching_providers", json.dumps(matching_providers, indent=4, ensure_ascii=False, default=circular_list_encoder))
344
  use_round_robin = True
345
  auto_retry = True
346
  if safe_get(config, 'api_keys', api_index, "preferences", "USE_ROUND_ROBIN") == False:
response.py CHANGED
@@ -140,48 +140,20 @@ async def fetch_vertex_claude_response_stream(client, url, headers, payload, mod
140
  yield "data: [DONE]\n\r\n"
141
 
142
  async def fetch_gpt_response_stream(client, url, headers, payload, max_redirects=5):
143
- redirect_count = 0
144
- while redirect_count < max_redirects:
145
- # logger.info(f"fetch_gpt_response_stream: {url}")
146
- async with client.stream('POST', url, headers=headers, json=payload) as response:
147
- error_message = await check_response(response, "fetch_gpt_response_stream")
148
- if error_message:
149
- yield error_message
150
- return
151
-
152
- buffer = ""
153
- try:
154
- async for chunk in response.aiter_text():
155
- # logger.info(f"chunk: {repr(chunk)}")
156
- buffer += chunk
157
- if chunk.startswith("<script"):
158
- import re
159
- redirect_match = re.search(r"window\.location\.href\s*=\s*'([^']+)'", chunk)
160
- if redirect_match:
161
- new_url = redirect_match.group(1)
162
- # logger.info(f"new_url: {new_url}")
163
- if not new_url.startswith('http'):
164
- # 如果是相对路径,构造完整URL
165
- # logger.info(url.split('/'))
166
- base_url = '/'.join(url.split('/')[:3])
167
- new_url = base_url + new_url
168
- url = new_url
169
- # logger.info(f"new_url: {new_url}")
170
- redirect_count += 1
171
- break
172
- redirect_count = 0
173
- while "\n" in buffer:
174
- line, buffer = buffer.split("\n", 1)
175
- # logger.info("line: %s", repr(line))
176
- if line and line != "data: " and line != "data:" and not line.startswith(": "):
177
- yield line.strip() + "\n\r\n"
178
- except httpx.RemoteProtocolError as e:
179
- yield {"error": f"fetch_gpt_response_stream RemoteProtocolError {e.__class__.__name__}", "details": str(e)}
180
- return
181
- if redirect_count == 0:
182
  return
183
 
184
- yield {"error": "Too many redirects", "details": f"Reached maximum of {max_redirects} redirects"}
 
 
 
 
 
 
 
185
 
186
  async def fetch_claude_response_stream(client, url, headers, payload, model):
187
  timestamp = datetime.timestamp(datetime.now())
 
140
  yield "data: [DONE]\n\r\n"
141
 
142
  async def fetch_gpt_response_stream(client, url, headers, payload, max_redirects=5):
143
+ async with client.stream('POST', url, headers=headers, json=payload) as response:
144
+ error_message = await check_response(response, "fetch_gpt_response_stream")
145
+ if error_message:
146
+ yield error_message
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
147
  return
148
 
149
+ buffer = ""
150
+ async for chunk in response.aiter_text():
151
+ buffer += chunk
152
+ while "\n" in buffer:
153
+ line, buffer = buffer.split("\n", 1)
154
+ # logger.info("line: %s", repr(line))
155
+ if line and line != "data: " and line != "data:" and not line.startswith(": "):
156
+ yield line.strip() + "\n\r\n"
157
 
158
  async def fetch_claude_response_stream(client, url, headers, payload, model):
159
  timestamp = datetime.timestamp(datetime.now())
utils.py CHANGED
@@ -29,18 +29,19 @@ def update_config(config_data):
29
  for index, api_key in enumerate(config_data['api_keys']):
30
  weights_dict = {}
31
  models = []
32
- for model in api_key.get('model'):
33
- if isinstance(model, dict):
34
- key, value = list(model.items())[0]
35
- provider_name = key.split("/")[0]
36
- if "/" in key:
37
- weights_dict.update({provider_name: int(value)})
38
- models.append(key)
39
- if isinstance(model, str):
40
- models.append(model)
41
- config_data['api_keys'][index]['weights'] = weights_dict
42
- config_data['api_keys'][index]['model'] = models
43
- api_keys_db[index]['model'] = models
 
44
 
45
  api_list = [item["api"] for item in api_keys_db]
46
  # logger.info(json.dumps(config_data, indent=4, ensure_ascii=False, default=circular_list_encoder))
 
29
  for index, api_key in enumerate(config_data['api_keys']):
30
  weights_dict = {}
31
  models = []
32
+ if api_key.get('model'):
33
+ for model in api_key.get('model'):
34
+ if isinstance(model, dict):
35
+ key, value = list(model.items())[0]
36
+ provider_name = key.split("/")[0]
37
+ if "/" in key:
38
+ weights_dict.update({provider_name: int(value)})
39
+ models.append(key)
40
+ if isinstance(model, str):
41
+ models.append(model)
42
+ config_data['api_keys'][index]['weights'] = weights_dict
43
+ config_data['api_keys'][index]['model'] = models
44
+ api_keys_db[index]['model'] = models
45
 
46
  api_list = [item["api"] for item in api_keys_db]
47
  # logger.info(json.dumps(config_data, indent=4, ensure_ascii=False, default=circular_list_encoder))