🐛 Bug: Remove redundant error-catching code snippets
💰 Sponsors: Thanks to @PowerHunter for the ¥1400 sponsorship. Sponsorship information has been added to the README.
- README.md +1 -1
- README_CN.md +1 -1
- main.py +5 -2
- response.py +25 -25
README.md
CHANGED
@@ -332,7 +332,7 @@ curl -X POST http://127.0.0.1:8000/v1/chat/completions \
|
|
332 |
|
333 |
We thank the following sponsors for their support:
|
334 |
<!-- ¥1000 -->
|
335 |
-
- @PowerHunter: ¥
|
336 |
|
337 |
## How to sponsor us
|
338 |
|
|
|
332 |
|
333 |
We thank the following sponsors for their support:
|
334 |
<!-- ¥1000 -->
|
335 |
+
- @PowerHunter: ¥1400
|
336 |
|
337 |
## How to sponsor us
|
338 |
|
README_CN.md
CHANGED
@@ -332,7 +332,7 @@ curl -X POST http://127.0.0.1:8000/v1/chat/completions \
|
|
332 |
|
333 |
我们感谢以下赞助商的支持:
|
334 |
<!-- ¥1000 -->
|
335 |
-
- @PowerHunter:¥
|
336 |
|
337 |
## 如何赞助我们
|
338 |
|
|
|
332 |
|
333 |
我们感谢以下赞助商的支持:
|
334 |
<!-- ¥1000 -->
|
335 |
+
- @PowerHunter:¥1400
|
336 |
|
337 |
## 如何赞助我们
|
338 |
|
main.py
CHANGED
@@ -818,7 +818,7 @@ async def process_request(request: Union[RequestModel, ImageGenerationRequest, A
|
|
818 |
current_info["provider"] = channel_id
|
819 |
return response
|
820 |
|
821 |
-
except (Exception, HTTPException, asyncio.CancelledError, httpx.ReadError, httpx.RemoteProtocolError, httpx.ReadTimeout) as e:
|
822 |
await update_channel_stats(current_info["request_id"], channel_id, request.model, current_info["api_key"], success=False)
|
823 |
raise e
|
824 |
|
@@ -1051,12 +1051,15 @@ class ModelRequestHandler:
|
|
1051 |
try:
|
1052 |
response = await process_request(request, provider, endpoint)
|
1053 |
return response
|
1054 |
-
except (Exception, HTTPException, asyncio.CancelledError, httpx.ReadError, httpx.RemoteProtocolError, httpx.ReadTimeout) as e:
|
1055 |
|
1056 |
# 根据异常类型设置状态码和错误消息
|
1057 |
if isinstance(e, httpx.ReadTimeout):
|
1058 |
status_code = 504 # Gateway Timeout
|
1059 |
error_message = "Request timed out"
|
|
|
|
|
|
|
1060 |
elif isinstance(e, httpx.ReadError):
|
1061 |
status_code = 502 # Bad Gateway
|
1062 |
error_message = "Network read error"
|
|
|
818 |
current_info["provider"] = channel_id
|
819 |
return response
|
820 |
|
821 |
+
except (Exception, HTTPException, asyncio.CancelledError, httpx.ReadError, httpx.RemoteProtocolError, httpx.ReadTimeout, httpx.ConnectError) as e:
|
822 |
await update_channel_stats(current_info["request_id"], channel_id, request.model, current_info["api_key"], success=False)
|
823 |
raise e
|
824 |
|
|
|
1051 |
try:
|
1052 |
response = await process_request(request, provider, endpoint)
|
1053 |
return response
|
1054 |
+
except (Exception, HTTPException, asyncio.CancelledError, httpx.ReadError, httpx.RemoteProtocolError, httpx.ReadTimeout, httpx.ConnectError) as e:
|
1055 |
|
1056 |
# 根据异常类型设置状态码和错误消息
|
1057 |
if isinstance(e, httpx.ReadTimeout):
|
1058 |
status_code = 504 # Gateway Timeout
|
1059 |
error_message = "Request timed out"
|
1060 |
+
elif isinstance(e, httpx.ConnectError):
|
1061 |
+
status_code = 503 # Service Unavailable
|
1062 |
+
error_message = "Unable to connect to service"
|
1063 |
elif isinstance(e, httpx.ReadError):
|
1064 |
status_code = 502 # Bad Gateway
|
1065 |
error_message = "Network read error"
|
response.py
CHANGED
@@ -364,28 +364,28 @@ async def fetch_response(client, url, headers, payload, engine, model):
|
|
364 |
yield response_json
|
365 |
|
366 |
async def fetch_response_stream(client, url, headers, payload, engine, model):
|
367 |
-
try:
|
368 |
-
|
369 |
-
|
370 |
-
|
371 |
-
|
372 |
-
|
373 |
-
|
374 |
-
|
375 |
-
|
376 |
-
|
377 |
-
|
378 |
-
|
379 |
-
|
380 |
-
|
381 |
-
|
382 |
-
|
383 |
-
|
384 |
-
|
385 |
-
|
386 |
-
|
387 |
-
|
388 |
-
except httpx.ConnectError as e:
|
389 |
-
|
390 |
-
except httpx.ReadTimeout as e:
|
391 |
-
|
|
|
364 |
yield response_json
|
365 |
|
366 |
async def fetch_response_stream(client, url, headers, payload, engine, model):
|
367 |
+
# try:
|
368 |
+
if engine == "gemini" or engine == "vertex-gemini":
|
369 |
+
async for chunk in fetch_gemini_response_stream(client, url, headers, payload, model):
|
370 |
+
yield chunk
|
371 |
+
elif engine == "claude" or engine == "vertex-claude":
|
372 |
+
async for chunk in fetch_claude_response_stream(client, url, headers, payload, model):
|
373 |
+
yield chunk
|
374 |
+
elif engine == "gpt":
|
375 |
+
async for chunk in fetch_gpt_response_stream(client, url, headers, payload):
|
376 |
+
yield chunk
|
377 |
+
elif engine == "openrouter":
|
378 |
+
async for chunk in fetch_gpt_response_stream(client, url, headers, payload):
|
379 |
+
yield chunk
|
380 |
+
elif engine == "cloudflare":
|
381 |
+
async for chunk in fetch_cloudflare_response_stream(client, url, headers, payload, model):
|
382 |
+
yield chunk
|
383 |
+
elif engine == "cohere":
|
384 |
+
async for chunk in fetch_cohere_response_stream(client, url, headers, payload, model):
|
385 |
+
yield chunk
|
386 |
+
else:
|
387 |
+
raise ValueError("Unknown response")
|
388 |
+
# except httpx.ConnectError as e:
|
389 |
+
# yield {"error": f"500", "details": "fetch_response_stream Connect Error"}
|
390 |
+
# except httpx.ReadTimeout as e:
|
391 |
+
# yield {"error": f"500", "details": "fetch_response_stream Read Response Timeout"}
|