Update app.py
app.py CHANGED
@@ -653,11 +653,21 @@ def handsome_chat_completions():
                         logging.error(f"无效的图片数据: {item}")
                         openai_images.append({"url": item})

-
+                # Construct the expected JSON output
                 response_data = {
+                    "images": openai_images,  # use this key so the frontend can parse
                     "created": int(time.time()),
-                    "
+                    "timings": {
+                        "inference": total_time  # add inference time as shown in example
+                    }
                 }
+                if "seed" in response_json:
+                    response_data["seed"] = response_json["seed"]
+                if "shared_id" in response_json:
+                    response_data["shared_id"] = response_json["shared_id"]
+                if "data" in response_json:
+                    response_data["data"] = response_json["data"]
+
             except (KeyError, ValueError, IndexError) as e:
                 logging.error(
                     f"解析响应 JSON 失败: {e}, "
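For reference, a minimal runnable sketch of what this hunk now builds: an images-style payload with "images", "created" and "timings", plus optional upstream fields copied through. The values, the example URL, and the loop over the optional keys are illustrative stand-ins, not taken from app.py; total_time and response_json come from the surrounding handler.

import time

# Hypothetical stand-ins for values the real handler has at this point.
openai_images = [{"url": "https://example.com/generated.png"}]  # parsed image URLs
total_time = 2.31                    # seconds measured around the upstream call
response_json = {"seed": 42}         # upstream reply; the keys below are optional

response_data = {
    "images": openai_images,         # key the frontend parses
    "created": int(time.time()),
    "timings": {"inference": total_time},
}
# Equivalent to the three optional copies in the hunk above.
for key in ("seed", "shared_id", "data"):
    if key in response_json:
        response_data[key] = response_json[key]

print(response_data)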
@@ -683,6 +693,7 @@ def handsome_chat_completions():
             logging.error(f"请求转发异常: {e}")
             return jsonify({"error": str(e)}), 500
     else:
+        # Existing text-based model handling logic
         try:
             start_time = time.time()
             response = requests.post(
@@ -696,7 +707,8 @@ def handsome_chat_completions():
                 return jsonify(response.json()), 429

             if data.get("stream", False):
-
+                # ... (Existing stream response handling)
+                def generate():
                     first_chunk_time = None
                     full_response_content = ""
                     for chunk in response.iter_content(chunk_size=1024):
@@ -803,10 +815,14 @@ def handsome_chat_completions():
                         request_timestamps.append(time.time())
                         token_counts.append(prompt_tokens+completion_tokens)

-
-
-
-
+
+
+
+
+                return Response(
+                    stream_with_context(generate()),
+                    content_type=response.headers['Content-Type']
+                )
             else:
                 response.raise_for_status()
                 end_time = time.time()
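The streaming branch now wraps the upstream body in a local generate() generator and returns it through Flask's stream_with_context, as the last two hunks show. Below is a self-contained sketch of that pattern, assuming a placeholder route and upstream URL; neither is taken from app.py.

import requests
from flask import Flask, Response, stream_with_context

app = Flask(__name__)

@app.route("/v1/chat/completions", methods=["POST"])
def proxy_stream():
    # Placeholder upstream endpoint; the real handler forwards the incoming payload.
    upstream = requests.post(
        "https://upstream.example/v1/chat/completions",
        json={"stream": True},
        stream=True,
    )

    def generate():
        # Relay the upstream body chunk by chunk, as in the hunks above.
        for chunk in upstream.iter_content(chunk_size=1024):
            if chunk:
                yield chunk

    return Response(
        stream_with_context(generate()),
        content_type=upstream.headers["Content-Type"],
    )

stream_with_context keeps the request context alive while the generator is consumed, which is why the diff defines generate() inside the handler and returns it this way.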