yangtb24 commited on
Commit
ec74129
1 Parent(s): fccd514

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +89 -113
app.py CHANGED
@@ -5,8 +5,11 @@ import requests
5
  import json
6
  import random
7
  import uuid
8
- import concurrent.futures
 
 
9
  import threading
 
10
  from datetime import datetime, timedelta
11
  from apscheduler.schedulers.background import BackgroundScheduler
12
  from flask import Flask, request, jsonify, Response, stream_with_context
@@ -556,13 +559,15 @@ def handsome_chat_completions():
556
  return jsonify({"error": "Invalid request data"}), 400
557
 
558
  model_name = data['model']
559
-
560
  request_type = determine_request_type(
561
  model_name,
562
- text_models + image_models,
563
- free_text_models + free_image_models
 
 
564
  )
565
-
566
  api_key = select_key(request_type, model_name)
567
 
568
  if not api_key:
@@ -575,46 +580,44 @@ def handsome_chat_completions():
575
  )
576
  }
577
  ), 429
578
-
579
  headers = {
580
  "Authorization": f"Bearer {api_key}",
581
  "Content-Type": "application/json"
582
  }
583
-
584
- if model_name in image_models:
585
- # Handle image generation
586
- # Map OpenAI-style parameters to SiliconFlow's parameters
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
587
  siliconflow_data = {
588
  "model": model_name,
589
- "prompt": data.get("messages", [{}])[0].get("content") if isinstance(data.get("messages"), list) else "",
590
- "image_size": data.get("size", "1024x1024"),
591
- "batch_size": data.get("n", 1),
592
- "num_inference_steps": data.get("steps", 20),
593
- "guidance_scale": data.get("guidance_scale", 7.5),
594
- "negative_prompt": data.get("negative_prompt"),
595
- "seed": data.get("seed"),
596
  "prompt_enhancement": False,
597
  }
598
-
599
- # Parameter validation and adjustments
600
- if siliconflow_data["batch_size"] < 1:
601
- siliconflow_data["batch_size"] = 1
602
- if siliconflow_data["batch_size"] > 4:
603
- siliconflow_data["batch_size"] = 4
604
-
605
- if siliconflow_data["num_inference_steps"] < 1:
606
- siliconflow_data["num_inference_steps"] = 1
607
- if siliconflow_data["num_inference_steps"] > 50:
608
- siliconflow_data["num_inference_steps"] = 50
609
-
610
- if siliconflow_data["guidance_scale"] < 0:
611
- siliconflow_data["guidance_scale"] = 0
612
- if siliconflow_data["guidance_scale"] > 100:
613
- siliconflow_data["guidance_scale"] = 100
614
-
615
- if siliconflow_data["image_size"] not in ["1024x1024", "512x1024", "768x512", "768x1024", "1024x576", "576x1024"]:
616
- siliconflow_data["image_size"] = "1024x1024"
617
-
618
  try:
619
  start_time = time.time()
620
  response = requests.post(
@@ -623,7 +626,7 @@ def handsome_chat_completions():
623
  json=siliconflow_data,
624
  timeout=120
625
  )
626
-
627
  if response.status_code == 429:
628
  return jsonify(response.json()), 429
629
 
@@ -631,53 +634,16 @@ def handsome_chat_completions():
631
  end_time = time.time()
632
  response_json = response.json()
633
  total_time = end_time - start_time
634
-
635
  try:
636
  images = response_json.get("images", [])
637
-
638
- # Extract the first URL if available
639
- image_url = ""
640
- if images and isinstance(images[0], dict) and "url" in images[0]:
641
- image_url = images[0]["url"]
642
- logging.info(f"Extracted image URL: {image_url}")
643
- elif images and isinstance(images[0], str):
644
- image_url = images[0]
645
- logging.info(f"Extracted image URL: {image_url}")
646
-
647
- # Construct the expected JSON output - Mimicking OpenAI
648
  response_data = {
649
- "id": f"chatcmpl-{uuid.uuid4()}",
650
- "object": "chat.completion",
651
- "created": int(time.time()),
652
- "model": model_name,
653
- "system_fingerprint": "", # Added system_fingerprint
654
- "choices": [
655
- {
656
- "index": 0,
657
- "message": {
658
- "role": "assistant",
659
- "content": None, # set to None as image is in tool_calls
660
- "tool_calls": [
661
- {
662
- "id": f"call_{uuid.uuid4()}",
663
- "type": "function",
664
- "function": {
665
- "name": "image_generation",
666
- "arguments": json.dumps({
667
- "image_url": image_url
668
- })
669
- }
670
- }
671
- ]
672
- },
673
- "finish_reason": "tool_calls",
674
- }
675
- ],
676
- "usage": { # Added usage
677
- "completion_tokens": 0,
678
- "prompt_tokens": 0,
679
- "total_tokens": 0
680
- }
681
  }
682
 
683
  except (KeyError, ValueError, IndexError) as e:
@@ -686,32 +652,15 @@ def handsome_chat_completions():
686
  f"完整内容: {response_json}"
687
  )
688
  response_data = {
689
- "id": f"chatcmpl-{uuid.uuid4()}",
690
- "object": "chat.completion",
691
- "created": int(time.time()),
692
- "model": model_name,
693
- "system_fingerprint": "", # Added system_fingerprint
694
- "choices": [
695
- {
696
- "index": 0,
697
- "message": {
698
- "role": "assistant",
699
- "content": "Failed to process image data",
700
- },
701
- "finish_reason": "stop",
702
- }
703
- ],
704
- "usage": { # Added usage
705
- "completion_tokens": 0,
706
- "prompt_tokens": 0,
707
- "total_tokens": 0
708
- }
709
  }
710
 
711
  logging.info(
712
  f"使用的key: {api_key}, "
713
  f"总共用时: {total_time:.4f}秒, "
714
- f"使用的模型: {model_name}"
 
715
  )
716
 
717
  with data_lock:
@@ -719,11 +668,13 @@ def handsome_chat_completions():
719
  token_counts.append(0) # Image generation doesn't use tokens
720
 
721
  return jsonify(response_data)
 
722
  except requests.exceptions.RequestException as e:
723
  logging.error(f"请求转发异常: {e}")
724
  return jsonify({"error": str(e)}), 500
 
725
  else:
726
- # Existing text-based model handling logic
727
  try:
728
  start_time = time.time()
729
  response = requests.post(
@@ -733,6 +684,7 @@ def handsome_chat_completions():
733
  stream=data.get("stream", False),
734
  timeout=60
735
  )
 
736
  if response.status_code == 429:
737
  return jsonify(response.json()), 429
738
 
@@ -884,8 +836,7 @@ def handsome_chat_completions():
884
  item.get("type") == "text"
885
  ):
886
  user_content += (
887
- item.get("text", "") +
888
- " "
889
  )
890
 
891
  user_content = user_content.strip()
@@ -927,6 +878,7 @@ def list_models():
927
 
928
  detailed_models = []
929
 
 
930
  for model in text_models:
931
  detailed_models.append({
932
  "id": model,
@@ -953,6 +905,7 @@ def list_models():
953
  "parent": None
954
  })
955
 
 
956
  for model in embedding_models:
957
  detailed_models.append({
958
  "id": model,
@@ -978,9 +931,10 @@ def list_models():
978
  "root": model,
979
  "parent": None
980
  })
981
-
 
982
  for model in image_models:
983
- detailed_models.append({
984
  "id": model,
985
  "object": "model",
986
  "created": 1678888888,
@@ -992,7 +946,33 @@ def list_models():
992
  "created": 1678888888,
993
  "allow_create_engine": False,
994
  "allow_sampling": True,
995
- "allow_logprobs": True,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
996
  "allow_search_indices": False,
997
  "allow_view": True,
998
  "allow_fine_tuning": False,
@@ -1174,10 +1154,6 @@ def handsome_embeddings():
1174
  except requests.exceptions.RequestException as e:
1175
  return jsonify({"error": str(e)}), 500
1176
 
1177
- import base64
1178
- import io
1179
- from PIL import Image
1180
-
1181
  @app.route('/handsome/v1/images/generations', methods=['POST'])
1182
  def handsome_images_generations():
1183
  if not check_authorization(request):
 
5
  import json
6
  import random
7
  import uuid
8
+ import concurrent.futures
9
+ import base64
10
+ import io
11
  import threading
12
+ from PIL import Image
13
  from datetime import datetime, timedelta
14
  from apscheduler.schedulers.background import BackgroundScheduler
15
  from flask import Flask, request, jsonify, Response, stream_with_context
 
559
  return jsonify({"error": "Invalid request data"}), 400
560
 
561
  model_name = data['model']
562
+
563
  request_type = determine_request_type(
564
  model_name,
565
+ text_models,
566
+ free_text_models,
567
+ image_models,
568
+ free_image_models
569
  )
570
+
571
  api_key = select_key(request_type, model_name)
572
 
573
  if not api_key:
 
580
  )
581
  }
582
  ), 429
583
+
584
  headers = {
585
  "Authorization": f"Bearer {api_key}",
586
  "Content-Type": "application/json"
587
  }
588
+
589
+ if model_name in image_models or model_name in free_image_models:
590
+ # Handle image model
591
+ user_content = ""
592
+ messages = data.get("messages", [])
593
+ for message in messages:
594
+ if message["role"] == "user":
595
+ if isinstance(message["content"], str):
596
+ user_content += message["content"] + " "
597
+ elif isinstance(message["content"], list):
598
+ for item in message["content"]:
599
+ if (
600
+ isinstance(item, dict) and
601
+ item.get("type") == "text"
602
+ ):
603
+ user_content += (
604
+ item.get("text", "") +
605
+ " "
606
+ )
607
+ user_content = user_content.strip()
608
+
609
  siliconflow_data = {
610
  "model": model_name,
611
+ "prompt": user_content,
612
+ "image_size": "1024x1024",
613
+ "batch_size": 1,
614
+ "num_inference_steps": 20,
615
+ "guidance_scale": 7.5,
616
+ "negative_prompt": None,
617
+ "seed": None,
618
  "prompt_enhancement": False,
619
  }
620
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
621
  try:
622
  start_time = time.time()
623
  response = requests.post(
 
626
  json=siliconflow_data,
627
  timeout=120
628
  )
629
+
630
  if response.status_code == 429:
631
  return jsonify(response.json()), 429
632
 
 
634
  end_time = time.time()
635
  response_json = response.json()
636
  total_time = end_time - start_time
637
+
638
  try:
639
  images = response_json.get("images", [])
640
+ openai_images = []
641
+ for image_url in images:
642
+ openai_images.append({"url": image_url})
643
+
 
 
 
 
 
 
 
644
  response_data = {
645
+ "created": int(time.time()),
646
+ "data": openai_images
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
647
  }
648
 
649
  except (KeyError, ValueError, IndexError) as e:
 
652
  f"完整内容: {response_json}"
653
  )
654
  response_data = {
655
+ "created": int(time.time()),
656
+ "data": []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
657
  }
658
 
659
  logging.info(
660
  f"使用的key: {api_key}, "
661
  f"总共用时: {total_time:.4f}秒, "
662
+ f"使用的模型: {model_name}, "
663
+ f"用户的内容: {user_content}"
664
  )
665
 
666
  with data_lock:
 
668
  token_counts.append(0) # Image generation doesn't use tokens
669
 
670
  return jsonify(response_data)
671
+
672
  except requests.exceptions.RequestException as e:
673
  logging.error(f"请求转发异常: {e}")
674
  return jsonify({"error": str(e)}), 500
675
+
676
  else:
677
+ # Handle text model
678
  try:
679
  start_time = time.time()
680
  response = requests.post(
 
684
  stream=data.get("stream", False),
685
  timeout=60
686
  )
687
+
688
  if response.status_code == 429:
689
  return jsonify(response.json()), 429
690
 
 
836
  item.get("type") == "text"
837
  ):
838
  user_content += (
839
+ item.get("text", "") + " "
 
840
  )
841
 
842
  user_content = user_content.strip()
 
878
 
879
  detailed_models = []
880
 
881
+ # 添加文本模型
882
  for model in text_models:
883
  detailed_models.append({
884
  "id": model,
 
905
  "parent": None
906
  })
907
 
908
+ # 添加 embedding 模型
909
  for model in embedding_models:
910
  detailed_models.append({
911
  "id": model,
 
931
  "root": model,
932
  "parent": None
933
  })
934
+
935
+ # 添加图像模型
936
  for model in image_models:
937
+ detailed_models.append({
938
  "id": model,
939
  "object": "model",
940
  "created": 1678888888,
 
946
  "created": 1678888888,
947
  "allow_create_engine": False,
948
  "allow_sampling": True,
949
+ "allow_logprobs": False,
950
+ "allow_search_indices": False,
951
+ "allow_view": True,
952
+ "allow_fine_tuning": False,
953
+ "organization": "*",
954
+ "group": None,
955
+ "is_blocking": False
956
+ }
957
+ ],
958
+ "root": model,
959
+ "parent": None
960
+ })
961
+
962
+ for model in free_image_models:
963
+ detailed_models.append({
964
+ "id": model,
965
+ "object": "model",
966
+ "created": 1678888888,
967
+ "owned_by": "openai",
968
+ "permission": [
969
+ {
970
+ "id": f"modelperm-{uuid.uuid4().hex}",
971
+ "object": "model_permission",
972
+ "created": 1678888888,
973
+ "allow_create_engine": False,
974
+ "allow_sampling": True,
975
+ "allow_logprobs": False,
976
  "allow_search_indices": False,
977
  "allow_view": True,
978
  "allow_fine_tuning": False,
 
1154
  except requests.exceptions.RequestException as e:
1155
  return jsonify({"error": str(e)}), 500
1156
 
 
 
 
 
1157
  @app.route('/handsome/v1/images/generations', methods=['POST'])
1158
  def handsome_images_generations():
1159
  if not check_authorization(request):