yangtb24 committed
Commit 3ae260c
Parent(s): ec74129

Update app.py

Files changed (1)
  1. app.py +85 -85
app.py CHANGED
@@ -5,11 +5,8 @@ import requests
 import json
 import random
 import uuid
-import concurrent.fu
-import base64
-import io
+import concurrent.futures
 import threading
-from PIL import Imagetures
 from datetime import datetime, timedelta
 from apscheduler.schedulers.background import BackgroundScheduler
 from flask import Flask, request, jsonify, Response, stream_with_context
@@ -562,10 +559,8 @@ def handsome_chat_completions():
 
     request_type = determine_request_type(
         model_name,
-        text_models,
-        free_text_models,
-        image_models,
-        free_image_models
+        text_models + image_models,
+        free_text_models + free_image_models
     )
 
     api_key = select_key(request_type, model_name)
@@ -580,44 +575,46 @@ def handsome_chat_completions():
                 )
             }
         ), 429
-
+
     headers = {
         "Authorization": f"Bearer {api_key}",
         "Content-Type": "application/json"
     }
-
-    if model_name in image_models or model_name in free_image_models:
-        # Handle image model
-        user_content = ""
-        messages = data.get("messages", [])
-        for message in messages:
-            if message["role"] == "user":
-                if isinstance(message["content"], str):
-                    user_content += message["content"] + " "
-                elif isinstance(message["content"], list):
-                    for item in message["content"]:
-                        if (
-                            isinstance(item, dict) and
-                            item.get("type") == "text"
-                        ):
-                            user_content += (
-                                item.get("text", "") +
-                                " "
-                            )
-        user_content = user_content.strip()
-
+
+    if model_name in image_models:
+        # Handle image generation
+        # Map OpenAI-style parameters to SiliconFlow's parameters
         siliconflow_data = {
             "model": model_name,
-            "prompt": user_content,
-            "image_size": "1024x1024",
-            "batch_size": 1,
-            "num_inference_steps": 20,
-            "guidance_scale": 7.5,
-            "negative_prompt": None,
-            "seed": None,
+            "prompt": data.get("messages", [{}])[0].get("content") if isinstance(data.get("messages"), list) else "",
+            "image_size": data.get("size", "1024x1024"),
+            "batch_size": data.get("n", 1),
+            "num_inference_steps": data.get("steps", 20),
+            "guidance_scale": data.get("guidance_scale", 7.5),
+            "negative_prompt": data.get("negative_prompt"),
+            "seed": data.get("seed"),
             "prompt_enhancement": False,
         }
 
+        # Parameter validation and adjustments
+        if siliconflow_data["batch_size"] < 1:
+            siliconflow_data["batch_size"] = 1
+        if siliconflow_data["batch_size"] > 4:
+            siliconflow_data["batch_size"] = 4
+
+        if siliconflow_data["num_inference_steps"] < 1:
+            siliconflow_data["num_inference_steps"] = 1
+        if siliconflow_data["num_inference_steps"] > 50:
+            siliconflow_data["num_inference_steps"] = 50
+
+        if siliconflow_data["guidance_scale"] < 0:
+            siliconflow_data["guidance_scale"] = 0
+        if siliconflow_data["guidance_scale"] > 100:
+            siliconflow_data["guidance_scale"] = 100
+
+        if siliconflow_data["image_size"] not in ["1024x1024", "512x1024", "768x512", "768x1024", "1024x576", "576x1024"]:
+            siliconflow_data["image_size"] = "1024x1024"
+
         try:
             start_time = time.time()
             response = requests.post(
@@ -626,7 +623,7 @@ def handsome_chat_completions():
                 json=siliconflow_data,
                 timeout=120
             )
-
+
             if response.status_code == 429:
                 return jsonify(response.json()), 429
 
@@ -637,13 +634,32 @@ def handsome_chat_completions():
 
             try:
                 images = response_json.get("images", [])
-                openai_images = []
-                for image_url in images:
-                    openai_images.append({"url": image_url})
-
+
+                # Extract the first URL if available
+                image_url = ""
+                if images and isinstance(images[0], dict) and "url" in images[0]:
+                    image_url = images[0]["url"]
+                    logging.info(f"Extracted image URL: {image_url}")
+                elif images and isinstance(images[0], str):
+                    image_url = images[0]
+                    logging.info(f"Extracted image URL: {image_url}")
+
+                # Construct the expected JSON output - Mimicking OpenAI
                 response_data = {
-                    "created": int(time.time()),
-                    "data": openai_images
+                    "id": f"chatcmpl-{uuid.uuid4()}",
+                    "object": "chat.completion",
+                    "created": int(time.time()),
+                    "model": model_name,
+                    "choices": [
+                        {
+                            "index": 0,
+                            "message": {
+                                "role": "assistant",
+                                "content": image_url if image_url else "Failed to generate image",  # Directly return the URL in content
+                            },
+                            "finish_reason": "stop",
+                        }
+                    ],
                 }
 
             except (KeyError, ValueError, IndexError) as e:
@@ -652,15 +668,26 @@ def handsome_chat_completions():
                     f"完整内容: {response_json}"
                 )
                 response_data = {
-                    "created": int(time.time()),
-                    "data": []
+                    "id": f"chatcmpl-{uuid.uuid4()}",
+                    "object": "chat.completion",
+                    "created": int(time.time()),
+                    "model": model_name,
+                    "choices": [
+                        {
+                            "index": 0,
+                            "message": {
+                                "role": "assistant",
+                                "content": "Failed to process image data",
+                            },
+                            "finish_reason": "stop",
+                        }
+                    ],
                 }
 
             logging.info(
                 f"使用的key: {api_key}, "
                 f"总共用时: {total_time:.4f}秒, "
-                f"使用的模型: {model_name}, "
-                f"用户的内容: {user_content}"
+                f"使用的模型: {model_name}"
             )
 
             with data_lock:
@@ -668,13 +695,11 @@ def handsome_chat_completions():
                 token_counts.append(0)  # Image generation doesn't use tokens
 
             return jsonify(response_data)
-
        except requests.exceptions.RequestException as e:
            logging.error(f"请求转发异常: {e}")
            return jsonify({"error": str(e)}), 500
-
    else:
-        # Handle text model
+        # Existing text-based model handling logic
        try:
            start_time = time.time()
            response = requests.post(
@@ -684,7 +709,6 @@ def handsome_chat_completions():
                stream=data.get("stream", False),
                timeout=60
            )
-
            if response.status_code == 429:
                return jsonify(response.json()), 429
 
@@ -836,7 +860,8 @@ def handsome_chat_completions():
                                item.get("type") == "text"
                            ):
                                user_content += (
-                                    item.get("text", "") + " "
+                                    item.get("text", "") +
+                                    " "
                                )
 
            user_content = user_content.strip()
@@ -878,7 +903,6 @@ def list_models():
 
    detailed_models = []
 
-    # 添加文本模型
    for model in text_models:
        detailed_models.append({
            "id": model,
@@ -905,7 +929,6 @@ def list_models():
            "parent": None
        })
 
-    # 添加 embedding 模型
    for model in embedding_models:
        detailed_models.append({
            "id": model,
@@ -931,36 +954,9 @@ def list_models():
            "root": model,
            "parent": None
        })
-
-    # 添加图像模型
-    for model in image_models:
-        detailed_models.append({
-            "id": model,
-            "object": "model",
-            "created": 1678888888,
-            "owned_by": "openai",
-            "permission": [
-                {
-                    "id": f"modelperm-{uuid.uuid4().hex}",
-                    "object": "model_permission",
-                    "created": 1678888888,
-                    "allow_create_engine": False,
-                    "allow_sampling": True,
-                    "allow_logprobs": False,
-                    "allow_search_indices": False,
-                    "allow_view": True,
-                    "allow_fine_tuning": False,
-                    "organization": "*",
-                    "group": None,
-                    "is_blocking": False
-                }
-            ],
-            "root": model,
-            "parent": None
-        })
 
-    for model in free_image_models:
-        detailed_models.append({
+    for model in image_models:
+        detailed_models.append({
            "id": model,
            "object": "model",
            "created": 1678888888,
@@ -972,7 +968,7 @@ def list_models():
                    "created": 1678888888,
                    "allow_create_engine": False,
                    "allow_sampling": True,
-                    "allow_logprobs": False,
+                    "allow_logprobs": True,
                    "allow_search_indices": False,
                    "allow_view": True,
                    "allow_fine_tuning": False,
@@ -1154,6 +1150,10 @@ def handsome_embeddings():
    except requests.exceptions.RequestException as e:
        return jsonify({"error": str(e)}), 500
 
+import base64
+import io
+from PIL import Image
+
 @app.route('/handsome/v1/images/generations', methods=['POST'])
 def handsome_images_generations():
     if not check_authorization(request):
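
Note: as a standalone illustration of the image-model branch added in this commit, the sketch below re-states the OpenAI-to-SiliconFlow parameter mapping and clamping outside of app.py. It is a minimal sketch, not part of the repository; the helper name map_openai_to_siliconflow and the sample model name are made up for the example, while the defaults and limits mirror the diff (batch_size 1-4, num_inference_steps 1-50, guidance_scale 0-100, and a fixed list of allowed image sizes).

    # Minimal sketch of the OpenAI -> SiliconFlow parameter mapping used in this commit.
    # The function name and the example payload below are illustrative, not from app.py.

    def map_openai_to_siliconflow(data: dict, model_name: str) -> dict:
        messages = data.get("messages")
        payload = {
            "model": model_name,
            # The first message's content is used as the prompt, as in the diff.
            "prompt": messages[0].get("content") if isinstance(messages, list) and messages else "",
            "image_size": data.get("size", "1024x1024"),
            "batch_size": data.get("n", 1),
            "num_inference_steps": data.get("steps", 20),
            "guidance_scale": data.get("guidance_scale", 7.5),
            "negative_prompt": data.get("negative_prompt"),
            "seed": data.get("seed"),
            "prompt_enhancement": False,
        }

        # Clamp values to the same ranges enforced in the commit.
        payload["batch_size"] = min(max(payload["batch_size"], 1), 4)
        payload["num_inference_steps"] = min(max(payload["num_inference_steps"], 1), 50)
        payload["guidance_scale"] = min(max(payload["guidance_scale"], 0), 100)
        if payload["image_size"] not in [
            "1024x1024", "512x1024", "768x512", "768x1024", "1024x576", "576x1024"
        ]:
            payload["image_size"] = "1024x1024"
        return payload


    if __name__ == "__main__":
        example = {
            "messages": [{"role": "user", "content": "a watercolor fox"}],
            "n": 9,               # clamped to 4
            "steps": 80,          # clamped to 50
            "size": "2048x2048",  # not in the allow-list, falls back to 1024x1024
        }
        # The model name here is only a placeholder for an entry of image_models.
        print(map_openai_to_siliconflow(example, "example/image-model"))

With this example payload, n=9 is clamped to 4, steps=80 to 50, and the unsupported size falls back to 1024x1024, matching the behaviour of the handler in the diff; the handler then wraps the first returned image URL in a chat.completion-style response whose assistant message content is the URL.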