kenton-li committed on
Commit
dc276a0
·
1 Parent(s): 754575c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +87 -40
app.py CHANGED
@@ -1,22 +1,9 @@
1
  import gradio as gr
2
- import sahi
3
  import torch
4
- from ultralyticsplus import *
5
-
6
- # Images
7
- sahi.utils.file.download_from_url(
8
- "https://raw.githubusercontent.com/kadirnar/dethub/main/data/images/highway.jpg",
9
- "highway.jpg",
10
- )
11
- sahi.utils.file.download_from_url(
12
- "https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg",
13
- "small-vehicles1.jpeg",
14
- )
15
- sahi.utils.file.download_from_url(
16
- "https://raw.githubusercontent.com/ultralytics/yolov5/master/data/images/zidane.jpg",
17
- "zidane.jpg",
18
- )
19
-
20
 
21
  model_names = [
22
  "yolov8n-seg.pt",
@@ -29,13 +16,70 @@ model_names = [
29
  current_model_name = "yolov8m-seg.pt"
30
  model = YOLO(current_model_name)
31
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
 
33
  def yolov8_inference(
34
- image: gr.inputs.Image = None,
35
- model_name: gr.inputs.Dropdown = None,
36
- image_size: gr.inputs.Slider = 640,
37
- conf_threshold: gr.inputs.Slider = 0.25,
38
- iou_threshold: gr.inputs.Slider = 0.45,
 
39
  ):
40
  """
41
  YOLOv8 inference function
@@ -55,16 +99,21 @@ def yolov8_inference(
55
  current_model_name = model_name
56
  model.overrides["conf"] = conf_threshold
57
  model.overrides["iou"] = iou_threshold
58
- results = model.predict(image, imgsz=image_size, return_outputs=True)
 
59
  renders = []
60
- for image_results in model.predict(image, imgsz=image_size, return_outputs=True):
61
- render = render_model_output(
62
- model=model, image=image, model_output=image_results
63
- )
 
 
 
 
64
  renders.append(render)
 
65
 
66
- return renders[0]
67
-
68
 
69
  inputs = [
70
  gr.Image(type="filepath", label="Input Image"),
@@ -73,28 +122,26 @@ inputs = [
73
  value=current_model_name,
74
  label="Model type",
75
  ),
76
- gr.Slider(minimum=320, maximum=1280, value=640, step=32, label="Image Size"),
 
 
77
  gr.Slider(
78
  minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold"
79
  ),
80
  gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold"),
81
  ]
82
 
83
- outputs = gr.Image(type="filepath", label="Output Image")
84
- title = "Ultralytics YOLOv8 Segmentation Demo"
 
85
 
86
- examples = [
87
- ["zidane.jpg", "yolov8m-seg.pt", 640, 0.6, 0.45],
88
- ["highway.jpg", "yolov8m-seg.pt", 640, 0.25, 0.45],
89
- ["small-vehicles1.jpeg", "yolov8m-seg.pt", 640, 0.25, 0.45],
90
- ]
91
  demo_app = gr.Interface(
92
  fn=yolov8_inference,
93
  inputs=inputs,
94
  outputs=outputs,
95
  title=title,
96
- examples=examples,
97
- cache_examples=True,
98
  theme="default",
99
  )
100
- demo_app.launch(debug=True, enable_queue=True)
 
1
  import gradio as gr
 
2
  import torch
3
+ from ultralyticsplus import YOLO, render_result
4
+ import numpy as np
5
+ from PIL import Image
6
+ import cv2
 
 
 
 
 
 
 
 
 
 
 
 
7
 
8
  model_names = [
9
  "yolov8n-seg.pt",
 
16
  current_model_name = "yolov8m-seg.pt"
17
  model = YOLO(current_model_name)
18
 
19
def sort_instance_masks_by_centroid(instances_mask, reverse=False):
    """Sort instance masks by the (x, y) coordinates of their centroids.

    Args:
        instances_mask: iterable of 2-D binary masks (H, W); nonzero pixels
            belong to the instance.
        reverse: sort in descending centroid order when True.

    Returns:
        A list containing the same masks, ordered by centroid (x first,
        then y). Empty input yields an empty list.
    """
    centroids = []
    for mask in instances_mask:
        # Centroid as the mean of nonzero pixel coordinates. This replaces
        # the cv2 contour-moments route, which divided by m00 (zero for
        # degenerate masks) and only looked at the first contour of a
        # multi-part mask.
        ys, xs = np.nonzero(mask)
        if xs.size == 0:
            # Empty mask: sentinel centroid so it sorts to the front.
            centroids.append((-1, -1))
        else:
            centroids.append((int(xs.mean()), int(ys.mean())))

    # Sort indices keyed on the centroid alone. Sorting zip(centroids, masks)
    # directly would fall through to comparing the numpy arrays whenever two
    # centroids tie, which raises an ambiguous-truth-value error.
    order = sorted(range(len(centroids)), key=centroids.__getitem__, reverse=reverse)
    return [instances_mask[i] for i in order]
41
+
42
def visualize_masks(masks):
    """Render a batch of instance masks as a single RGB image.

    Args:
        masks: torch tensor of shape (num_masks, H, W); nonzero entries mark
            instance pixels (comes from YOLOv8 `results.masks.data`).

    Returns:
        (img_rgb, colors): a PIL RGB image where each mask is painted in its
        own random color over a black background, and the list of (r, g, b)
        tuples used, in paint order. Masks are painted in centroid-sorted
        order, so on overlap later masks overwrite earlier ones.
    """
    masks = masks.detach().cpu().numpy()
    height, width = masks.shape[1:]
    num_masks = masks.shape[0]
    # Stable paint order: sort masks left-to-right / top-to-bottom by centroid.
    masks = sort_instance_masks_by_centroid(masks)

    # Black background canvas (equivalent to the former Image.new -> np.array
    # round trip, without the extra PIL allocation).
    img_array = np.zeros((height, width, 3), dtype=np.uint8)
    colors = []

    for i in range(num_masks):
        # Plain python ints (not np.int64) so callers can '%02x'-format safely.
        color = tuple(int(c) for c in np.random.randint(0, 256, size=3))
        colors.append(color)
        img_array[masks[i] != 0, :] = color

    # NOTE(review): colors are uniformly random, so two instances can get
    # near-identical colors — confirm whether a fixed palette is wanted.
    img_rgb = Image.fromarray(img_array)
    return img_rgb, colors
73
+
74
+
75
 
76
  def yolov8_inference(
77
+ image = None,
78
+ model_name = None,
79
+ dest_width = 512,
80
+ dest_height = 512,
81
+ conf_threshold = 0.25,
82
+ iou_threshold = 0.45,
83
  ):
84
  """
85
  YOLOv8 inference function
 
99
  current_model_name = model_name
100
  model.overrides["conf"] = conf_threshold
101
  model.overrides["iou"] = iou_threshold
102
+ model.overrides["classes"] = [0]
103
+ results = model.predict(image)
104
  renders = []
105
+ colorarray = []
106
+ for image_results in model.predict(image):
107
+ #print("predict results: ",type(image_results.masks))
108
+ #render = render_result(
109
+ # model=model, image=image, result=image_results
110
+ #)
111
+ render ,colors= visualize_masks(image_results.masks.data)
112
+ render = render.resize((dest_width,dest_height))
113
  renders.append(render)
114
+ colorarray.append(colors)
115
 
116
+ return renders[0],','.join(['#%02x%02x%02x' % row for row in colorarray[0]])
 
117
 
118
  inputs = [
119
  gr.Image(type="filepath", label="Input Image"),
 
122
  value=current_model_name,
123
  label="Model type",
124
  ),
125
+ gr.inputs.Slider(minimum=128, maximum=2048, step=64, default=512, label="Width"),
126
+ gr.inputs.Slider(minimum=128, maximum=2048, step=64, default=512, label="Height"),
127
+
128
  gr.Slider(
129
  minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold"
130
  ),
131
  gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold"),
132
  ]
133
 
134
+ outputs = [gr.Image(type="filepath", label="Output Image"),gr.Textbox(label="Output Text")]
135
+ title = "Ultralytics YOLOv8 Segmentation For HumanBody Only Now"
136
+
137
 
 
 
 
 
 
138
# Assemble the Gradio interface and start it (blocking) with the queue enabled.
demo_app = gr.Interface(
    fn=yolov8_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    examples=None,
    cache_examples=False,
    theme="default",
)
demo_app.launch(debug=True, enable_queue=True)