Spaces · Runtime error
SerdarHelli committed · commit 7174da6 · 1 parent: 6f4e6ed
Update app.py
app.py CHANGED
@@ -7,6 +7,79 @@ import random
 import cv2
 import torch

+from imutils import perspective
+
+
+def midpoint(ptA, ptB):
+    return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)
+# Load in image, convert to gray scale, and Otsu's threshold
+kernel1 =( np.ones((5,5), dtype=np.float32))
+blur_radius=0.5
+kernel_sharpening = np.array([[-1,-1,-1],
+                              [-1,9,-1],
+                              [-1,-1,-1]])*(1/9)
+
+
+def cca_analysis(image,predicted_mask):
+
+    image2=np.asarray(image)
+    print(image.shape)
+    image = cv2.resize(predicted_mask, (image2.shape[1],image2.shape[1]), interpolation = cv2.INTER_AREA)
+
+    image=cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel1,iterations=1 )
+    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
+
+    labels=cv2.connectedComponents(thresh,connectivity=8)[1]
+    a=np.unique(labels)
+    count2=0
+    for label in a:
+        if label == 0:
+            continue
+
+        # Create a mask
+        mask = np.zeros(thresh.shape, dtype="uint8")
+        mask[labels == label] = 255
+
+        # Find contours and determine contour area
+        cnts,hieararch = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+        cnts = cnts[0]
+        c_area = cv2.contourArea(cnts)
+        # threshhold for tooth count
+        if c_area>100:
+            count2+=1
+
+        rect = cv2.minAreaRect(cnts)
+        box = cv2.boxPoints(rect)
+        box = np.array(box, dtype="int")
+        box = perspective.order_points(box)
+        color1 = (list(np.random.choice(range(150), size=3)))
+        color =[int(color1[0]), int(color1[1]), int(color1[2])]
+        cv2.drawContours(image2,[box.astype("int")],0,color,2)
+        (tl,tr,br,bl)=box
+
+        (tltrX,tltrY)=midpoint(tl,tr)
+        (blbrX,blbrY)=midpoint(bl,br)
+        # compute the midpoint between the top-left and top-right points,
+        # followed by the midpoint between the top-righ and bottom-right
+        (tlblX,tlblY)=midpoint(tl,bl)
+        (trbrX,trbrY)=midpoint(tr,br)
+        # draw the midpoints on the image
+        cv2.circle(image2, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
+        cv2.circle(image2, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
+        cv2.circle(image2, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
+        cv2.circle(image2, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
+        cv2.line(image2, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),color, 2)
+        cv2.line(image2, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),color, 2)
+    return image2
+
+def to_rgb(img):
+    result_new=np.zeros((img.shape[1],img.shape[0],3))
+    result_new[:,:,0]=img
+    result_new[:,:,1]=img
+    result_new[:,:,2]=img
+    result_new=np.uint8(result_new*255)
+    return result_new

 image_list = [
     "data/1.png",
@@ -41,10 +114,10 @@ def visualize_instance_seg_mask(mask):
     return image


-def Segformer_Segmentation(image_path, model_id):
+def Segformer_Segmentation(image_path, model_id,postpro):
     output_save = "output.png"

-    test_image =
+    test_image = cv2.imread(image_path)

     model = SegformerForSemanticSegmentation.from_pretrained(model_id)
     proccessor = SegformerImageProcessor(model_id)
@@ -55,9 +128,13 @@ def Segformer_Segmentation(image_path, model_id):

     result = proccessor.post_process_semantic_segmentation(outputs)[0]
     result = np.array(result)
-
-
+    if postpro=="Connected Components Labelling":
+        result=to_rgb(result)
+        result=cca_analysis(test_image,result)
+    else:
+        result = visualize_instance_seg_mask(result)

+    cv2.imwrite(output_save, result)
     return image_path, output_save

 examples = [[image_list[0], "deprem-ml/deprem_satellite_semantic_whu"],
@@ -75,6 +152,8 @@ with app:
             gr.Markdown("Video")
             input_video = gr.Image(type='filepath')
             model_id = gr.Dropdown(value=model_path[0], choices=model_path)
+            cca = gr.Dropdown(value="Connected Components Labelling", choices=["Connected Components Labelling","No Post Process"])
+
             input_video_button = gr.Button(value="Predict")

         with gr.Column():
@@ -85,6 +164,6 @@


     gr.Examples(examples, inputs=[input_video, model_id], outputs=[output_orijinal_image, output_mask_image], fn=Segformer_Segmentation, cache_examples=True)
-    input_video_button.click(Segformer_Segmentation, inputs=[input_video, model_id], outputs=[output_orijinal_image, output_mask_image])
+    input_video_button.click(Segformer_Segmentation, inputs=[input_video, model_id,cca], outputs=[output_orijinal_image, output_mask_image])

-app.launch()
+app.launch(debug=True)
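For reference, a minimal, self-contained sketch (not part of the commit) of what the connected-components pass in cca_analysis does, run on a synthetic binary mask; the 100-pixel area threshold and the minAreaRect box drawing mirror the code above, while the mask, colours, and output file name are made up for illustration.

import cv2
import numpy as np

# Synthetic binary mask with two filled components.
mask = np.zeros((200, 200), dtype=np.uint8)
cv2.rectangle(mask, (30, 40), (90, 120), 255, -1)
cv2.circle(mask, (150, 150), 25, 255, -1)

num_labels, labels = cv2.connectedComponents(mask, connectivity=8)
canvas = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
count = 0
for label in range(1, num_labels):               # label 0 is the background
    component = np.uint8(labels == label) * 255
    cnts, _ = cv2.findContours(component, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if cv2.contourArea(cnts[0]) > 100:           # same area threshold as cca_analysis
        count += 1
        box = cv2.boxPoints(cv2.minAreaRect(cnts[0])).astype(np.int32)
        cv2.drawContours(canvas, [box], 0, (0, 255, 0), 2)
print(count)                                     # -> 2 components kept
cv2.imwrite("cca_sketch.png", canvas)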
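to_rgb just copies the single-channel mask into three channels and rescales it to 8-bit; a tiny sketch of the same idea is below (note that the committed helper allocates the output as (width, height, 3), so the shapes only line up for square masks).

import numpy as np

mask = np.random.randint(0, 2, size=(4, 4))                  # toy (H, W) mask of 0/1 class ids
rgb = np.uint8(np.stack([mask, mask, mask], axis=-1) * 255)  # (H, W, 3) uint8
print(rgb.shape, rgb.dtype)                                  # (4, 4, 3) uint8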
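The inference itself is untouched by this commit; for context, a hedged sketch of the usual transformers SegFormer flow (the commit constructs SegformerImageProcessor positionally, whereas the documented pattern is from_pretrained; the model id and image path below are taken from the examples list, everything else is illustrative).

import numpy as np
import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

model_id = "deprem-ml/deprem_satellite_semantic_whu"
model = SegformerForSemanticSegmentation.from_pretrained(model_id)
processor = SegformerImageProcessor.from_pretrained(model_id)

image = Image.open("data/1.png").convert("RGB")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Upsample the logits to the input size and take the per-pixel argmax.
result = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
result = np.array(result)   # (H, W) array of class ids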
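With the new third parameter, a direct call to the updated function (hypothetical, run inside app.py's own namespace) would look like this; the third argument is exactly the string the new dropdown supplies.

orig_path, mask_path = Segformer_Segmentation(
    "data/1.png",
    "deprem-ml/deprem_satellite_semantic_whu",
    "Connected Components Labelling",   # or "No Post Process" to skip cca_analysis
)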
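The UI wiring reduces to the pattern below, shown as a self-contained sketch with illustrative names (not the ones in app.py): the dropdown's current value is appended to the click handler's inputs and arrives as the third positional argument. Note that the gr.Examples call above still lists only [input_video, model_id] as inputs, so the cached examples appear to invoke Segformer_Segmentation without the new postpro argument.

import gradio as gr

def predict(image_path, model_id, postpro):
    # Stand-in for Segformer_Segmentation: just echo what the UI passed in.
    return f"{model_id} / {postpro}"

with gr.Blocks() as demo:
    image_in = gr.Image(type="filepath")
    model_in = gr.Dropdown(value="model-a", choices=["model-a", "model-b"])
    postpro_in = gr.Dropdown(value="Connected Components Labelling",
                             choices=["Connected Components Labelling", "No Post Process"])
    button = gr.Button(value="Predict")
    text_out = gr.Textbox()
    button.click(predict, inputs=[image_in, model_in, postpro_in], outputs=text_out)

# demo.launch()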