Spaces:
Runtime error
Runtime error
Daryl Fung
committed on
Commit
·
7fea409
1
Parent(s):
cb2271b
added labels
Browse files
- app.py +20 -2
- yolov5/joint_all_multi.yaml +8 -0
app.py
CHANGED
@@ -1,6 +1,7 @@
|
|
1 |
import gradio as gr
|
2 |
import torch
|
3 |
import numpy as np
|
|
|
4 |
|
5 |
from yolov5.models.common import DetectMultiBackend
|
6 |
from yolov5.utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
|
@@ -23,6 +24,9 @@ hide_labels = False
|
|
23 |
hide_conf = True
|
24 |
line_thickness = 1
|
25 |
|
|
|
|
|
|
|
26 |
def joint_detection(img0):
|
27 |
global imgsz
|
28 |
img = letterbox(img0, 640, stride=stride, auto=pt)[0]
|
@@ -66,9 +70,23 @@ def joint_detection(img0):
|
|
66 |
label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
|
67 |
annotator.box_label(xyxy, label, color=colors(c, True))
|
68 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
69 |
# Stream results
|
70 |
im0 = annotator.result()
|
71 |
-
return im0
|
72 |
# if view_img:
|
73 |
# cv2.imshow(str(p), im0)
|
74 |
# cv2.waitKey(1) # 1 millisecond
|
@@ -94,5 +112,5 @@ def joint_detection(img0):
|
|
94 |
|
95 |
# Print time (inference-only)
|
96 |
|
97 |
-
iface = gr.Interface(fn=joint_detection, inputs="image", outputs="image")
|
98 |
iface.launch()
|
|
|
1 |
import gradio as gr
|
2 |
import torch
|
3 |
import numpy as np
|
4 |
+
import yaml
|
5 |
|
6 |
from yolov5.models.common import DetectMultiBackend
|
7 |
from yolov5.utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
|
|
|
24 |
hide_conf = True
|
25 |
line_thickness = 1
|
26 |
|
27 |
+
with open('yolov5/joint_all_multi.yaml', 'r') as f:
|
28 |
+
LABELS = yaml.safe_load(f)['names']
|
29 |
+
|
30 |
def joint_detection(img0):
|
31 |
global imgsz
|
32 |
img = letterbox(img0, 640, stride=stride, auto=pt)[0]
|
|
|
70 |
label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
|
71 |
annotator.box_label(xyxy, label, color=colors(c, True))
|
72 |
|
73 |
+
# save as text
|
74 |
+
# Write results
|
75 |
+
content = {}
|
76 |
+
for *xyxy, conf, cls in reversed(det):
|
77 |
+
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
|
78 |
+
x, y, width, height = xywh
|
79 |
+
current_label = LABELS[int(cls.item())]
|
80 |
+
|
81 |
+
if content.get(current_label, None) is None:
|
82 |
+
content[current_label] = []
|
83 |
+
|
84 |
+
current_dict = {'x': x, 'y': y, 'width': width, 'height': height}
|
85 |
+
content[current_label].append(current_dict) # label format
|
86 |
+
|
87 |
# Stream results
|
88 |
im0 = annotator.result()
|
89 |
+
return im0, content
|
90 |
# if view_img:
|
91 |
# cv2.imshow(str(p), im0)
|
92 |
# cv2.waitKey(1) # 1 millisecond
|
|
|
112 |
|
113 |
# Print time (inference-only)
|
114 |
|
115 |
+
iface = gr.Interface(fn=joint_detection, inputs="image", outputs=["image", "json"])
|
116 |
iface.launch()
|
yolov5/joint_all_multi.yaml
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
path: ../datasets/joint_all_multi # dataset root dir
|
2 |
+
train: train.txt # train images (relative to 'path') 118287 images
|
3 |
+
val: eval.txt # val images (relative to 'path') 5000 images
|
4 |
+
#test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
|
5 |
+
|
6 |
+
# Classes
|
7 |
+
nc: 5 # number of classes
|
8 |
+
names: ['PIP', 'MCP', 'Wrist', 'Radius', 'Ulna']
|