Runtime error
Update app.py
app.py CHANGED
```diff
@@ -9,53 +9,83 @@ import supervision as sv
 IOU_THRESHOLD = 0.3
 CONFIDENCE_THRESHOLD = 0.2
 
-pretrained_path = "
-image_path_list = ["img_0.jpg", "img_1.jpg"]
-
+pretrained_path = "erax_nsfw_v1.onnx"
 model = YOLO(pretrained_path)
-results = model(image_path_list,
-                conf=CONFIDENCE_THRESHOLD,
-                iou=IOU_THRESHOLD
-                )
-
-
-for result in results:
-    annotated_image = result.orig_img.copy()
-    h, w = annotated_image.shape[:2]
-    anchor = h if h > w else w
-
-    # make_love class will cover entire context !!!
-    # selected_classes = [0, 1, 2, 3, 4, 5] # all classes
-    selected_classes = [0, 2, 3, 4, 5] # hidden make_love class
-    detections = sv.Detections.from_ultralytics(result)
-    detections = detections[np.isin(detections.class_id, selected_classes)]
+
+# image_path_list = ["img_0.jpg", "img_1.jpg"]
+
+
+# results = model(image_path_list,
+#                 conf=CONFIDENCE_THRESHOLD,
+#                 iou=IOU_THRESHOLD
+#                 )
+
+
+# for result in results:
+#     annotated_image = result.orig_img.copy()
+#     h, w = annotated_image.shape[:2]
+#     anchor = h if h > w else w
+
+#     # make_love class will cover entire context !!!
+#     # selected_classes = [0, 1, 2, 3, 4, 5] # all classes
+#     selected_classes = [0, 2, 3, 4, 5] # hidden make_love class
+#     detections = sv.Detections.from_ultralytics(result)
+#     detections = detections[np.isin(detections.class_id, selected_classes)]
 
-    # box_annotator = sv.BoxAnnotator()
-    # annotated_image = box_annotator.annotate(
-    #     annotated_image,
-    #     detections=detections
-    # )
+#     # box_annotator = sv.BoxAnnotator()
+#     # annotated_image = box_annotator.annotate(
+#     #     annotated_image,
+#     #     detections=detections
+#     # )
 
-    # blur_annotator = sv.BlurAnnotator(kernel_size=anchor/50)
-    # annotated_image = blur_annotator.annotate(
-    #     annotated_image.copy(),
-    #     detections=detections
-    # )
-
-    label_annotator = sv.LabelAnnotator(text_color=sv.Color.BLACK,
-                                        text_scale=anchor/1700)
-    annotated_image = label_annotator.annotate(
-        annotated_image,
-        detections=detections
-    )
-
-    pixelate_annotator = sv.PixelateAnnotator(pixel_size=anchor/50)
-    annotated_image = pixelate_annotator.annotate(
-        scene=annotated_image.copy(),
-        detections=detections
-    )
+#     # blur_annotator = sv.BlurAnnotator(kernel_size=anchor/50)
+#     # annotated_image = blur_annotator.annotate(
+#     #     annotated_image.copy(),
+#     #     detections=detections
+#     # )
+
+#     label_annotator = sv.LabelAnnotator(text_color=sv.Color.BLACK,
+#                                         text_scale=anchor/1700)
+#     annotated_image = label_annotator.annotate(
+#         annotated_image,
+#         detections=detections
+#     )
+
+#     pixelate_annotator = sv.PixelateAnnotator(pixel_size=anchor/50)
+#     annotated_image = pixelate_annotator.annotate(
+#         scene=annotated_image.copy(),
+#         detections=detections
+#     )
 
-    sv.plot_image(annotated_image, size=(10, 10))
+#     sv.plot_image(annotated_image, size=(10, 10))
+
+
+def yolov8_inference(
+    image,
+    image_size,
+    conf_threshold,
+    iou_threshold,
+):
+    """
+    YOLOv8 inference function
+    Args:
+        image: Input image
+        model_path: Path to the model
+        image_size: Image size
+        conf_threshold: Confidence threshold
+        iou_threshold: IOU threshold
+    Returns:
+        Rendered image
+    """
+    model = YOLO('erax-ai/EraX-NSFW-V1.0')
+    # set model parameters
+    model.overrides['conf'] = conf_threshold  # NMS confidence threshold
+    model.overrides['iou'] = iou_threshold  # NMS IoU threshold
+    model.overrides['agnostic_nms'] = False  # NMS class-agnostic
+    model.overrides['max_det'] = 1000  # maximum number of detections per image
+    results = model.predict(image, imgsz=image_size)
+    render = render_result(model=model, image=image, result=results[0])
+    return render
 
 
 
@@ -67,5 +97,27 @@ def greet(n):
     print(zero.device) # <-- 'cuda:0' 🤗
     return f"Hello {zero + n} Tensor"
 
-demo = gr.Interface(fn=greet, inputs=gr.Number(), outputs=gr.Text())
-demo.launch()
+# demo = gr.Interface(fn=greet, inputs=gr.Number(), outputs=gr.Text())
+
+inputs = [
+    gr.Image(type="filepath", label="Input Image"),
+    gr.Slider(minimum=320, maximum=1280, value=640, step=320, label="Image Size"),
+    gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold"),
+    gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold"),
+]
+
+outputs = gr.Image(type="filepath", label="Output Image")
+title = "State-of-the-Art YOLO Models for Object detection"
+
+# examples = [['demo_01.jpg', 'yolov8n', 640, 0.25, 0.45], ['demo_02.jpg', 'yolov8l', 640, 0.25, 0.45], ['demo_03.jpg', 'yolov8x', 1280, 0.25, 0.45]]
+demo_app = gr.Interface(
+    fn=yolov8_inference,
+    inputs=inputs,
+    outputs=outputs,
+    title=title,
+    # examples=examples,
+    # cache_examples=True,
+)
+demo_app.launch(debug=True)
+
+# demo.launch()
```