MvitHYF committed on
Commit 713bff8 · verified · 1 parent: 5740120

Upload 3 files

Files changed (3)
  1. app.py +259 -0
  2. best.pt +3 -0
  3. runs/best.pt +3 -0
app.py ADDED
@@ -0,0 +1,259 @@
+ import gradio as gr
+ import torch
+ from ultralyticsplus import YOLO, render_result
+ from PIL import Image
+
+ # torch.hub.download_url_to_file(
+ #     'https://external-content.duckduckgo.com/iu/?u=https%3A%2F%2Ftexashafts.com%2Fwp-content%2Fuploads%2F2016%2F04%2Fconstruction-worker.jpg', 'one.jpg')
+ # torch.hub.download_url_to_file(
+ #     'https://www.pearsonkoutcherlaw.com/wp-content/uploads/2020/06/Construction-Workers.jpg', 'two.jpg')
+ # torch.hub.download_url_to_file(
+ #     'https://nssgroup.com/wp-content/uploads/2019/02/Building-maintenance-blog.jpg', 'three.jpg')
+
+
+ def yoloV8_func(image: gr.inputs.Image = None, image_size=(1024, 768),
+                 # conf_threshold: gr.inputs.Slider = 0.4,
+                 # iou_threshold: gr.inputs.Slider = 0.50
+                 ):
+     """This function performs YOLOv8 object detection on the given image.
+
+     Args:
+         image (gr.inputs.Image, optional): Input image to detect objects on. Defaults to None.
+         image_size (tuple, optional): Desired image size for the model. Defaults to (1024, 768).
+         conf_threshold (gr.inputs.Slider, optional): Confidence threshold for object detection. Defaults to 0.4.
+         iou_threshold (gr.inputs.Slider, optional): Intersection over Union threshold for object detection. Defaults to 0.50.
+     """
+     # Load the YOLOv8 model from the 'best.pt' checkpoint
+     model_path = 'runs/best.pt'
+     model = YOLO(model_path)
+
+     # model.conf = 0.40          # Confidence threshold
+     # model.iou = 0.45           # IoU threshold
+     # model.agnostic = True      # NMS class-agnostic
+     # model.multi_label = False  # Whether to evaluate as multi-label classification
+     # model.max_det = 100        # Maximum number of detections per image
+
+     # image = Image.fromarray(image).resize((1024, 768))
+
+     # Perform object detection on the input image using the YOLOv8 model
+     results = model.predict(image,
+                             # conf=conf_threshold,
+                             # iou=iou_threshold,
+                             imgsz=image_size
+                             )
+
+     # Print the detected objects' information (class, coordinates, and probability)
+     box = results[0].boxes
+     print("Object type:", box.cls)
+     print("Coordinates:", box.xyxy)
+     print("Probability:", box.conf)
+
+     # Render the output image with bounding boxes around detected objects
+     render = render_result(model=model, image=image, result=results[0])
+     return render
+
+
+ inputs = [
+     gr.inputs.Image(type="filepath", label="Input Image", shape=(1024, 768)),
+     # gr.inputs.Slider(minimum=320, maximum=1280, default=640,
+     #                  step=32, label="Image Size"),
+     # gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25,
+     #                  step=0.05, label="Confidence Threshold"),
+     # gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45,
+     #                  step=0.05, label="IOU Threshold"),
+ ]
+
+ outputs = [gr.outputs.Image(type="filepath", label="Output Image")]
+
+ title = "YOLOv8 Cocoa Seed Classification"
+
+
+ # examples = [['one.jpg', 0.5, 0.7],
+ #             ['two.jpg', 0.5, 0.6],
+ #             ['three.jpg', 0.5, 0.8]]
+
+ yolo_app = gr.Interface(
+     fn=yoloV8_func,
+     inputs=inputs,
+     outputs=outputs,
+     title=title,
+     # examples=examples,
+     cache_examples=True,
+ )
+
+ # Launch the Gradio interface in debug mode with queue enabled
+ yolo_app.launch(debug=True, enable_queue=True)
+
+ # import gradio as gr
+ # import torch
+ # from ultralyticsplus import YOLO, render_result
+
+
+ # torch.hub.download_url_to_file(
+ #     'https://external-content.duckduckgo.com/iu/?u=https%3A%2F%2Ftexashafts.com%2Fwp-content%2Fuploads%2F2016%2F04%2Fconstruction-worker.jpg', 'one.jpg')
+ # torch.hub.download_url_to_file(
+ #     'https://www.pearsonkoutcherlaw.com/wp-content/uploads/2020/06/Construction-Workers.jpg', 'two.jpg')
+ # torch.hub.download_url_to_file(
+ #     'https://nssgroup.com/wp-content/uploads/2019/02/Building-maintenance-blog.jpg', 'three.jpg')
+
+
+ # def yoloV8_func(image: gr.inputs.Image = None,
+ #                 image_size: gr.inputs.Slider = 640,
+ #                 conf_threshold: gr.inputs.Slider = 0.4,
+ #                 iou_threshold: gr.inputs.Slider = 0.50):
+ #     """This function performs YOLOv8 object detection on the given image.
+
+ #     Args:
+ #         image (gr.inputs.Image, optional): Input image to detect objects on. Defaults to None.
+ #         image_size (gr.inputs.Slider, optional): Desired image size for the model. Defaults to 640.
+ #         conf_threshold (gr.inputs.Slider, optional): Confidence threshold for object detection. Defaults to 0.4.
+ #         iou_threshold (gr.inputs.Slider, optional): Intersection over Union threshold for object detection. Defaults to 0.50.
+ #     """
+ #     # Load the YOLOv8 model from the 'best.pt' checkpoint
+ #     model_path = 'runs/best.pt'
+ #     model = YOLO(model_path)
+
+ #     # Perform object detection on the input image using the YOLOv8 model
+ #     results = model.predict(image,
+ #                             conf=conf_threshold,
+ #                             iou=iou_threshold,
+ #                             imgsz=image_size)
+
+ #     # Print the detected objects' information (class, coordinates, and probability)
+ #     box = results[0].boxes
+ #     print("Object type:", box.cls)
+ #     print("Coordinates:", box.xyxy)
+ #     print("Probability:", box.conf)
+
+ #     # Render the output image with bounding boxes around detected objects
+ #     render = render_result(model=model, image=image, result=results[0])
+ #     return render
+
+
+ # inputs = [
+ #     gr.inputs.Image(type="filepath", label="Input Image"),
+ #     gr.inputs.Slider(minimum=320, maximum=1280, default=640,
+ #                      step=32, label="Image Size"),
+ #     gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25,
+ #                      step=0.05, label="Confidence Threshold"),
+ #     gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45,
+ #                      step=0.05, label="IOU Threshold"),
+ # ]
+
+
+ # outputs = gr.outputs.Image(type="filepath", label="Output Image")
+
+ # title = "YOLOv8 101: Custom Object Detection on Construction Workers"
+
+
+ # examples = [['one.jpg', 640, 0.5, 0.7],
+ #             ['two.jpg', 800, 0.5, 0.6],
+ #             ['three.jpg', 900, 0.5, 0.8]]
+
+ # yolo_app = gr.Interface(
+ #     fn=yoloV8_func,
+ #     inputs=inputs,
+ #     outputs=outputs,
+ #     title=title,
+ #     examples=examples,
+ #     cache_examples=True,
+ # )
+
+ # # Launch the Gradio interface in debug mode with queue enabled
+ # yolo_app.launch(debug=True, enable_queue=True)
+
+
+ # import gradio as gr
+ # import torch
+ # from ultralyticsplus import YOLO, render_result
+ # from PIL import Image
+
+ # # Load your model
+ # model_path = 'runs/best.pt'
+ # model = YOLO(model_path)
+ # # model.conf = 0.40
+ # # model.iou = 0.45
+ # # model.agnostic = True
+ # # model.multi_label = False
+ # # model.max_det = 100
+ # # model.overrides['conf'] = 0.25           # NMS confidence threshold
+ # # model.overrides['iou'] = 0.45            # NMS IoU threshold
+ # # model.overrides['agnostic_nms'] = False  # NMS class-agnostic
+ # # model.overrides['max_det'] = 1000        # maximum number of detections per image
+
+ # # css = ".output_image {height: 40rem !important; width: 100% !important;}"
+
+ # def predict(input_image):
+ #     try:
+ #         # Perform inference
+ #         results = model(input_image, size=(1024, 768), augment=True)
+
+ #         # Convert result image with bounding boxes to PIL format for Gradio output
+ #         result_image = Image.fromarray(results.render()[0])
+
+ #         return result_image
+
+ #     except Exception as e:
+ #         return f"Error: {str(e)}"
+
+ # # Set up Gradio interface
+ # interface = gr.Interface(
+ #     fn=predict,
+ #     inputs=gr.inputs.Image(type="pil", label="Upload an Image1"),
+ #     outputs=gr.outputs.Image(type="pil", label="Result1"),
+ #     # css=css,
+ #     title="Object Detection using YOLOv5 - NEW MODEL 2024",
+ #     description="Upload an image to detect objects using the YOLOv5 model"
+ # )
+ # interface.launch()
+
+ # import gradio as gr
+ # import torch
+ # from ultralyticsplus import YOLO, render_result
+ # from PIL import Image
+
+
+ # torch.hub.download_url_to_file(
+ #     'https://external-content.duckduckgo.com/iu/?u=https%3A%2F%2Ftexashafts.com%2Fwp-content%2Fuploads%2F2016%2F04%2Fconstruction-worker.jpg', 'one.jpg')
+ # torch.hub.download_url_to_file(
+ #     'https://www.pearsonkoutcherlaw.com/wp-content/uploads/2020/06/Construction-Workers.jpg', 'two.jpg')
+ # torch.hub.download_url_to_file(
+ #     'https://nssgroup.com/wp-content/uploads/2019/02/Building-maintenance-blog.jpg', 'three.jpg')
+
+ # model_path = 'runs/best.pt'
+ # model = YOLO(model_path)
+
+ # def predict(input_image):
+ #     try:
+ #         # Perform inference
+ #         results = model(input_image, size=(1024, 768), augment=True)
+
+ #         # Convert result image with bounding boxes to PIL format for Gradio output
+ #         result_image = Image.fromarray(results.render()[0])
+
+ #         return result_image
+
+ #     except Exception as e:
+ #         return f"Error: {str(e)}"
+
+ # # Set up Gradio interface
+ # interface = gr.Interface(
+ #     fn=predict,
+ #     inputs=gr.inputs.Image(type="pil", label="Upload an Image1"),
+ #     outputs=gr.outputs.Image(type="pil", label="Result1"),
+ #     # css=css,
+ #     title="Object Detection using YOLOv8 - NEW MODEL 2024",
+ #     description="Upload an image to detect objects using the YOLOv8 model"
+ # )
+ # interface.launch()
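
The checkpoint wired in above can be smoke-tested without launching the Gradio interface by calling the same ultralyticsplus API that app.py uses. A minimal sketch, assuming the weights sit at runs/best.pt as in the diff; 'sample.jpg' is a placeholder for any local test image:

from ultralyticsplus import YOLO, render_result

# Load the checkpoint exactly as app.py does
model = YOLO('runs/best.pt')

# 'sample.jpg' is a placeholder path, not a file from this commit
results = model.predict('sample.jpg', imgsz=(1024, 768))

# Same fields app.py prints: class ids, xyxy boxes, confidences
print(results[0].boxes.cls, results[0].boxes.xyxy, results[0].boxes.conf)

# render_result returns a PIL image with the detections drawn
render_result(model=model, image='sample.jpg', result=results[0]).save('annotated.jpg')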
best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c0c102143d76641779e797f2c49b289cec0d534a66ca99d9c101d656770acb5
+ size 6280494
runs/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c0c102143d76641779e797f2c49b289cec0d534a66ca99d9c101d656770acb5
+ size 6280494
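
These two entries are Git LFS pointer files, not the weights themselves, and both record the same sha256 and size, so best.pt and runs/best.pt resolve to one 6,280,494-byte checkpoint stored in LFS. A sketch of fetching the resolved file with huggingface_hub; the repo id "user/space-name" is a placeholder, not this Space's actual id:

from huggingface_hub import hf_hub_download

# "user/space-name" is a placeholder — substitute the actual Space id
weights_path = hf_hub_download(
    repo_id="user/space-name",
    filename="runs/best.pt",
    repo_type="space",
)
print(weights_path)  # local cache path; the Hub resolves the LFS pointer server-side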