AI-Naga committed on
Commit
4590edf
·
1 Parent(s): 0b8a6dc

Upload 8 files

Browse files
Files changed (8) hide show
  1. 1.jpg +0 -0
  2. 2.jpg +0 -0
  3. 3.jpg +0 -0
  4. Damage_Vehicle_Y5.pt +3 -0
  5. app.py +83 -0
  6. requirements.txt +8 -0
  7. yolov5l.pt +3 -0
  8. yolov5s.pt +3 -0
1.jpg ADDED
2.jpg ADDED
3.jpg ADDED
Damage_Vehicle_Y5.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2afa57d9aba09f8577fd4a4e8f079c6d7566daf401fab314900d1ae96a7c2aa3
3
+ size 14431485
app.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from gradio.outputs import Label
3
+ import cv2
4
+ import requests
5
+ import os
6
+ import numpy as np
7
+
8
+ from ultralytics import YOLO
9
+ import yolov5
10
+
11
+ # Image download
12
+ # file_urls = [
13
+ # ]
14
+
15
+ # def download_file(url, save_name):
16
+ # url = url
17
+ # if not os.path.exists(save_name):
18
+ # file = requests.get(url)
19
+ # open(save_name, 'wb').write(file.content)
20
+
21
+ # for i, url in enumerate(file_urls):
22
+ # download_file(
23
+ # file_urls[i],
24
+ # f"image_{i}.jpg"
25
+ # )
26
+
27
# Function for inference
def yolov5_inference(
    image: gr.inputs.Image = None,
    model_path: gr.inputs.Dropdown = None,
    image_size: gr.inputs.Slider = 640,
    conf_threshold: gr.inputs.Slider = 0.25,
    iou_threshold: gr.inputs.Slider = 0.45):
    """Run YOLOv5 inference on one image and return detections plus crops.

    Args:
        image: input image (PIL image supplied by the gradio Image component).
        model_path: path/name of the ``.pt`` checkpoint selected in the Dropdown.
        image_size: inference resolution (pixels) passed to the model.
        conf_threshold: minimum confidence for a detection to be kept.
        iou_threshold: IOU threshold used for non-max suppression.

    Returns:
        tuple: (annotated image array from ``results.render()``,
        list of per-detection crop arrays converted to RGB).
    """
    # Load the selected YOLOv5 checkpoint on CPU.
    # NOTE(review): the model is re-loaded on every request; consider caching
    # per model_path if inference latency matters.
    model = yolov5.load(model_path, device="cpu")

    # Apply the user-selected thresholds.
    model.conf = conf_threshold
    model.iou = iou_threshold

    # Inference on a single-image batch.
    results = model([image], size=image_size)

    # Crop each detection without saving to disk; ``[..., ::-1]`` reverses the
    # channel axis (BGR -> RGB) for display. Comprehension replaces the
    # original manual index-and-append loop.
    crops = results.crop(save=False)
    img_crops = [crop["im"][..., ::-1] for crop in crops]

    # render() draws the boxes onto the image and returns a list of arrays.
    return results.render()[0], img_crops
51
+
52
# gradio Input components: image, model choice, and inference thresholds.
inputs = [
    gr.inputs.Image(type="pil", label="Input Image"),
    # BUG FIX: the default was 'Crime_Y5.pt', which is not one of the listed
    # choices (a leftover from another app); default to the damage-detection
    # checkpoint that the examples below actually use.
    gr.inputs.Dropdown(["Damage_Vehicle_Y5.pt", "yolov5s.pt", "yolov5m.pt", "yolov5l.pt", "yolov5x.pt"], label="Model", default='Damage_Vehicle_Y5.pt'),
    gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
]
60
+
61
# gradio Output components: the annotated image and a gallery of crops.
output_image = gr.outputs.Image(type="filepath", label="Output Image")
crop_gallery = gr.Gallery(label="Object crop")

title = "Vehicle damage detection"

# gradio examples: "Image", "Model", "Image Size", "Confidence Threshold", "IOU Threshold"
examples = [
    [sample, 'Damage_Vehicle_Y5.pt', 640, 0.35, 0.45]
    for sample in ('1.jpg', '2.jpg', '3.jpg')
]

# Assemble the gradio interface and launch it.
demo_app = gr.Interface(
    fn=yolov5_inference,
    inputs=inputs,
    outputs=[output_image, crop_gallery],
    title=title,
    examples=examples,
    cache_examples=True,
    live=True,
    theme='huggingface',
)
demo_app.launch(debug=True, enable_queue=True, width=50, height=50)
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ torch
2
+ gradio==3.4.0
3
+ opencv-python
4
+ numpy<1.24
5
+ ultralytics
6
+ yolov5
7
+
8
+
yolov5l.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f603b7354c25454d1270663a14d8ddc1eea98e5eebc1d84ce0c6e3150fa155f
3
+ size 93622629
yolov5s.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b3b748c1e592ddd8868022e8732fde20025197328490623cc16c6f24d0782ee
3
+ size 14808437