SakshiRathi77 commited on
Commit
611a237
β€’
1 Parent(s): f99c7e8

Upload 4 files

Browse files
Files changed (3) hide show
  1. README.md +6 -5
  2. app.py +143 -2
  3. requirements.txt +2 -0
README.md CHANGED
@@ -1,13 +1,14 @@
1
  ---
2
- title: Void
3
- emoji: πŸ¦€
4
- colorFrom: purple
5
  colorTo: red
6
  sdk: gradio
7
- sdk_version: 4.20.1
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
 
11
  ---
12
 
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Yolov9
3
+ emoji: πŸƒ
4
+ colorFrom: gray
5
  colorTo: red
6
  sdk: gradio
7
+ sdk_version: 4.19.1
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
11
+ short_description: State-of-the-art Object Detection YOLOV9 Demo
12
  ---
13
 
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,4 +1,145 @@
1
- # SakshiRathi77/void-space-detection
2
  import gradio as gr
 
 
3
 
4
- gr.load("models/SakshiRathi77/void-space-detection").launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ import spaces
3
+ from huggingface_hub import hf_hub_download
4
 
5
+
6
def download_models(model_id):
    """Download a YOLOv9 checkpoint from the `merve/yolov9` Hub repo.

    :param model_id: Checkpoint filename, e.g. "gelan-e.pt".
    :return: Local path ("./<model_id>") of the downloaded weights file.
    """
    # `model_id` is already a string and "." is a literal path — the original
    # wrapped both in pointless f-strings.
    hf_hub_download("merve/yolov9", filename=model_id, local_dir=".")
    return f"./{model_id}"
9
+
10
@spaces.GPU
def yolov9_inference(img_path, model_id, image_size, conf_threshold, iou_threshold):
    """
    Run YOLOv9 object detection on a single image and return the annotated image.

    :param img_path: Path to the input image file.
    :param model_id: Checkpoint filename in the `merve/yolov9` repo
        (e.g. "gelan-e.pt"); downloaded on first use.
    :param image_size: Inference input size in pixels.
    :param conf_threshold: Confidence threshold used by the model's NMS.
    :param iou_threshold: IoU threshold used by the model's NMS.
    :return: A numpy image with detection bounding boxes rendered on it.
    """
    # Imported lazily so the module can be loaded (and the UI defined) even
    # before the yolov9 package has finished installing on the Space.
    import yolov9

    # Fetch the requested checkpoint (cached after the first download) and
    # load it on the GPU provided by the @spaces.GPU decorator.
    model_path = download_models(model_id)
    model = yolov9.load(model_path, device="cuda:0")

    # Thresholds are consumed by the model's internal NMS step.
    model.conf = conf_threshold
    model.iou = iou_threshold

    # Perform inference.
    results = model(img_path, size=image_size)

    # render() returns a list with one annotated image per input image.
    output = results.render()

    return output[0]
41
+
42
+
43
def app():
    """Build the demo UI: input controls, an inference button, the output
    image, and cached examples — all wired to ``yolov9_inference``."""
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                # The image is handed to the model as a file path on disk.
                img_path = gr.Image(type="filepath", label="Image")
                # Checkpoint filename inside the merve/yolov9 Hub repo.
                model_path = gr.Dropdown(
                    label="Model",
                    choices=[
                        "gelan-c.pt",
                        "gelan-e.pt",
                        "yolov9-c.pt",
                        "yolov9-e.pt",
                    ],
                    value="gelan-e.pt",
                )
                # Inference resolution; step of 32 keeps sizes at YOLO's
                # stride multiples.
                image_size = gr.Slider(
                    label="Image Size",
                    minimum=320,
                    maximum=1280,
                    step=32,
                    value=640,
                )
                # Detections below this confidence are discarded by NMS.
                conf_threshold = gr.Slider(
                    label="Confidence Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.4,
                )
                # Overlap threshold for suppressing duplicate boxes in NMS.
                iou_threshold = gr.Slider(
                    label="IoU Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.5,
                )
                yolov9_infer = gr.Button(value="Inference")

            with gr.Column():
                # Annotated image (numpy array) returned by yolov9_inference.
                output_numpy = gr.Image(type="numpy",label="Output")

        # Clicking the button runs inference with the current control values.
        yolov9_infer.click(
            fn=yolov9_inference,
            inputs=[
                img_path,
                model_path,
                image_size,
                conf_threshold,
                iou_threshold,
            ],
            outputs=[output_numpy],
        )

        # Pre-filled example rows; paths are relative to the Space working
        # directory — presumably a data/ folder shipped with the repo (verify).
        # NOTE(review): cache_examples=True runs GPU inference for every
        # example at startup to cache the outputs.
        gr.Examples(
            examples=[
                [
                    "data/zidane.jpg",
                    "gelan-e.pt",
                    640,
                    0.4,
                    0.5,
                ],
                [
                    "data/huggingface.jpg",
                    "yolov9-c.pt",
                    640,
                    0.4,
                    0.5,
                ],
            ],
            fn=yolov9_inference,
            inputs=[
                img_path,
                model_path,
                image_size,
                conf_threshold,
                iou_threshold,
            ],
            outputs=[output_numpy],
            cache_examples=True,
        )
124
+
125
+
126
# Page shell: a title header, a social-links banner, then the demo UI
# produced by app(). Components created inside the `with gradio_app:`
# context are attached to this Blocks instance.
gradio_app = gr.Blocks()
with gradio_app:
    gr.HTML(
        """
    <h1 style='text-align: center'>
    YOLOv9: Learning What You Want to Learn Using Programmable Gradient Information
    </h1>
    """)
    gr.HTML(
        """
    <h3 style='text-align: center'>
    Follow me for more!
    <a href='https://twitter.com/kadirnar_ai' target='_blank'>Twitter</a> | <a href='https://github.com/kadirnar' target='_blank'>Github</a> | <a href='https://www.linkedin.com/in/kadir-nar/' target='_blank'>Linkedin</a> | <a href='https://www.huggingface.co/kadirnar/' target='_blank'>HuggingFace</a>
    </h3>
    """)
    with gr.Row():
        with gr.Column():
            # Render the full demo (inputs, button, examples) into the page.
            app()

# debug=True surfaces runtime errors/logs in the Space console.
gradio_app.launch(debug=True)
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ yolov9pip==0.0.4
2
+ huggingface_hub