hlydecker committed
Commit 74489e9
1 Parent(s): 22b11a1

v2.0 Gradio Blocks rebuild

Files changed (1)
  1. app.py +32 -10
app.py CHANGED
@@ -5,11 +5,20 @@ import torchvision
 import numpy as np
 from PIL import Image
 
+# Markdown Content
+title = """<h1 id="title">MegaDetector v5</h1>"""
+description = "Detect and identify animals, people and vehicles in camera trap images."
+article = "<p style='text-align: center'>MegaDetector makes predictions using a YOLOv5 model that was trained to detect animals, humans, and vehicles in camera trap images; find out more about the project on <a href='https://github.com/microsoft/CameraTraps'>Microsoft's CameraTraps GitHub</a>. This app was built by <a href='https://github.com/hlydecker'>Henry Lydecker</a> but really depends on code and models developed by <a href='http://ecologize.org/'>Ecologize</a> and <a href='http://aka.ms/aiforearth'>Microsoft AI for Earth</a>. Find out more about the YOLO model from the original creator, <a href='https://pjreddie.com/darknet/yolo/'>Joseph Redmon</a>. YOLOv5 is a family of compound-scaled object detection models trained on the COCO dataset and developed by Ultralytics, and includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite. <a href='https://github.com/ultralytics/yolov5'>Source code</a> | <a href='https://pytorch.org/hub/ultralytics_yolov5'>PyTorch Hub</a></p>"
+
 # Load MegaDetector v5a model
 # TODO: Allow user selectable model?
+models = ["model_weights/md_v5a.0.0.pt","model_weights/md_v5b.0.0.pt"]
+
 model = torch.hub.load('ultralytics/yolov5', 'custom', "model_weights/md_v5a.0.0.pt")
 
-def yolo(im, size=640):
+def yolo(model_name, im, size=640):
+
+    model = torch.hub.load('ultralytics/yolov5', 'custom', model_name)  # load the checkpoint selected in the dropdown
     g = (size / max(im.size)) # gain
     im = im.resize((int(x * g) for x in im.size), Image.ANTIALIAS) # resize
 
@@ -17,13 +26,26 @@ def yolo(im, size=640):
     results.render() # updates results.imgs with boxes and labels
     return Image.fromarray(results.imgs[0])
 
+demo = gr.Blocks()
 
-inputs = gr.inputs.Image(type='pil', label="Original Image")
-outputs = gr.outputs.Image(type="pil", label="Output Image")
-
-title = "MegaDetector v5"
-description = "Detect and identify animals, people and vehicles in camera trap images."
-article = "<p style='text-align: center'>MegaDetector makes predictions using a YOLOv5 model that was trained to detect animals, humans, and vehicles in camera trap images; find out more about the project on <a href='https://github.com/microsoft/CameraTraps'>Microsoft's CameraTraps GitHub</a>. This app was built by <a href='https://github.com/hlydecker'>Henry Lydecker</a> but really depends on code and models developed by <a href='http://ecologize.org/'>Ecologize</a> and <a href='http://aka.ms/aiforearth'>Microsoft AI for Earth</a>. Find out more about the YOLO model from the original creator, <a href='https://pjreddie.com/darknet/yolo/'>Joseph Redmon</a>. YOLOv5 is a family of compound-scaled object detection models trained on the COCO dataset and developed by Ultralytics, and includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite. <a href='https://github.com/ultralytics/yolov5'>Source code</a> | <a href='https://pytorch.org/hub/ultralytics_yolov5'>PyTorch Hub</a></p>"
-
-examples = [['data/Macropod.jpg'], ['data/koala2.jpg'],['data/cat.jpg'],['data/BrushtailPossum.jpg']]
-gr.Interface(yolo, inputs, outputs, title=title, description=description, article=article, examples=examples, theme="huggingface").launch(enable_queue=True)
+with demo:
+    gr.Markdown(title)
+    gr.Markdown(description)
+    options = gr.Dropdown(choices=models, label="Select MegaDetector Model", show_label=True)
+
+    with gr.Row():
+        img_input = gr.Image(type='pil', label="Original Image")
+        img_output = gr.Image(type="pil", label="Output Image")
+
+    with gr.Row():
+        example_images = gr.Dataset(components=[img_input],
+                                    samples=[['data/Macropod.jpg'], ['data/koala2.jpg'], ['data/cat.jpg'], ['data/BrushtailPossum.jpg']])
+
+    detect_button = gr.Button('Detect')
+
+    detect_button.click(yolo, inputs=[options, img_input], outputs=img_output, queue=True)
+    example_images.click(fn=set_example_image, inputs=[example_images], outputs=[img_input])
+
+    gr.Markdown(article)
+
+demo.launch(enable_queue=True)
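
Note: set_example_image is wired to the examples Dataset but is not defined anywhere in this diff. A minimal sketch of what the missing helper presumably looks like, following the common Gradio 3.x pattern of copying a clicked Dataset sample into an input component (the name and call signature come from the call site above; the body is an assumption):

    # Hypothetical helper, not part of this commit: fills the input image
    # component with the sample that was clicked in the examples Dataset.
    def set_example_image(example: list) -> dict:
        # gr.Dataset passes the clicked row (here a one-element list holding
        # the image path); gr.Image.update swaps it into img_input.
        return gr.Image.update(value=example[0])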
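
For context, the inference path inside yolo follows the standard YOLOv5 PyTorch Hub API. A minimal standalone sketch of the same path outside Gradio, assuming the model_weights/md_v5a.0.0.pt checkpoint and one of the repo's data/ images are available locally:

    import torch
    from PIL import Image

    # Load MegaDetector weights through the YOLOv5 custom-model hub entry point
    model = torch.hub.load('ultralytics/yolov5', 'custom', 'model_weights/md_v5a.0.0.pt')

    im = Image.open('data/Macropod.jpg')  # any camera trap image
    results = model(im, size=640)         # run detection at 640 px
    results.print()                       # per-class detection counts
    results.render()                      # draw boxes/labels into results.imgs
    Image.fromarray(results.imgs[0]).save('detections.jpg')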