import gradio as gr
import torch
import json
import yolov5
# Images
torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
torch.hub.download_url_to_file('https://raw.githubusercontent.com/WongKinYiu/yolov7/main/inference/images/image3.jpg', 'image3.jpg')
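# Model weights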
torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5s.pt','yolov5s.pt')
model_path = "yolov5x.pt" #"yolov5s.pt" #"yolov5m.pt", "yolov5l.pt", "yolov5x.pt",
image_size = 640,
conf_threshold = 0.25,
iou_threshold = 0.45,
model = yolov5.load(model_path, device="cpu")
def yolov5_inference(
    image: gr.inputs.Image = None,
):
    """
    Run YOLOv5 inference on a single image.

    Args:
        image: Input PIL image
    Returns:
        JSON string with one entry per detection (class index, name, score, box)
    """
    results = model([image], size=image_size)
    tensor = {"tensorflow": []}
    if results.pred is not None:
        for element in results.pred[0]:
            detection = {}
            itemclass = int(element[5].item())
            detection["classe"] = itemclass
            detection["nome"] = results.names[itemclass]
            detection["score"] = element[4].item()
            # Boxes come back as (x1, y1, x2, y2); convert the far corner to width/height
            detection["x"] = element[0].item()
            detection["y"] = element[1].item()
            detection["w"] = element[2].item() - element[0].item()
            detection["h"] = element[3].item() - element[1].item()
            tensor["tensorflow"].append(detection)
    text = json.dumps(tensor)
    return text  # results.render()[0] would return the annotated image instead
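# Gradio interface: PIL image in, JSON detections out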
inputs = [
    gr.inputs.Image(type="pil", label="Input Image"),
]
outputs = gr.outputs.Textbox(label="Detections (JSON)")
title = "YOLOv5"
description = "YOLOv5 is a family of object detection models pretrained on COCO dataset. This model is a pip implementation of the original YOLOv5 model."
examples = [['zidane.jpg'], ['image3.jpg']]
demo_app = gr.Interface(
    fn=yolov5_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    description=description,
    examples=examples,
    #cache_examples=True,
    #live=True,
    #theme='huggingface',
)
demo_app.launch(debug=True, enable_queue=True)