import gradio as gr
import numpy as np
import torch
from PIL import Image, ImageDraw
from transformers import AutoImageProcessor, AutoModelForObjectDetection

description = """
## This interface is made with 🤗 Gradio.

Simply upload an image of any person wearing or not wearing a helmet.
"""
model_id = "devonho/detr-resnet-50_finetuned_cppe5"
image_processor = AutoImageProcessor.from_pretrained(model_id)
model = AutoModelForObjectDetection.from_pretrained(model_id)
# Gradio Components
image_in = gr.components.Image()
image_out = gr.components.Image()
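

# Run the detector on a single PIL image and keep detections above a 0.5 confidence threshold.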
def model_inference(img):
    with torch.no_grad():
        inputs = image_processor(images=img, return_tensors="pt")
        outputs = model(**inputs)
        # Rescale predicted boxes to the original image size; PIL gives (width, height), the processor expects (height, width).
        target_sizes = torch.tensor([img.size[::-1]])
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.5, target_sizes=target_sizes
        )[0]
    return results
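

# Draw each detected bounding box, its class label, and its confidence score on the input image.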
def plot_results(image):
    image = Image.fromarray(np.uint8(image))
    results = model_inference(img=image)
    draw = ImageDraw.Draw(image)
    for score, label, box in zip(
        results["scores"], results["labels"], results["boxes"]
    ):
        score = score.item()
        box = [round(i, 2) for i in box.tolist()]
        x, y, x2, y2 = tuple(box)
        draw.rectangle((x, y, x2, y2), outline="red", width=1)
        draw.text((x, y), model.config.id2label[label.item()], fill="white")
        # Print the confidence in green when it is above 0.7, otherwise in red.
        draw.text((x + 0.5, y - 0.5), text=str(score), fill="green" if score > 0.7 else "red")
    return image
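

# Wire the annotated-image function into a simple image-in, image-out Gradio interface and launch it.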
Iface = gr.Interface(
    fn=plot_results,
    inputs=[image_in],
    outputs=image_out,
    title="Object Detection Using Fine-Tuned Vision Transformers",
    description=description,
).launch()