# Obj-det-2-json / app.py
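"""Gradio app that runs YOLOv5s object detection on an uploaded image and returns the
detected classes, a coarse location (Left/Right/Above/Below/Center) and confidence
scores as JSON, together with the prediction time."""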
import json
from timeit import default_timer as timer

import gradio as gr
import torch

# Load the pretrained YOLOv5s model (trained on the 80 COCO classes) from the Ultralytics hub
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)

# Counter used to number the JSON file written for each processed image
cnt = 0
def LCR(bbox, x_img, y_img):
    """Classify a bounding box as Left, Center or Right based on its normalized x-extent."""
    x1 = bbox[0] / x_img
    x2 = bbox[2] / x_img
    if x1 < 0.2 and x2 < 0.2:
        location = "Left"
    elif x1 > 0.8 and x2 > 0.8:
        location = "Right"
    elif x1 < 0.2 and 0.2 <= x2 <= 0.8:
        # Box straddles the left boundary: decide by its horizontal midpoint
        if (x1 + x2) < 0.4:
            location = "Left"
        else:
            location = "Center"
    elif x2 > 0.8 and 0.2 <= x1 <= 0.8:
        # Box straddles the right boundary: decide by its horizontal midpoint
        if (x1 + x2) > 1.6:
            location = "Right"
        else:
            location = "Center"
    else:
        location = "Center"
    print(f"x1 {x1} x2 {x2} bbox0 {bbox[0]} bbox2 {bbox[2]} x_img {x_img} LocationLCR {location}")
    return location
def ACB(bbox, x_img, y_img, location):
    """Refine a Center box into Above, Center or Below based on its normalized y-extent."""
    y1 = bbox[1] / y_img
    y2 = bbox[3] / y_img
    if location == "Center":
        if y1 < 0.33333 and y2 < 0.33333:
            location = "Above"
        elif y1 > 0.66667 and y2 > 0.66667:
            location = "Below"
        elif y1 < 0.33333 and 0.33333 <= y2 <= 0.66667:
            # Box straddles the upper boundary: decide by its vertical midpoint
            if (y1 + y2) < 0.66667:
                location = "Above"
            else:
                location = "Center"
        elif y2 > 0.66667 and 0.33333 <= y1 <= 0.66667:
            # Box straddles the lower boundary: decide by its vertical midpoint
            if (y1 + y2) > 1.33333:
                location = "Below"
            else:
                location = "Center"
        else:
            location = "Center"
    print(f"y1 {y1} y2 {y2} bbox1 {bbox[1]} bbox3 {bbox[3]} y_img {y_img} Location {location}")
    return location
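# Taken together, LCR and ACB assign each detection one of five coarse labels:
# "Left", "Right", "Above", "Below" or "Center". For example, a box spanning
# roughly x in [0.3, 0.5] and y in [0.1, 0.2] of the image would be labelled "Above".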
def turn_img_into_fileJSON(frame):
    """Run YOLOv5 on a PIL image and return the detections as a JSON string plus the prediction time."""
    start_time = timer()
    x_img, y_img = frame.size
    print(x_img, y_img)
    global cnt
    objects = []
    prediction = model(frame)
    for det in prediction.xyxy[0]:
        class_id = int(det[5])
        class_name = model.names[class_id]
        confidence = float(det[4])
        bbox = det[:4].tolist()
        if confidence >= 0.5:
            location = LCR(bbox, x_img, y_img)
            location = ACB(bbox, x_img, y_img, location)
            # Save the results to the list
            objects.append({
                'Class': class_name,
                'Location': location,
                'Confidence': confidence
            })
    # Write the detections for this image to a numbered JSON file
    with open('{:05d}.json'.format(cnt), 'w') as f:
        json.dump(objects, f)
    cnt += 1
    pred_time = round(timer() - start_time, 5)
    json_str = json.dumps(objects)
    return json_str, pred_time
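# Example of the JSON structure returned for one image (illustrative values only;
# actual classes, locations and confidences depend on the model's detections):
# [
#     {"Class": "person", "Location": "Center", "Confidence": 0.87},
#     {"Class": "dog", "Location": "Left", "Confidence": 0.62}
# ]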
title = "Object-detection"
description = "A YOLOv5s object detection model that returns each detected object's class, coarse location in the frame, and confidence score as JSON."
article = "Created by Ryan"
# Create the Gradio demo
demo = gr.Interface(fn=turn_img_into_fileJSON,   # mapping function from input to output
                    inputs=gr.Image(type="pil"),  # the function expects a PIL image
                    outputs=[gr.JSON(label="JSON Output"),
                             gr.Number(label="Prediction time (s)")],
                    title=title,
                    description=description,
                    article=article,
                    live=True)
demo.launch(share=True)
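# Note: share=True creates a temporary public link when the app is run locally;
# on Hugging Face Spaces the demo is already hosted, so demo.launch() alone suffices.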