import os
import gradio as gr
import cv2
from ultralytics import YOLO
# Define the folder containing the images and video
folder_path = "info" # Replace with your folder name or path
# Get list of files from the folder
# Sort so "first two images" / "first video" below is deterministic (os.listdir order is OS-dependent)
image_files = sorted(os.path.join(folder_path, f) for f in os.listdir(folder_path) if f.lower().endswith(('.jpg', '.jpeg', '.png')))
video_files = sorted(os.path.join(folder_path, f) for f in os.listdir(folder_path) if f.lower().endswith(('.mp4', '.avi', '.mov')))
# Ensure the folder contains the expected number of files
if len(image_files) < 2 or len(video_files) < 1:
    raise ValueError("Folder must contain at least 2 images and 1 video.")
# Select the first two images and the first video
image_examples = [[image_files[0]], [image_files[1]]]
video_examples = [[video_files[0]]]
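# Optional guard (a sketch, not in the original app): fail fast with a clear
# message if the custom weights file is missing, instead of surfacing the
# loader's error later.
if not os.path.exists('best.pt'):
    raise FileNotFoundError("best.pt not found; place the trained weights next to app.py")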
# Load the YOLO model
model = YOLO('best.pt')
# Function for processing images
def show_preds_image(image_path):
    results = model.predict(source=image_path)
    annotated_image = results[0].plot()  # plot() returns an annotated BGR numpy array
    return cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)  # Gradio displays RGB
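# Note (sketch): Ultralytics' predict() accepts a `conf` keyword to tune the
# detection threshold, e.g. model.predict(source=image_path, conf=0.5);
# the call above uses the library default.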
# Function for processing videos
def show_preds_video(video_path):
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 25  # fall back when FPS metadata is missing or zero
    out_frames = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        results = model.predict(source=frame)
        out_frames.append(results[0].plot())  # keep BGR; cv2.VideoWriter expects BGR
    cap.release()
    if not out_frames:
        raise ValueError("Could not read any frames from the input video.")
    # Save the annotated video
    output_path = "annotated_video.mp4"
    height, width, _ = out_frames[0].shape
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
    for frame in out_frames:
        writer.write(frame)
    writer.release()
    return output_path
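# Alternative (a sketch under the same model/output assumptions, not the
# original app's code): Ultralytics can stream a video source directly via
# predict(stream=True), which yields per-frame results lazily and avoids
# buffering every annotated frame in memory.
def show_preds_video_streaming(video_path):
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 25
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cap.release()
    output_path = "annotated_video.mp4"
    writer = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
    # stream=True makes predict() return a generator of per-frame results
    for result in model.predict(source=video_path, stream=True):
        writer.write(result.plot())  # plot() returns BGR, as VideoWriter expects
    writer.release()
    return output_path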
# Gradio interfaces
inputs_image = gr.Image(type="filepath", label="Input Image")
outputs_image = gr.Image(type="numpy", label="Output Image")
interface_image = gr.Interface(
    fn=show_preds_image,
    inputs=inputs_image,
    outputs=outputs_image,
    title="Safety Head Detector - Image",
    examples=image_examples,
)
inputs_video = gr.Video(label="Input Video")
outputs_video = gr.Video(label="Annotated Output")
interface_video = gr.Interface(
    fn=show_preds_video,
    inputs=inputs_video,
    outputs=outputs_video,
    title="Safety Head Detector - Video",
    examples=video_examples,
)
# Combine into a tabbed interface
gr.TabbedInterface(
    [interface_image, interface_video],
    tab_names=['Image Inference', 'Video Inference']
).launch(share=True)