import gradio as gr
import albumentations as albu
import numpy as np
import cv2
import torch

from iglovikov_helper_functions.utils.image_utils import load_rgb, pad, unpad
from iglovikov_helper_functions.dl.pytorch.utils import tensor_from_rgb_image
from people_segmentation.pre_trained_models import create_model

# Load the pre-trained people-segmentation U-Net and put it in inference mode.
model = create_model("Unet_2020-07-20")
model.eval()


def segment_people(image):
    """Return the input RGB image with segmented people highlighted in green."""
    # Normalize with ImageNet statistics (albumentations defaults).
    transform = albu.Compose([albu.Normalize(p=1)], p=1)

    # Pad so height and width are multiples of 32, as required by the U-Net encoder.
    padded_image, pads = pad(image, factor=32, border=cv2.BORDER_CONSTANT)

    x = transform(image=padded_image)["image"]
    x = torch.unsqueeze(tensor_from_rgb_image(x), 0)

    with torch.no_grad():
        prediction = model(x)[0][0]

    # Threshold the logits at 0 (probability 0.5) and remove the padding.
    mask = (prediction > 0).cpu().numpy().astype(np.uint8)
    mask = unpad(mask, pads)

    # Blend a green overlay of the mask onto the original image at 50% opacity.
    green_mask = (cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB) * (0, 255, 0)).astype(np.uint8)
    return cv2.addWeighted(image, 1, green_mask, 0.5, 0)


def gradio_segmentation(image_path):
    """Gradio callback: load the image from disk, segment it, and return the overlay."""
    image = load_rgb(image_path)
    return segment_people(image)


examples = [["76.jpg"], ["69.jpg"], ["80.jpg"]]

description = """
# People Segmentation
This application segments people in the input image. Upload an image to see the segmented output.
"""

gr.Interface(
    fn=gradio_segmentation,
    inputs=gr.Image(type="filepath"),
    outputs=gr.Image(type="numpy"),
    examples=examples,
    title="People Segmentation",
    description=description,
).launch()
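
# Optional sanity check (a sketch, assuming one of the example files such as "76.jpg"
# sits next to this script): call the callback directly, without the UI, and write the
# overlay to disk. If used, run it before .launch(), which blocks when run as a script.
#
#     overlay = gradio_segmentation("76.jpg")
#     cv2.imwrite("overlay.png", cv2.cvtColor(overlay, cv2.COLOR_RGB2BGR))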