import os
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import torchvision.transforms as T
from PIL import Image
from decord import VideoReader
from decord import cpu
from uniformer_light_video import uniformer_xxs_video
from uniformer_light_image import uniformer_xxs_image
from kinetics_class_index import kinetics_classnames
from imagenet_class_index import imagenet_classnames
from transforms import (
GroupNormalize, GroupScale, GroupCenterCrop,
Stack, ToTorchFormatTensor
)
import gradio as gr
from huggingface_hub import hf_hub_download
# Device on which to run the models
# Set to "cuda" to run inference on a GPU
device = "cpu"
model_video_path = hf_hub_download(repo_id="Andy1621/uniformer_light", filename="uniformer_xxs16_160_k400.pth")
model_image_path = hf_hub_download(repo_id="Andy1621/uniformer_light", filename="uniformer_xxs_160_in1k.pth")
# Build both models and load the pretrained weights
model_video = uniformer_xxs_video()
model_video.load_state_dict(torch.load(model_video_path, map_location='cpu'))
model_image = uniformer_xxs_image()
model_image.load_state_dict(torch.load(model_image_path, map_location='cpu'))
# Set to eval mode and move to desired device
model_video = model_video.to(device).eval()
model_image = model_image.to(device).eval()
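# Optional sanity check, kept as a comment so start-up stays fast. Mirroring the
# preprocessing below (16 frames at 160x160 for video, 224x224 crops for images),
# dummy forward passes should give 400 Kinetics logits and 1000 ImageNet logits:
#   model_video(torch.zeros(1, 3, 16, 160, 160)).shape  # expected: (1, 400)
#   model_image(torch.zeros(1, 3, 224, 224)).shape      # expected: (1, 1000)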
# Build id-to-class-name mappings for Kinetics-400 and ImageNet-1k
kinetics_id_to_classname = {}
for k, v in kinetics_classnames.items():
    kinetics_id_to_classname[k] = v
imagenet_id_to_classname = {}
for k, v in imagenet_classnames.items():
    imagenet_id_to_classname[k] = v[1]  # keep the human-readable class name
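# get_index below samples num_segments frame indices by splitting the clip into
# equal temporal segments and taking the (rounded) centre of each one, so the
# sampled frames cover the whole video roughly uniformly. For example,
# get_index(16, num_segments=8) gives array([0, 2, 4, 6, 8, 9, 11, 13]).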
def get_index(num_frames, num_segments=8):
seg_size = float(num_frames - 1) / num_segments
start = int(seg_size / 2)
offsets = np.array([
start + int(np.round(seg_size * idx)) for idx in range(num_segments)
])
return offsets
def load_video(video_path):
vr = VideoReader(video_path, ctx=cpu(0))
num_frames = len(vr)
frame_indices = get_index(num_frames, 16)
    # Preprocess: scale to 160, center-crop to 160x160, then normalize with ImageNet mean/std
crop_size = 160
scale_size = 160
input_mean = [0.485, 0.456, 0.406]
input_std = [0.229, 0.224, 0.225]
transform = T.Compose([
GroupScale(int(scale_size)),
GroupCenterCrop(crop_size),
Stack(),
ToTorchFormatTensor(),
GroupNormalize(input_mean, input_std)
])
images_group = list()
for frame_index in frame_indices:
img = Image.fromarray(vr[frame_index].asnumpy())
images_group.append(img)
torch_imgs = transform(images_group)
return torch_imgs
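# inference_video: load_video returns a stacked tensor of shape (T*C, H, W)
# (16 frames x 3 channels here). It is reshaped to (1, T, C, H, W), permuted to
# the (B, C, T, H, W) layout the video backbone expects, and softmax-ed over the
# 400 Kinetics classes. The returned {class name: probability} dict is what
# gr.Label uses to display the top predictions.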
def inference_video(video):
vid = load_video(video)
    # The video model expects inputs of shape: B x C x T x H x W
TC, H, W = vid.shape
inputs = vid.reshape(1, TC//3, 3, H, W).permute(0, 2, 1, 3, 4)
with torch.no_grad():
prediction = model_video(inputs)
prediction = F.softmax(prediction, dim=1).flatten()
return {kinetics_id_to_classname[str(i)]: float(prediction[i]) for i in range(400)}
def set_example_video(example: list) -> dict:
return gr.Video.update(value=example[0])
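# inference_image: standard ImageNet-style preprocessing (resize to 224, center
# crop, normalize), add a batch dimension, and return the softmax over the 1000
# ImageNet-1k classes as a {class name: probability} dict.
# E.g. inference_image(Image.open('./images/cat.png')) if called outside Gradio.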
def inference_image(img):
image = img
image_transform = T.Compose(
[
T.Resize(224),
T.CenterCrop(224),
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
image = image_transform(image)
# The model expects inputs of shape: B x C x H x W
image = image.unsqueeze(0)
with torch.no_grad():
prediction = model_image(image)
prediction = F.softmax(prediction, dim=1).flatten()
return {imagenet_id_to_classname[str(i)]: float(prediction[i]) for i in range(1000)}
def set_example_image(example: list) -> dict:
return gr.Image.update(value=example[0])
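# Gradio UI: a Blocks app with a "Video" tab and an "Image" tab. Each tab pairs
# an input component and a Submit button with a top-5 Label output, plus a row of
# clickable example files; the event handlers are wired up at the bottom of the file.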
demo = gr.Blocks()
with demo:
    gr.Markdown(
        """
        # UniFormer Light
        Gradio demo for <a href='https://github.com/Sense-X/UniFormer' target='_blank'>UniFormer</a>: upload your own video or image, or click one of the examples to load it. Read more at the links below.
        """
    )
with gr.Tab("Video"):
with gr.Box():
with gr.Row():
with gr.Column():
with gr.Row():
input_video = gr.Video(label='Input Video').style(height=360)
with gr.Row():
submit_video_button = gr.Button('Submit')
with gr.Column():
label_video = gr.Label(num_top_classes=5)
with gr.Row():
example_videos = gr.Dataset(components=[input_video], samples=[['./videos/hitting_baseball.mp4'], ['./videos/hoverboarding.mp4'], ['./videos/yoga.mp4']])
with gr.Tab("Image"):
with gr.Box():
with gr.Row():
with gr.Column():
with gr.Row():
input_image = gr.Image(label='Input Image', type='pil').style(height=360)
with gr.Row():
submit_image_button = gr.Button('Submit')
with gr.Column():
label_image = gr.Label(num_top_classes=5)
with gr.Row():
example_images = gr.Dataset(components=[input_image], samples=[['./images/cat.png'], ['./images/dog.png'], ['./images/panda.png']])
gr.Markdown(
"""
<p style='text-align: center'><a href='https://arxiv.org/abs/2201.09450' target='_blank'>[TPAMI] UniFormer: Unifying Convolution and Self-attention for Visual Recognition</a> | <a href='https://github.com/Sense-X/UniFormer' target='_blank'>Github Repo</a></p>
"""
)
submit_video_button.click(fn=inference_video, inputs=input_video, outputs=label_video)
example_videos.click(fn=set_example_video, inputs=example_videos, outputs=example_videos.components)
submit_image_button.click(fn=inference_image, inputs=input_image, outputs=label_image)
example_images.click(fn=set_example_image, inputs=example_images, outputs=example_images.components)
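    # Queueing keeps long-running inference requests from timing out.
    # (enable_queue= is the older launch() argument; newer Gradio releases configure this with demo.queue() instead.)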
demo.launch(enable_queue=True)