import os

os.system('pip install gradio --upgrade')
os.system('pip freeze')

import torch
import gradio as gr
from moviepy.editor import *

# Load the matting model and the video converter from torch.hub.
model = torch.hub.load("PeterL1n/RobustVideoMatting", "mobilenetv3")  # or "resnet50"
convert_video = torch.hub.load("PeterL1n/RobustVideoMatting", "converter")


def inference(video):
    # Remove any output left over from a previous run.
    os.system('rm -f output.mp4')
    # clip = VideoFileClip(video).subclip(0, 5)
    # clip.write_videofile("output.mp4")
    # Trim the upload to its first 5 seconds without re-encoding.
    os.system('ffmpeg -ss 00:00:00 -i "' + video + '" -to 00:00:05 -c copy -y output.mp4')
    convert_video(
        model,                         # The loaded model, can be on any device (cpu or cuda).
        input_source='output.mp4',     # A video file or an image sequence directory.
        input_resize=(500, 500),       # [Optional] Resize the input (also the output).
        downsample_ratio=0.25,         # [Optional] If None, make downsampled max size be 512px.
        output_type='video',           # Choose "video" or "png_sequence".
        output_composition='com.mp4',  # File path if video; directory path if png sequence.
        output_alpha="pha.mp4",        # [Optional] Output the raw alpha prediction.
        output_foreground="fgr.mp4",   # [Optional] Output the raw foreground prediction.
        output_video_mbps=4,           # Output video mbps. Not needed for png sequence.
        seq_chunk=7,                   # Process n frames at once for better parallelism.
        num_workers=1,                 # Only for image sequence input. Reader threads.
        progress=True                  # Print conversion progress.
    )
    return 'com.mp4', 'pha.mp4', 'fgr.mp4'


title = "Robust Video Matting"
description = "Gradio demo for Robust Video Matting. To use it, simply upload your video; currently only the mp4 and ogg formats are supported. Please trim your video to 5 seconds or less. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2108.11515' target='_blank'>Robust High-Resolution Video Matting with Temporal Guidance</a> | <a href='https://github.com/PeterL1n/RobustVideoMatting' target='_blank'>Github Repo</a></p>"

gr.Interface(
    inference,
    gr.inputs.Video(label="Input"),
    [
        gr.outputs.Video(label="Output Composition"),
        gr.outputs.Video(label="Output Alpha"),
        gr.outputs.Video(label="Output Foreground"),
    ],
    title=title,
    description=description,
    article=article,
    enable_queue=True
).launch(debug=True)