Spaces:
Running
on
A100
Running
on
A100
File size: 5,957 Bytes
880de81 02abdab 880de81 02abdab 880de81 02abdab 4c7362f 1167d4f 4c7362f 880de81 1167d4f 880de81 75813eb 02abdab 6c8ddcc 880de81 6c8ddcc 880de81 75813eb 880de81 6c8ddcc 880de81 6c8ddcc 880de81 75813eb 880de81 02abdab 880de81 4c7362f 1167d4f 880de81 4c7362f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 |
import os
import json
import gradio as gr
import tempfile
from PIL import Image, ImageDraw, ImageFont
import cv2
from typing import Tuple, Optional
import torch
from pathlib import Path
import time
import torch
import spaces
from video_highlight_detector import (
load_model,
BatchedVideoHighlightDetector,
get_video_duration_seconds
)
def load_examples(json_path: str) -> dict:
    """Load the example-gallery spec from *json_path* and return the parsed dict.

    The file is read as UTF-8 explicitly: relying on the platform default
    encoding breaks on Windows (cp1252) when the JSON contains non-ASCII
    text such as emoji in titles.
    """
    with open(json_path, 'r', encoding='utf-8') as f:
        return json.load(f)
def format_duration(seconds: int) -> str:
    """Render a duration in whole seconds as ``H:MM:SS``, or ``M:SS`` under an hour."""
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    if hours:
        return f"{hours}:{minutes:02d}:{secs:02d}"
    return f"{minutes}:{secs:02d}"
def add_watermark(video_path: str, output_path: str):
    """Burn a small "🤗 SmolVLM2 Highlight" caption into the bottom-right corner.

    Re-encodes the video through ffmpeg's ``drawtext`` filter while copying the
    audio stream untouched. Writes the result to *output_path*.

    Uses list-argv ``subprocess.run`` instead of the previous ``os.system``
    shell string: paths containing spaces or shell metacharacters no longer
    break the command, and there is no shell-injection surface.
    """
    # Local import keeps the file's top-level import block untouched.
    import subprocess

    watermark_text = "🤗 SmolVLM2 Highlight"
    drawtext = (
        f"drawtext=text='{watermark_text}':fontcolor=white:fontsize=24:box=1:"
        "[email protected]:boxborderw=5:x=w-tw-10:y=h-th-10"
    )
    # check=False mirrors the old os.system behavior: best-effort, no raise
    # on a non-zero ffmpeg exit status.
    subprocess.run(
        ["ffmpeg", "-i", video_path, "-vf", drawtext, "-codec:a", "copy", output_path],
        check=False,
    )
@spaces.GPU
def process_video(
    video_path: str,
    progress = gr.Progress()
) -> Tuple[Optional[str], Optional[str], Optional[str], Optional[str]]:
    """Run the full highlight pipeline on one uploaded video.

    Steps (with progress callbacks): load model → describe the video →
    decide which highlight types to look for → cut a highlight reel →
    watermark it.

    Returns a 4-tuple ``(output_path, description, highlight_types, error)``.
    On success ``error`` is None; on failure the first three slots are None
    and ``error`` carries a user-facing message.

    NOTE(review): ``progress = gr.Progress()`` as a default argument is the
    documented Gradio idiom for progress tracking, not a mutable-default bug.
    """
    try:
        duration = get_video_duration_seconds(video_path)
        if duration > 1200:  # 20 minutes
            return None, None, None, "Video must be shorter than 20 minutes"
        progress(0.1, desc="Loading model...")
        model, processor = load_model()
        detector = BatchedVideoHighlightDetector(model, processor)
        progress(0.2, desc="Analyzing video content...")
        video_description = detector.analyze_video_content(video_path)
        progress(0.3, desc="Determining highlight types...")
        highlight_types = detector.determine_highlights(video_description)
        progress(0.4, desc="Detecting and extracting highlights...")
        # delete=False: the path must outlive this context manager so ffmpeg
        # (inside create_highlight_video / add_watermark) can write and read it.
        with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as tmp_file:
            temp_output = tmp_file.name
        detector.create_highlight_video(video_path, temp_output)
        progress(0.9, desc="Adding watermark...")
        output_path = temp_output.replace('.mp4', '_watermark.mp4')
        add_watermark(temp_output, output_path)
        # Remove the un-watermarked intermediate; only the watermarked file is kept.
        os.unlink(temp_output)
        # Truncate the model's free-text outputs to keep the UI panels compact.
        video_description = video_description[:500] + "..." if len(video_description) > 500 else video_description
        highlight_types = highlight_types[:500] + "..." if len(highlight_types) > 500 else highlight_types
        return output_path, video_description, highlight_types, None
    except Exception as e:
        # Top-level boundary: surface any failure as a message instead of a traceback.
        return None, None, None, f"Error processing video: {str(e)}"
def create_ui(examples_path: str):
    """Build and return the Gradio Blocks app.

    Renders a gallery of pre-computed examples loaded from *examples_path*,
    then an upload area whose ``change`` event runs :func:`process_video`.

    Fix: the example-title Markdown previously used double quotes inside a
    double-quoted f-string (``f"## {example["title"]}"``), which is a
    SyntaxError on every Python before 3.12 (PEP 701). Rewritten with
    single-quoted keys.
    """
    examples_data = load_examples(examples_path)

    with gr.Blocks() as app:
        gr.Markdown("# Video Highlight Generator")
        gr.Markdown("Upload a video (max 20 minutes) and get an automated highlight reel!")

        with gr.Row():
            gr.Markdown("## Example Results")

        # One row per precomputed example: original on the left, the model's
        # reasoning and the highlight reel on the right.
        for example in examples_data["examples"]:
            with gr.Row():
                with gr.Column():
                    gr.Markdown(f"## {example['title']}")
                    gr.Video(
                        value=example["original"]["url"],
                        label=f"Original ({format_duration(example['original']['duration_seconds'])})",
                        interactive=False
                    )
                with gr.Column():
                    with gr.Accordion("Model chain of thought details", open=False):
                        gr.Markdown(example["analysis"]["video_description"])
                        gr.Markdown(example["analysis"]["highlight_types"])
                    gr.Video(
                        value=example["highlights"]["url"],
                        label=f"Highlights ({format_duration(example['highlights']['duration_seconds'])})",
                        interactive=False
                    )

        gr.Markdown("## Try It Yourself!")
        with gr.Row():
            input_video = gr.Video(
                label="Upload your video (max 20 minutes)",
                interactive=True
            )

        with gr.Row(visible=False) as results_row:
            with gr.Column():
                video_description = gr.Markdown(label="Video Analysis")
            with gr.Column():
                highlight_types = gr.Markdown(label="Detected Highlights")

        with gr.Row(visible=False) as output_row:
            output_video = gr.Video(label="Highlight Video")
            download_btn = gr.Button("Download Highlights")

        error_msg = gr.Markdown(visible=False)

        def on_upload(video):
            """Handle an upload: run the pipeline and route results/errors to the UI.

            NOTE(review): mutating ``.visible`` / ``.value`` on components inside
            a handler presumably does not re-render them in current Gradio —
            visibility updates are normally returned as ``gr.update(...)``
            outputs. Preserved as-is to avoid changing the event signature;
            verify against the Gradio version in use.
            """
            results_row.visible = False
            output_row.visible = False
            error_msg.visible = False

            if not video:
                error_msg.visible = True
                error_msg.value = "Please upload a video"
                return None, None, None, error_msg

            output_path, desc, highlights, err = process_video(video)
            if err:
                error_msg.visible = True
                error_msg.value = err
                return None, None, None, error_msg

            results_row.visible = True
            output_row.visible = True
            return output_path, desc, highlights, ""

        input_video.change(
            on_upload,
            inputs=[input_video],
            outputs=[output_video, video_description, highlight_types, error_msg]
        )

        # Identity pass-through: re-emitting the video component triggers the
        # browser's download affordance on the player.
        download_btn.click(
            lambda x: x,
            inputs=[output_video],
            outputs=[output_video]
        )

    return app
if __name__ == "__main__":
    # Warm up CUDA before building the UI: moving a tiny tensor to the device
    # forces context initialization so the first real request doesn't pay it.
    # Falls back to CPU when no GPU is available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    zero = torch.Tensor([0]).to(device)

    app = create_ui("video_spec.json")
    # Fixed: the original line ended with a stray " |" (copy/paste artifact),
    # which is a SyntaxError.
    app.launch()