import datetime
import json
import logging
import math
import subprocess
import sys
from typing import Dict, List, Optional, Tuple

import torch
from num2words import num2words
from PIL import Image
from tqdm import tqdm
from transformers import AutoProcessor, AutoTokenizer

from modeling_smolvlm import SmolVLMForConditionalGeneration

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

SYSTEM_MESSAGE = (
    "Carefully watch the video and pay attention to the cause and sequence of events, "
    "the detail and movement of objects, and the action and pose of persons. "
    "Based on your observations, answer the question with yes or no."
    " <end_of_utterance>"
)
FRAME_TIMESTAMP_MESSAGE = "Frame from"
DEFAULT_VIDEO_INTRO = (
    "You are provided the following series of {frame_count} frames "
    "from a {video_duration} [H:MM:SS] video.\n"
)

# ----------------------------------------------------------------------
# Helper functions for resizing, frame counting, etc.
# ----------------------------------------------------------------------
def round_by_factor(number: float, factor: int) -> int:
    """Round `number` to the nearest multiple of `factor`."""
    return round(number / factor) * factor


def ceil_by_factor(number: float, factor: int) -> int:
    """Round `number` up to the nearest multiple of `factor`."""
    return math.ceil(number / factor) * factor


def floor_by_factor(number: float, factor: int) -> int:
    """Round `number` down to the nearest multiple of `factor`."""
    return math.floor(number / factor) * factor
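
# Quick illustration of the rounding helpers (worked examples only, not used
# anywhere in the pipeline):
#     round_by_factor(250, 8)  -> 248   # nearest multiple of 8
#     ceil_by_factor(250, 8)   -> 256   # next multiple of 8 up
#     floor_by_factor(250, 8)  -> 248   # next multiple of 8 down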

def smart_resize(
    height: int,
    width: int,
    factor: int,
    min_pixels: int,
    max_pixels: int,
    max_ratio: float,
) -> Tuple[int, int]:
    """
    Rescale (height, width) so that:
      - aspect ratio <= max_ratio
      - total area is in [min_pixels, max_pixels]
      - each dimension is a multiple of factor
    """
    ratio = max(height, width) / min(height, width)
    if ratio > max_ratio:
        raise ValueError(f"Aspect ratio {ratio:.2f} > {max_ratio}")
    h_ = max(factor, round_by_factor(height, factor))
    w_ = max(factor, round_by_factor(width, factor))
    area = h_ * w_
    if area > max_pixels:
        scale = math.sqrt((height * width) / max_pixels)
        h_ = floor_by_factor(height / scale, factor)
        w_ = floor_by_factor(width / scale, factor)
    elif area < min_pixels:
        scale = math.sqrt(min_pixels / (height * width))
        h_ = ceil_by_factor(height * scale, factor)
        w_ = ceil_by_factor(width * scale, factor)
    return h_, w_
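
# Worked example of smart_resize on a 1920x1080 frame under the defaults used
# below (factor=8, min_pixels=384*384, max_pixels=384*384*4, max_ratio=2.0):
# 1920*1080 = 2,073,600 px > max_pixels (589,824), so the frame is scaled down
# by sqrt(2073600 / 589824) ~= 1.875 and snapped to multiples of 8, giving
# roughly (576, 1024) for (height, width).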

def _smart_nframes(
    total_frames: int,
    video_fps: float,
    frame_factor: int = 1,
    target_fps: float = 2.0,
    min_frames: int = 4,
    max_frames: int = 32,
) -> int:
    """
    Decide how many frames to pick from a range based on the target FPS.
    The result is clamped to [min_frames, max_frames] and rounded to a
    multiple of frame_factor.
    """
    minf = ceil_by_factor(min_frames, frame_factor)
    maxf = floor_by_factor(min(max_frames, total_frames), frame_factor)
    val = total_frames / video_fps * target_fps
    val = min(max(val, minf), maxf)
    nframes = round_by_factor(val, frame_factor)
    if not (frame_factor <= nframes <= total_frames):
        raise ValueError(f"Invalid nframes={nframes}, out of range.")
    return int(nframes)
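
# Worked example (illustrative only): a 30 s segment at 30 fps has 900 frames.
# With target_fps=1.0, frame_factor=1, min_frames=4, max_frames=32:
#     val = 900 / 30 * 1.0 = 30  -> within [4, 32] -> nframes = 30
# A 5 s segment (150 frames) gives val = 5, so 5 frames are sampled.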

def get_video_duration_seconds(video_path: str) -> float:
    """
    Use ffprobe to retrieve the total duration of a video (in seconds).
    """
    cmd = [
        "ffprobe",
        "-v", "quiet",
        "-print_format", "json",
        "-show_format",
        video_path,
    ]
    result = subprocess.run(cmd, capture_output=True, text=True)
    info = json.loads(result.stdout)
    return float(info["format"]["duration"])

def get_fixed_30s_segments(video_path: str) -> List[Tuple[float, float]]:
    """
    Produce a list of (start_sec, end_sec) tuples in 30-second blocks
    covering the entire video.
    """
    duration = get_video_duration_seconds(video_path)
    segments = []
    start = 0.0
    block_size = 30.0  # matches the 30-second blocks promised by the name and docstring
    while start < duration:
        end = min(start + block_size, duration)
        segments.append((start, end))
        start = end
    return segments
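
# Illustrative output for a hypothetical 75-second clip:
#     get_fixed_30s_segments("clip.mp4")
#     -> [(0.0, 30.0), (30.0, 60.0), (60.0, 75.0)]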

class SmartVideoFrameExtractor:
    """
    Extracts frames from a specific portion of a video
    (defined by start_sec and end_sec).
    """

    def __init__(
        self,
        frame_factor: int = 1,
        min_pixels: int = 384 * 384,
        max_pixels: int = 384 * 384 * 4,
        max_ratio: float = 2.0,
    ):
        self.frame_factor = frame_factor
        self.min_pixels = min_pixels
        self.max_pixels = max_pixels
        self.max_ratio = max_ratio
        try:
            import decord
            self.reader = "decord"
            decord.bridge.set_bridge("torch")
        except ImportError:
            self.reader = "torchvision"
            logger.info("Decord not found, falling back to torchvision")

    def extract_frames(
        self,
        video_path: str,
        start_sec: float,
        end_sec: float,
        target_fps: float = 1.0,
        min_frames: int = 4,
        max_frames: int = 32,
    ) -> Tuple[List[Image.Image], List[str], float]:
        """Extract frames from [start_sec, end_sec] using decord or torchvision.

        Returns (frames, timestamps, segment_duration_in_seconds).
        """
        if self.reader == "decord":
            return self._extract_frames_decord(
                video_path, start_sec, end_sec, target_fps, min_frames, max_frames
            )
        else:
            return self._extract_frames_torchvision(
                video_path, start_sec, end_sec, target_fps, min_frames, max_frames
            )

    def _extract_frames_decord(
        self,
        video_path: str,
        start_sec: float,
        end_sec: float,
        target_fps: float,
        min_frames: int,
        max_frames: int,
    ) -> Tuple[List[Image.Image], List[str], float]:
        """Extract frames with decord from a given segment."""
        import decord
        from decord import VideoReader

        vr = VideoReader(video_path)
        total_frames = len(vr)
        video_fps = vr.get_avg_fps()
        # Convert start/end times to frame indices
        start_frame = int(start_sec * video_fps)
        end_frame = min(int(end_sec * video_fps), total_frames - 1)
        if start_frame >= end_frame:
            return [], [], 0.0
        working_frames = end_frame - start_frame + 1
        nframes = _smart_nframes(
            working_frames,
            video_fps,
            self.frame_factor,
            target_fps,
            min_frames,
            max_frames,
        )
        indices = torch.linspace(start_frame, end_frame, nframes).round().long()
        frames_tensor = vr.get_batch(indices).cpu()  # NHWC
        frames = []
        timestamps = []
        for i, frame_idx in enumerate(indices):
            frame = frames_tensor[i].numpy()
            pil_image = Image.fromarray(frame).convert("RGB")
            # Compute timestamp
            sec = frame_idx.item() / video_fps
            mm = int(sec // 60)
            ss = int(sec % 60)
            timestamps.append(f"{mm:02d}:{ss:02d}")
            # Resize
            w, h = pil_image.size
            rh, rw = smart_resize(
                h, w,
                factor=8,
                min_pixels=self.min_pixels,
                max_pixels=self.max_pixels,
                max_ratio=self.max_ratio,
            )
            pil_image = pil_image.resize((rw, rh), Image.Resampling.LANCZOS)
            frames.append(pil_image)
        return frames, timestamps, end_sec - start_sec

    def _extract_frames_torchvision(
        self,
        video_path: str,
        start_sec: float,
        end_sec: float,
        target_fps: float,
        min_frames: int,
        max_frames: int,
    ) -> Tuple[List[Image.Image], List[str], float]:
        """Extract frames with torchvision from a given segment."""
        from torchvision import io

        # Read the entire video (beware of memory usage on large videos!)
        vid, _, info = io.read_video(
            video_path,
            start_pts=0,
            end_pts=None,
            pts_unit="sec",
            output_format="TCHW",
        )
        total_frames = vid.size(0)
        video_fps = info["video_fps"]
        # Convert start/end times to frame indices
        start_frame = int(start_sec * video_fps)
        end_frame = min(int(end_sec * video_fps), total_frames - 1)
        if start_frame >= end_frame:
            return [], [], 0.0
        working_frames = end_frame - start_frame + 1
        nframes = _smart_nframes(
            working_frames,
            video_fps,
            self.frame_factor,
            target_fps,
            min_frames,
            max_frames,
        )
        indices = torch.linspace(start_frame, end_frame, nframes).round().long()
        frames = []
        timestamps = []
        for idx in indices:
            frame = vid[idx].permute(1, 2, 0).numpy()
            pil_image = Image.fromarray(frame).convert("RGB")
            sec = idx.item() / video_fps
            mm = int(sec // 60)
            ss = int(sec % 60)
            timestamps.append(f"{mm:02d}:{ss:02d}")
            w, h = pil_image.size
            rh, rw = smart_resize(
                h, w,
                factor=8,
                min_pixels=self.min_pixels,
                max_pixels=self.max_pixels,
                max_ratio=self.max_ratio,
            )
            pil_image = pil_image.resize((rw, rh), Image.Resampling.LANCZOS)
            frames.append(pil_image)
        return frames, timestamps, end_sec - start_sec
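
# Minimal usage sketch for the extractor (assumes a local "video.mp4"; not run
# at import time):
#     extractor = SmartVideoFrameExtractor()
#     frames, timestamps, span = extractor.extract_frames(
#         "video.mp4", start_sec=0.0, end_sec=30.0, target_fps=1.0
#     )
#     # frames     -> list of resized PIL images
#     # timestamps -> ["00:00", "00:01", ...]
#     # span       -> 30.0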

class BatchedVideoHighlightDetector:
    """
    Optimized version of video highlight detection that processes multiple
    segments in parallel using batched inference.
    """

    def __init__(
        self,
        model,
        processor,
        device="cuda",
        batch_size=8,
        max_frames_per_segment=32,
        target_fps=1.0,
    ):
        self.model = model
        self.processor = processor
        self.device = device
        self.batch_size = batch_size
        self.max_frames_per_segment = max_frames_per_segment
        self.target_fps = target_fps

    def _extract_frames_batch(
        self,
        video_path: str,
        segments: List[Tuple[float, float]],
    ) -> List[Tuple[List[Image.Image], List[str], float]]:
        """
        Extract frames from multiple segments using decord's batched reads.
        """
        import decord
        from decord import VideoReader

        decord.bridge.set_bridge("torch")
        # Open the video once for all segments
        vr = VideoReader(video_path)
        video_fps = vr.get_avg_fps()
        results = []
        for start_sec, end_sec in segments:
            # Convert times to frame indices
            start_frame = int(start_sec * video_fps)
            end_frame = min(int(end_sec * video_fps), len(vr) - 1)
            # Decide how many frames to sample (at least one, even for very short segments)
            segment_duration = end_sec - start_sec
            desired_frames = max(
                1,
                min(
                    int(segment_duration * self.target_fps),
                    self.max_frames_per_segment,
                ),
            )
            # Generate frame indices
            indices = torch.linspace(start_frame, end_frame, desired_frames).round().long()
            # Extract frames in one batched read
            frames_tensor = vr.get_batch(indices).cpu()  # NHWC format
            # Convert to PIL and generate timestamps
            frames = []
            timestamps = []
            for i, frame_idx in enumerate(indices):
                frame = frames_tensor[i].numpy()
                pil_image = Image.fromarray(frame).convert("RGB")
                # Resize while maintaining aspect ratio
                w, h = pil_image.size
                scale = min(384 / w, 384 / h)
                new_w = int(w * scale)
                new_h = int(h * scale)
                pil_image = pil_image.resize((new_w, new_h), Image.Resampling.LANCZOS)
                frames.append(pil_image)
                # Generate timestamp
                sec = frame_idx.item() / video_fps
                mm = int(sec // 60)
                ss = int(sec % 60)
                timestamps.append(f"{mm:02d}:{ss:02d}")
            results.append((frames, timestamps, segment_duration))
        return results
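
    # Sketch of what _extract_frames_batch returns for two 30 s segments at
    # target_fps=1.0 (purely illustrative; "detector" and "video.mp4" are placeholders):
    #     results = detector._extract_frames_batch("video.mp4", [(0.0, 30.0), (30.0, 60.0)])
    #     # [ (frames_0, ["00:00", ..., "00:29"], 30.0),
    #     #   (frames_1, ["00:30", ..., "00:59"], 30.0) ]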

    def _prepare_batch_inputs(
        self,
        frame_batches: List[Tuple[List[Image.Image], List[str], float]],
        highlight_types: str,
    ) -> Dict[str, torch.Tensor]:
        """
        Convert a batch of frame sequences into model inputs.
        """
        conversations = []
        all_frames = []
        for frames, timestamps, duration in frame_batches:
            # Build a conversation for each segment
            conversation = [
                {
                    "role": "system",
                    "content": [{
                        "type": "text",
                        "text": "You are a helpful assistant that analyzes videos for specific moments of interest."
                    }]
                },
                {
                    "role": "user",
                    "content": []
                }
            ]
            # Add the video intro
            conversation[1]["content"].append({
                "type": "text",
                "text": f"You are provided the following series of {num2words(len(frames))} frames from a {str(datetime.timedelta(seconds=duration))} [H:MM:SS] video.\n"
            })
            # Add frames with timestamps
            for ts, frame in zip(timestamps, frames):
                conversation[1]["content"].extend([
                    {
                        "type": "text",
                        "text": f"Frame from {ts}:"
                    },
                    {
                        "type": "image"
                    }
                ])
            # Add the highlight-check question
            conversation[1]["content"].append({
                "type": "text",
                "text": (
                    "Do you see any of the following types of highlight moments in these frames?\n\n"
                    "Potential highlights to look for:\n"
                    f"{highlight_types}\n\n"
                    "Only answer yes if you see any of those moments and answer no if you don't."
                )
            })
            conversations.append(conversation)
            all_frames.extend(frames)
        # Convert to model inputs using the processor
        prompts = [
            self.processor.apply_chat_template(conv, add_generation_prompt=True)
            for conv in conversations
        ]
        # Create batched inputs
        model_inputs = self.processor(
            text=prompts,
            images=all_frames,
            return_tensors="pt",
            padding=True,
        ).to(self.device)
        return model_inputs

    def _process_segment_batch(
        self,
        video_path: str,
        segments: List[Tuple[float, float]],
        highlight_types: str,
    ) -> List[bool]:
        """
        Process a batch of segments and return which ones contain highlights.
        """
        # Extract frames for all segments in the batch
        frame_batches = self._extract_frames_batch(video_path, segments)
        # Prepare model inputs
        model_inputs = self._prepare_batch_inputs(frame_batches, highlight_types)
        # Generate responses for the entire batch
        outputs = self.model.generate(
            **model_inputs,
            max_new_tokens=256,
            num_beams=5,
            temperature=0.7,
            do_sample=True,
            use_cache=True,
        )
        # Keep only the assistant part of each decoded response
        responses = [
            self.processor.decode(output, skip_special_tokens=True).lower().split("assistant:")[1]
            for output in outputs
        ]
        # A segment is kept if the model answered "yes"
        return ["yes" in response for response in responses]

    def create_highlight_video(self, video_path: str, output_path: str) -> List[Tuple[float, float]]:
        """
        Main entry point that runs the batched highlight-detection pipeline.
        """
        # Step 1: Analyze video content
        logger.info("Step 1: Analyzing video content...")
        video_description = self.analyze_video_content(video_path)
        logger.info(f"Video description: {video_description}")

        # Step 2: Determine highlight types
        logger.info("Step 2: Determining highlight types...")
        highlight_types = self.determine_highlights(video_description)
        logger.info(f"Looking for highlights: {highlight_types}")

        # Step 3: Split the video into fixed-length segments
        segments = self._get_fixed_30s_segments(video_path)

        # Step 4: Process segments in batches
        logger.info("Step 4: Detecting highlight segments in batches...")
        kept_segments = []
        for i in tqdm(range(0, len(segments), self.batch_size)):
            batch_segments = segments[i:i + self.batch_size]
            keep_flags = self._process_segment_batch(video_path, batch_segments, highlight_types)
            for segment, keep in zip(batch_segments, keep_flags):
                if keep:
                    kept_segments.append(segment)
                    logger.info(f"\tKeeping segment {segment}")

        # Step 5: Create the final video
        if kept_segments:
            logger.info(f"Creating highlight video with {len(kept_segments)} segments...")
            self._concatenate_scenes(video_path, kept_segments, output_path)
        else:
            logger.info("No highlights detected")
        return kept_segments

    def analyze_video_content(self, video_path: str, sample_rate: float = 0.2) -> str:
        """
        Step 1: Sample frames from the full video and get a general description.
        """
        extractor = SmartVideoFrameExtractor()
        duration = get_video_duration_seconds(video_path)
        # Sample frames from the entire video
        frames, timestamps, duration_seconds = extractor.extract_frames(
            video_path,
            start_sec=0,
            end_sec=duration,
            target_fps=sample_rate,
            max_frames=32  # Limit total frames so we don't overwhelm the model
        )
        # Build a conversation asking for a video description
        system_message = "You are a helpful assistant that can understand videos. Describe what type of video this is and what's happening in it."
        conversation = [
            {
                "role": "system",
                "content": [{"type": "text", "text": system_message}]
            },
            {
                "role": "user",
                "content": []
            }
        ]
        # Add the video intro using DEFAULT_VIDEO_INTRO
        conversation[1]["content"].append({
            "type": "text",
            "text": DEFAULT_VIDEO_INTRO.format(
                frame_count=num2words(len(frames)),
                video_duration=str(datetime.timedelta(seconds=duration_seconds))
            )
        })
        # Add frames with timestamps
        for ts, frame in zip(timestamps, frames):
            conversation[1]["content"].extend([
                {
                    "type": "text",
                    "text": f"{FRAME_TIMESTAMP_MESSAGE} {ts}:"
                },
                {
                    "type": "image"
                }
            ])
        # Add the question
        conversation[1]["content"].append({
            "type": "text",
            "text": "What type of video is this and what's happening in it? Be specific about the content type and general activities you observe."
        })
        # Get the model response
        prompt = self.processor.apply_chat_template(conversation, add_generation_prompt=True)
        model_inputs = self.processor(
            text=prompt,
            images=frames,
            return_tensors="pt"
        ).to(self.model.device)
        outputs = self.model.generate(
            **model_inputs,
            max_new_tokens=512,
            num_beams=5,
            temperature=0.7,
            do_sample=True,
            use_cache=True
        )
        return self.processor.decode(outputs[0], skip_special_tokens=True).split("Assistant:")[1]

    def determine_highlights(self, video_description: str) -> str:
        """
        Step 2: Based on the video description, determine what would constitute highlights.
        """
        conversation = [{
            "role": "system",
            "content": [{"type": "text", "text": "You are a professional video editor specializing in creating viral highlight reels. You understand that the most engaging highlights are brief and focus only on exceptional moments that are statistically rare or particularly dramatic. For sports content, you typically select only 3-5 of the most remarkable moments that would make viewers say 'I can't believe that happened!'"}]
        }, {
            "role": "user",
            "content": [{
                "type": "text",
                "text": (
                    "Here is a description of a video:\n\n"
                    f"{video_description}\n\n"
                    "Based on this description, list which rare segments should be included in a best-of-the-best highlight."
                )
            }]
        }]
        # Alternative phrasings kept from earlier drafts:
        # "Based on this description, what unique segments should be included in a highlight video? List moments that cannot be missed and their description, nothing else."
        # "Based on this description, what unique segments should be included in a highlight video? List moments that cannot be missed."
        prompt = self.processor.apply_chat_template(conversation, add_generation_prompt=True)
        model_inputs = self.processor(text=prompt, return_tensors="pt").to(self.model.device)
        outputs = self.model.generate(
            **model_inputs,
            max_new_tokens=256,
            num_beams=5,
            temperature=0.7,
            do_sample=True
        )
        return self.processor.decode(outputs[0], skip_special_tokens=True).split("Assistant:")[1]

    def _get_fixed_30s_segments(self, video_path: str) -> List[Tuple[float, float]]:
        """Split the video into fixed 30-second (start_sec, end_sec) segments."""
        duration = self._get_video_duration_seconds(video_path)
        segments = []
        start = 0.0
        block_size = 30.0  # matches the 30-second blocks the method name promises
        while start < duration:
            end = min(start + block_size, duration)
            segments.append((start, end))
            start = end
        return segments

    def _get_video_duration_seconds(self, video_path: str) -> float:
        """Use ffprobe to get the video duration in seconds."""
        cmd = [
            "ffprobe",
            "-v", "quiet",
            "-print_format", "json",
            "-show_format",
            video_path
        ]
        result = subprocess.run(cmd, capture_output=True, text=True)
        info = json.loads(result.stdout)
        return float(info["format"]["duration"])

    def _concatenate_scenes(
        self,
        video_path: str,
        scene_times: List[Tuple[float, float]],
        output_path: str
    ):
        """
        Concatenate the selected (start_sec, end_sec) scenes from 'video_path' into
        'output_path' using a single ffmpeg filter_complex instead of multiple
        intermediate files.
        """
        if not scene_times:
            logger.warning("No scenes to concatenate, skipping.")
            return
        # Build the filter_complex string.
        # For each scene i we create two filter chains: one for video [vN] and one
        # for audio [aN], then feed them all into the concat filter.
        filter_complex_parts = []
        concat_inputs = []
        for i, (start_sec, end_sec) in enumerate(scene_times):
            filter_complex_parts.append(
                f"[0:v]trim=start={start_sec}:end={end_sec},"
                f"setpts=PTS-STARTPTS[v{i}];"
            )
            filter_complex_parts.append(
                f"[0:a]atrim=start={start_sec}:end={end_sec},"
                f"asetpts=PTS-STARTPTS[a{i}];"
            )
            concat_inputs.append(f"[v{i}][a{i}]")
        # Now build the actual concat invocation:
        # n = number of segments to concat, v=1 video stream, a=1 audio stream.
        concat_filter = f"{''.join(concat_inputs)}concat=n={len(scene_times)}:v=1:a=1[outv][outa]"
        filter_complex = "".join(filter_complex_parts) + concat_filter
        # Build the ffmpeg command
        cmd = [
            "ffmpeg",
            "-y",  # overwrite output if it exists
            "-i", video_path,
            "-filter_complex", filter_complex,
            "-map", "[outv]",
            "-map", "[outa]",
            "-c:v", "libx264",  # or any video codec of your choice
            "-c:a", "aac",      # or any audio codec of your choice
            output_path
        ]
        logger.info(f"Running ffmpeg command: {' '.join(cmd)}")
        subprocess.run(cmd, check=True)
        logger.info(f"Final video saved to: {output_path}")

def load_model(
    checkpoint_path: Optional[str] = None,
    base_model_id: str = "HuggingFaceTB/SmolVLM2-2.2B-Instruct",
    device: str = "cuda"
):
    """Load the model and processor."""
    if device == "cuda" and not torch.cuda.is_available():
        raise RuntimeError("CUDA requested but not available")
    if device == "cuda":
        torch.cuda.empty_cache()
        # Initialize CUDA
        torch.cuda.init()

    video_target_size = 384
    processor = AutoProcessor.from_pretrained(base_model_id)
    processor.image_processor.size = {"longest_edge": video_target_size}
    processor.image_processor.do_resize = True
    processor.image_processor.do_image_splitting = False

    model_kwargs = {
        "torch_dtype": torch.bfloat16,
        "device_map": device
    }
    if checkpoint_path:
        model = SmolVLMForConditionalGeneration.from_pretrained(
            checkpoint_path,
            **model_kwargs
        )
    else:
        model = SmolVLMForConditionalGeneration.from_pretrained(
            base_model_id,
            **model_kwargs
        )
    return model, processor
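
# Typical usage sketch (paths are placeholders; passing checkpoint_path=None falls
# back to the base model on the Hub):
#     model, processor = load_model(checkpoint_path=None, device="cuda")
#     detector = BatchedVideoHighlightDetector(model, processor, device="cuda", batch_size=8)
#     detector.create_highlight_video("input.mp4", "highlights.mp4")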

def main():
    if len(sys.argv) < 3:
        print("Usage: python video_highlight_detector.py <input_video> <output_video>")
        sys.exit(1)
    video_path = sys.argv[1]
    output_path = sys.argv[2]

    checkpoint_path = "/fsx/miquel/smolvlmvideo/checkpoints/final-visionUnfrozen-balanced/checkpoint-6550"
    base_model_id = "HuggingFaceTB/SmolVLM-2.2B-Instruct"
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model, processor = load_model(checkpoint_path, base_model_id, device)
    detector = BatchedVideoHighlightDetector(model, processor, device=device)

    # Create the highlight video
    highlight_segments = detector.create_highlight_video(video_path, output_path)
    print(f"Created highlight video with {len(highlight_segments)} segments")


if __name__ == "__main__":
    main()