# Author: Huzheng Yang
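# Gradio demo: visualize NCUT (Normalized Cuts) embeddings of vision-backbone features.
# Runs on Hugging Face ZeroGPU when the `spaces` package is available, otherwise on a local GPU/CPU.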
# %%
USE_SPACES = True

if USE_SPACES:  # huggingface ZeroGPU
    try:
        import spaces
    except ImportError:
        USE_SPACES = False  # run on standard GPU

import os
import time

import gradio as gr
import numpy as np
import torch
from PIL import Image

from backbone import extract_features
from ncut_pytorch import NCUT, eigenvector_to_rgb
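
# compute_ncut: flatten (n_images, h, w, c) backbone features into graph nodes, solve the NCUT
# eigenvectors, then map the eigenvectors to RGB colors with a t-SNE/UMAP spectral embedding.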
def compute_ncut(
    features,
    num_eig=100,
    num_sample_ncut=10000,
    affinity_focal_gamma=0.3,
    knn_ncut=10,
    knn_tsne=10,
    embedding_method="UMAP",
    num_sample_tsne=300,
    perplexity=150,
    n_neighbors=150,
    min_dist=0.1,
    sampling_method="fps",
    metric="cosine",
):
    logging_str = ""

    num_nodes = np.prod(features.shape[:3])
    if num_nodes / 2 < num_eig:
        # raise gr.Error("Number of eigenvectors should be less than half the number of nodes.")
        gr.Warning(
            "Number of eigenvectors should be less than half the number of nodes.\n"
            f"Setting num_eig to {num_nodes // 2 - 1}."
        )
        num_eig = num_nodes // 2 - 1
        logging_str += (
            "Number of eigenvectors should be less than half the number of nodes.\n"
            f"Setting num_eig to {num_nodes // 2 - 1}.\n"
        )

    start = time.time()
    eigvecs, eigvals = NCUT(
        num_eig=num_eig,
        num_sample=num_sample_ncut,
        device="cuda" if torch.cuda.is_available() else "cpu",
        affinity_focal_gamma=affinity_focal_gamma,
        knn=knn_ncut,
        sample_method=sampling_method,
        distance=metric,
    ).fit_transform(features.reshape(-1, features.shape[-1]))
    # print(f"NCUT time: {time.time() - start:.2f}s")
    logging_str += f"NCUT time: {time.time() - start:.2f}s\n"

    start = time.time()
    _, rgb = eigenvector_to_rgb(
        eigvecs,
        method=embedding_method,
        num_sample=num_sample_tsne,
        perplexity=perplexity,
        n_neighbors=n_neighbors,
        min_distance=min_dist,
        knn=knn_tsne,
        device="cuda" if torch.cuda.is_available() else "cpu",
    )
    logging_str += f"{embedding_method} time: {time.time() - start:.2f}s\n"

    rgb = rgb.reshape(features.shape[:3] + (3,))
    return rgb, logging_str, eigvecs

def dont_use_too_much_green(image_rgb):
    # reorder the color channels so the central 40% crop of the images is red-leading
    # (the channel with the largest mean in the crop becomes the red channel)
    x1, x2 = int(image_rgb.shape[1] * 0.3), int(image_rgb.shape[1] * 0.7)
    y1, y2 = int(image_rgb.shape[2] * 0.3), int(image_rgb.shape[2] * 0.7)
    sum_values = image_rgb[:, x1:x2, y1:y2].mean((0, 1, 2))
    sorted_indices = sum_values.argsort(descending=True)
    image_rgb = image_rgb[:, :, :, sorted_indices]
    return image_rgb
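
# Convert a batch of (h, w, 3) float maps in [0, 1] to 256x256 PIL thumbnails for the gallery.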
def to_pil_images(images):
    return [
        Image.fromarray((image * 255).cpu().numpy().astype(np.uint8)).resize(
            (256, 256), Image.Resampling.NEAREST
        )
        for image in images
    ]


def pil_images_to_video(images, output_path, fps=5):
    # from pil images to numpy
    images = [np.array(image) for image in images]
    # print("Saving video to", output_path)
    import cv2
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    height, width, _ = images[0].shape
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
    for image in images:
        out.write(cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
    out.release()
    return output_path

# save up to 100 videos on disk, evicting the oldest file when the cache is full
class VideoCache:
    def __init__(self, max_videos=100):
        self.max_videos = max_videos
        self.videos = {}

    def add_video(self, video_path):
        if len(self.videos) >= self.max_videos:
            # evict the oldest entry (dicts preserve insertion order)
            pop_path = next(iter(self.videos))
            del self.videos[pop_path]
            try:
                os.remove(pop_path)
            except OSError:
                pass
        self.videos[video_path] = video_path

    def get_video(self, video_path):
        return self.videos.get(video_path, None)


video_cache = VideoCache()
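
# Random /tmp filename for a rendered NCUT video.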
def get_random_path(length=10):
    import random
    import string
    name = ''.join(random.choices(string.ascii_lowercase + string.digits, k=length))
    path = f'/tmp/{name}.mp4'
    return path


default_images = ['./images/image_0.jpg', './images/image_1.jpg', './images/image_2.jpg', './images/image_3.jpg', './images/image_5.jpg']
default_outputs = ['./images/ncut_0.jpg', './images/ncut_1.jpg', './images/ncut_2.jpg', './images/ncut_3.jpg', './images/ncut_5.jpg']
default_outputs_independent = ['./images/ncut_0_independent.jpg', './images/ncut_1_independent.jpg', './images/ncut_2_independent.jpg', './images/ncut_3_independent.jpg', './images/ncut_5_independent.jpg']
downscaled_images = ['./images/image_0_small.jpg', './images/image_1_small.jpg', './images/image_2_small.jpg', './images/image_3_small.jpg', './images/image_5_small.jpg']
downscaled_outputs = ['./images/ncut_0_small.jpg', './images/ncut_1_small.jpg', './images/ncut_2_small.jpg', './images/ncut_3_small.jpg', './images/ncut_5_small.jpg']

example_items = downscaled_images[:3] + downscaled_outputs[:3]
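
# ncut_run: the main worker. Handles four modes: joint NCUT across all images (AlignedCut),
# per-image NCUT (legacy "old school" NCut), recursive NCUT, and video output.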
def ncut_run(
    images,
    model_name="SAM(sam_vit_b)",
    layer=-1,
    num_eig=100,
    node_type="block",
    affinity_focal_gamma=0.3,
    num_sample_ncut=10000,
    knn_ncut=10,
    embedding_method="UMAP",
    num_sample_tsne=1000,
    knn_tsne=10,
    perplexity=500,
    n_neighbors=500,
    min_dist=0.1,
    sampling_method="fps",
    old_school_ncut=False,
    recursion=False,
    recursion_l2_n_eigs=50,
    recursion_l3_n_eigs=20,
    recursion_metric="euclidean",
    video_output=False,
):
    logging_str = ""

    if perplexity >= num_sample_tsne or n_neighbors >= num_sample_tsne:
        # raise gr.Error("Perplexity must be less than the number of samples for t-SNE.")
        gr.Warning(
            "Perplexity/n_neighbors must be less than the number of samples.\n"
            f"Setting Perplexity to {num_sample_tsne - 1}."
        )
        logging_str += (
            "Perplexity/n_neighbors must be less than the number of samples.\n"
            f"Setting Perplexity to {num_sample_tsne - 1}.\n"
        )
        perplexity = num_sample_tsne - 1
        n_neighbors = num_sample_tsne - 1

    node_type = node_type.split(":")[0].strip()

    images = [image[0] for image in images]  # remove the label

    start = time.time()
    features = extract_features(
        images, model_name=model_name, node_type=node_type, layer=layer
    )
    # print(f"Feature extraction time (gpu): {time.time() - start:.2f}s")
    logging_str += f"Backbone time: {time.time() - start:.2f}s\n"

    if recursion:
        rgbs = []
        inp = features
        for i, n_eigs in enumerate([num_eig, recursion_l2_n_eigs, recursion_l3_n_eigs]):
            logging_str += f"Recursion #{i + 1}\n"
            rgb, _logging_str, eigvecs = compute_ncut(
                inp,
                num_eig=n_eigs,
                num_sample_ncut=num_sample_ncut,
                affinity_focal_gamma=affinity_focal_gamma,
                knn_ncut=knn_ncut,
                knn_tsne=knn_tsne,
                num_sample_tsne=num_sample_tsne,
                embedding_method=embedding_method,
                perplexity=perplexity,
                n_neighbors=n_neighbors,
                min_dist=min_dist,
                sampling_method=sampling_method,
                metric="cosine" if i == 0 else recursion_metric,
            )
            logging_str += _logging_str
            rgb = dont_use_too_much_green(rgb)
            rgbs.append(to_pil_images(rgb))
            inp = eigvecs.reshape(*features.shape[:3], -1)
        return rgbs[0], rgbs[1], rgbs[2], logging_str

    if old_school_ncut:  # individual images
        logging_str += "Running NCut for each image independently\n"
        rgb = []
        for i_image in range(features.shape[0]):
            feature = features[i_image]
            _rgb, _logging_str, _ = compute_ncut(
                feature[None],
                num_eig=num_eig,
                num_sample_ncut=num_sample_ncut,
                affinity_focal_gamma=affinity_focal_gamma,
                knn_ncut=knn_ncut,
                knn_tsne=knn_tsne,
                num_sample_tsne=num_sample_tsne,
                embedding_method=embedding_method,
                perplexity=perplexity,
                n_neighbors=n_neighbors,
                min_dist=min_dist,
                sampling_method=sampling_method,
            )
            logging_str += _logging_str
            rgb.append(_rgb[0])

    if not old_school_ncut:  # joint across all images
        rgb, _logging_str, _ = compute_ncut(
            features,
            num_eig=num_eig,
            num_sample_ncut=num_sample_ncut,
            affinity_focal_gamma=affinity_focal_gamma,
            knn_ncut=knn_ncut,
            knn_tsne=knn_tsne,
            num_sample_tsne=num_sample_tsne,
            embedding_method=embedding_method,
            perplexity=perplexity,
            n_neighbors=n_neighbors,
            min_dist=min_dist,
            sampling_method=sampling_method,
        )
        logging_str += _logging_str
        rgb = dont_use_too_much_green(rgb)

    if video_output:
        video_path = get_random_path()
        video_cache.add_video(video_path)
        pil_images_to_video(to_pil_images(rgb), video_path)
        return video_path, logging_str
    else:
        return to_pil_images(rgb), logging_str
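
# Wrapper that catches exceptions so a single failed request returns an error message
# instead of crashing the worker.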
def _ncut_run(*args, **kwargs):
    try:
        return ncut_run(*args, **kwargs)
    except Exception as e:
        gr.Warning(str(e))  # gr.Error must be raised to be shown; warn instead and keep the return value
        return [], "Error: " + str(e)

if USE_SPACES:  # huggingface ZeroGPU: each tier requests a GPU slot of a different length
    # NOTE: the duration values below are placeholders, not taken from the original source;
    # tune them to the Space's ZeroGPU quota.
    @spaces.GPU(duration=30)
    def quick_run(*args, **kwargs):
        return _ncut_run(*args, **kwargs)

    @spaces.GPU(duration=60)
    def long_run(*args, **kwargs):
        return _ncut_run(*args, **kwargs)

    @spaces.GPU(duration=120)
    def longer_run(*args, **kwargs):
        return _ncut_run(*args, **kwargs)

    @spaces.GPU(duration=300)
    def super_duper_long_run(*args, **kwargs):
        return _ncut_run(*args, **kwargs)

if not USE_SPACES:  # standard GPU/CPU: no duration budget, the tiers are identical
    def quick_run(*args, **kwargs):
        return _ncut_run(*args, **kwargs)

    def long_run(*args, **kwargs):
        return _ncut_run(*args, **kwargs)

    def longer_run(*args, **kwargs):
        return _ncut_run(*args, **kwargs)

    def super_duper_long_run(*args, **kwargs):
        return _ncut_run(*args, **kwargs)
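
# decord is only needed for the Video tab; frames are subsampled evenly down to max_frames.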
def extract_video_frames(video_path, max_frames=100):
    from decord import VideoReader
    vr = VideoReader(video_path)
    num_frames = len(vr)
    if num_frames > max_frames:
        gr.Warning(f"Video has {num_frames} frames. Only using {max_frames} frames. Evenly spaced.")
        frame_idx = np.linspace(0, num_frames - 1, max_frames, dtype=int).tolist()
    else:
        frame_idx = list(range(num_frames))
    frames = vr.get_batch(frame_idx).asnumpy()
    # return as list of PIL images
    return [(Image.fromarray(frames[i]), "") for i in range(frames.shape[0])]
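
# run_fn: the Gradio callback shared by all tabs. It normalizes the inputs (video -> frames,
# "fps" -> "farthest") and dispatches to a run function sized for the expected workload.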
def run_fn(
    images,
    model_name="SAM(sam_vit_b)",
    layer=-1,
    num_eig=100,
    node_type="block",
    affinity_focal_gamma=0.3,
    num_sample_ncut=10000,
    knn_ncut=10,
    embedding_method="UMAP",
    num_sample_tsne=1000,
    knn_tsne=10,
    perplexity=500,
    n_neighbors=500,
    min_dist=0.1,
    sampling_method="fps",
    old_school_ncut=False,
    max_frames=100,
    recursion=False,
    recursion_l2_n_eigs=50,
    recursion_l3_n_eigs=20,
    recursion_metric="euclidean",
):
    # print("Running...")
    if images is None:
        gr.Warning("No images selected.")
        return [], "No images selected."

    video_output = False
    if isinstance(images, str):
        images = extract_video_frames(images, max_frames=max_frames)
        video_output = True

    if sampling_method == "fps":
        sampling_method = "farthest"

    kwargs = {
        "model_name": model_name,
        "layer": layer,
        "num_eig": num_eig,
        "node_type": node_type,
        "affinity_focal_gamma": affinity_focal_gamma,
        "num_sample_ncut": num_sample_ncut,
        "knn_ncut": knn_ncut,
        "embedding_method": embedding_method,
        "num_sample_tsne": num_sample_tsne,
        "knn_tsne": knn_tsne,
        "perplexity": perplexity,
        "n_neighbors": n_neighbors,
        "min_dist": min_dist,
        "sampling_method": sampling_method,
        "old_school_ncut": old_school_ncut,
        "recursion": recursion,
        "recursion_l2_n_eigs": recursion_l2_n_eigs,
        "recursion_l3_n_eigs": recursion_l3_n_eigs,
        "recursion_metric": recursion_metric,
        "video_output": video_output,
    }
    # print(kwargs)

    num_images = len(images)
    if num_images > 100:
        return super_duper_long_run(images, **kwargs)
    if recursion:
        return longer_run(images, **kwargs)
    if num_images > 50:
        return longer_run(images, **kwargs)
    if old_school_ncut:
        return longer_run(images, **kwargs)
    if num_images > 10:
        return long_run(images, **kwargs)
    if embedding_method == "UMAP":
        if perplexity >= 250 or num_sample_tsne >= 500:
            return longer_run(images, **kwargs)
        return long_run(images, **kwargs)
    if embedding_method == "t-SNE":
        if perplexity >= 250 or num_sample_tsne >= 500:
            return long_run(images, **kwargs)
        return quick_run(images, **kwargs)
    return quick_run(images, **kwargs)
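
# Gradio UI builders, shared across the tabs below.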
def make_input_images_section():
    gr.Markdown('### Input Images')
    input_gallery = gr.Gallery(value=None, label="Select images", show_label=False, elem_id="images", columns=[3], rows=[1], object_fit="contain", height="auto", type="pil", show_share_button=False)
    submit_button = gr.Button("🔴RUN", elem_id="submit_button")
    clear_images_button = gr.Button("🗑️Clear", elem_id='clear_button')
    return input_gallery, submit_button, clear_images_button

def make_input_video_section():
    gr.Markdown('### Input Video')
    input_gallery = gr.Video(value=None, label="Select video", elem_id="video-input", height="auto", show_share_button=False)
    gr.Markdown('_the image backbone extracts features from each frame; NCUT is computed across all frames_')
    # max_frames_number = gr.Number(100, label="Max frames", elem_id="max_frames")
    max_frames_number = gr.Slider(1, 200, step=1, label="Max frames", value=100, elem_id="max_frames")
    submit_button = gr.Button("🔴RUN", elem_id="submit_button")
    clear_images_button = gr.Button("🗑️Clear", elem_id='clear_button')
    return input_gallery, submit_button, clear_images_button, max_frames_number

def make_example_images_section():
    gr.Markdown('### Load Images 👇')
    load_images_button = gr.Button("Load Example", elem_id="load-images-button")
    example_gallery = gr.Gallery(value=example_items, label="Example Set A", show_label=False, columns=[3], rows=[2], object_fit="scale-down", height="200px", show_share_button=False, elem_id="example-gallery")
    hide_button = gr.Button("Hide Example", elem_id="hide-button")
    hide_button.click(
        fn=lambda: gr.update(visible=False),
        outputs=example_gallery
    )
    return load_images_button, example_gallery, hide_button


def make_example_video_section():
    gr.Markdown('### Load Video 👇')
    load_video_button = gr.Button("Load Example", elem_id="load-video-button")
    return load_video_button

def make_dataset_images_section():
    with gr.Accordion("➡️ Click to expand: Load from dataset", open=False):
        dataset_names = [
            'UCSC-VLAA/Recap-COCO-30K',
            'nateraw/pascal-voc-2012',
            'johnowhitaker/imagenette2-320',
            'jainr3/diffusiondb-pixelart',
            'nielsr/CelebA-faces',
            'JapanDegitalMaterial/Places_in_Japan',
            'Borismile/Anime-dataset',
        ]
        dataset_dropdown = gr.Dropdown(dataset_names, label="Dataset name", value="UCSC-VLAA/Recap-COCO-30K", elem_id="dataset")
        num_images_slider = gr.Slider(1, 200, step=1, label="Number of images", value=9, elem_id="num_images")
        # random_seed_slider = gr.Number(0, label="Random seed", elem_id="random_seed")
        random_seed_slider = gr.Slider(0, 1000, step=1, label="Random seed", value=1, elem_id="random_seed")
        load_dataset_button = gr.Button("Load Dataset", elem_id="load-dataset-button")

    def load_dataset_images(dataset_name, num_images=10, random_seed=42):
        from datasets import load_dataset
        try:
            dataset = load_dataset(dataset_name, trust_remote_code=True)
            key = list(dataset.keys())[0]
            dataset = dataset[key]
        except Exception as e:
            gr.Warning(f"Error loading dataset {dataset_name}: {e}")  # show the failure without raising
            return None
        if num_images > len(dataset):
            num_images = len(dataset)
        image_idx = np.random.RandomState(random_seed).choice(len(dataset), num_images, replace=False)
        image_idx = image_idx.tolist()
        images = [dataset[i]['image'] for i in image_idx]
        return images

    # `input_gallery` is the module-level gallery created by the enclosing tab before this builder is called
    load_dataset_button.click(load_dataset_images, inputs=[dataset_dropdown, num_images_slider, random_seed_slider], outputs=[input_gallery])
    return dataset_dropdown, num_images_slider, random_seed_slider, load_dataset_button

def make_output_images_section():
    gr.Markdown('### Output Images')
    output_gallery = gr.Gallery(value=[], label="NCUT Embedding", show_label=False, elem_id="ncut", columns=[3], rows=[1], object_fit="contain", height="auto")
    return output_gallery
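
# Parameter panel used by every tab; each call creates a fresh set of controls.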
def make_parameters_section():
    gr.Markdown('### Parameters')
    model_dropdown = gr.Dropdown(["SAM(sam_vit_b)", "MobileSAM", "DiNO(dinov2_vitb14_reg)", "CLIP(openai/clip-vit-base-patch16)", "MAE(vit_base)"], label="Backbone", value="SAM(sam_vit_b)", elem_id="model_name")
    layer_slider = gr.Slider(0, 11, step=1, label="Backbone: Layer index", value=11, elem_id="layer")
    node_type_dropdown = gr.Dropdown(["attn: attention output", "mlp: mlp output", "block: sum of residual"], label="Backbone: Layer type", value="block: sum of residual", elem_id="node_type", info="which feature to take from each layer?")
    num_eig_slider = gr.Slider(1, 1000, step=1, label="NCUT: Number of eigenvectors", value=100, elem_id="num_eig", info='increase for more clusters')
    affinity_focal_gamma_slider = gr.Slider(0.01, 1, step=0.01, label="NCUT: Affinity focal gamma", value=0.5, elem_id="affinity_focal_gamma", info="decrease for sharper segmentation")
    with gr.Accordion("➡️ Click to expand: more parameters", open=False):
        num_sample_ncut_slider = gr.Slider(100, 50000, step=100, label="NCUT: num_sample", value=10000, elem_id="num_sample_ncut", info="Nyström approximation")
        sampling_method_dropdown = gr.Dropdown(["fps", "random"], label="NCUT: Sampling method", value="fps", elem_id="sampling_method", info="Nyström approximation")
        knn_ncut_slider = gr.Slider(1, 100, step=1, label="NCUT: KNN", value=10, elem_id="knn_ncut", info="Nyström approximation")
        embedding_method_dropdown = gr.Dropdown(["tsne_3d", "umap_3d", "umap_sphere", "tsne_2d", "umap_2d"], label="Coloring method", value="tsne_3d", elem_id="embedding_method")
        num_sample_tsne_slider = gr.Slider(100, 1000, step=100, label="t-SNE/UMAP: num_sample", value=300, elem_id="num_sample_tsne", info="Nyström approximation")
        knn_tsne_slider = gr.Slider(1, 100, step=1, label="t-SNE/UMAP: KNN", value=10, elem_id="knn_tsne", info="Nyström approximation")
        perplexity_slider = gr.Slider(10, 500, step=10, label="t-SNE: Perplexity", value=150, elem_id="perplexity")
        n_neighbors_slider = gr.Slider(10, 500, step=10, label="UMAP: n_neighbors", value=150, elem_id="n_neighbors")
        min_dist_slider = gr.Slider(0.1, 1, step=0.1, label="UMAP: min_dist", value=0.1, elem_id="min_dist")
    return [model_dropdown, layer_slider, node_type_dropdown, num_eig_slider,
            affinity_focal_gamma_slider, num_sample_ncut_slider, knn_ncut_slider,
            embedding_method_dropdown, num_sample_tsne_slider, knn_tsne_slider,
            perplexity_slider, n_neighbors_slider, min_dist_slider,
            sampling_method_dropdown]
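
# App layout: one tab per mode (AlignedCut, legacy NCut, Recursive Cut, Video, Text, Compare).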
with gr.Blocks() as demo:

    with gr.Tab('AlignedCut'):
        with gr.Row():
            with gr.Column(scale=5, min_width=200):
                input_gallery, submit_button, clear_images_button = make_input_images_section()
                load_images_button, example_gallery, hide_button = make_example_images_section()
                dataset_dropdown, num_images_slider, random_seed_slider, load_dataset_button = make_dataset_images_section()
            with gr.Column(scale=5, min_width=200):
                output_gallery = make_output_images_section()
                [
                    model_dropdown, layer_slider, node_type_dropdown, num_eig_slider,
                    affinity_focal_gamma_slider, num_sample_ncut_slider, knn_ncut_slider,
                    embedding_method_dropdown, num_sample_tsne_slider, knn_tsne_slider,
                    perplexity_slider, n_neighbors_slider, min_dist_slider,
                    sampling_method_dropdown
                ] = make_parameters_section()
                # logging text box
                logging_text = gr.Textbox("Logging information", label="Logging", elem_id="logging", type="text", placeholder="Logging information")

        load_images_button.click(lambda: default_images, outputs=input_gallery)
        clear_images_button.click(lambda: ([], []), outputs=[input_gallery, output_gallery])
        submit_button.click(
            run_fn,
            inputs=[
                input_gallery, model_dropdown, layer_slider, num_eig_slider, node_type_dropdown,
                affinity_focal_gamma_slider, num_sample_ncut_slider, knn_ncut_slider,
                embedding_method_dropdown, num_sample_tsne_slider, knn_tsne_slider,
                perplexity_slider, n_neighbors_slider, min_dist_slider, sampling_method_dropdown
            ],
            outputs=[output_gallery, logging_text]
        )

    with gr.Tab('NCut'):
        gr.Markdown('#### NCut (Legacy), not aligned, no Nyström approximation')
        gr.Markdown('Each image is solved independently, <em>color is <b>not</b> aligned across images</em>')
        gr.Markdown('---')
        gr.Markdown('<p style="text-align: center;"><b>NCut vs. AlignedCut</b></p>')
        with gr.Row():
            with gr.Column(scale=5, min_width=200):
                gr.Markdown('#### Pros')
                gr.Markdown('- Simpler solution; uses fewer eigenvectors.')
                gr.Markdown('- Exact solution; no Nyström approximation.')
            with gr.Column(scale=5, min_width=200):
                gr.Markdown('#### Cons')
                gr.Markdown('- Not aligned: distance is not preserved across images. No pseudo-labeling or correspondence.')
                gr.Markdown('- Poor complexity scaling; cannot handle a large number of pixels.')
        gr.Markdown('---')
        with gr.Row():
            with gr.Column(scale=5, min_width=200):
                gr.Markdown(' ')
            with gr.Column(scale=5, min_width=200):
                gr.Markdown('<em>color is <b>not</b> aligned across images</em> 👇')
        with gr.Row():
            with gr.Column(scale=5, min_width=200):
                input_gallery, submit_button, clear_images_button = make_input_images_section()
                load_images_button, example_gallery, hide_button = make_example_images_section()
                dataset_dropdown, num_images_slider, random_seed_slider, load_dataset_button = make_dataset_images_section()
                example_gallery.visible = False
                hide_button.visible = False
            with gr.Column(scale=5, min_width=200):
                output_gallery = make_output_images_section()
                [
                    model_dropdown, layer_slider, node_type_dropdown, num_eig_slider,
                    affinity_focal_gamma_slider, num_sample_ncut_slider, knn_ncut_slider,
                    embedding_method_dropdown, num_sample_tsne_slider, knn_tsne_slider,
                    perplexity_slider, n_neighbors_slider, min_dist_slider,
                    sampling_method_dropdown
                ] = make_parameters_section()
                old_school_ncut_checkbox = gr.Checkbox(label="Old school NCut", value=True, elem_id="old_school_ncut")
                invisible_list = [old_school_ncut_checkbox, num_sample_ncut_slider, knn_ncut_slider,
                                  num_sample_tsne_slider, knn_tsne_slider, sampling_method_dropdown]
                for item in invisible_list:
                    item.visible = False
                # logging text box
                logging_text = gr.Textbox("Logging information", label="Logging", elem_id="logging", type="text", placeholder="Logging information")

        load_images_button.click(lambda: (default_images, default_outputs_independent), outputs=[input_gallery, output_gallery])
        clear_images_button.click(lambda: ([], []), outputs=[input_gallery, output_gallery])
        submit_button.click(
            run_fn,
            inputs=[
                input_gallery, model_dropdown, layer_slider, num_eig_slider, node_type_dropdown,
                affinity_focal_gamma_slider, num_sample_ncut_slider, knn_ncut_slider,
                embedding_method_dropdown, num_sample_tsne_slider, knn_tsne_slider,
                perplexity_slider, n_neighbors_slider, min_dist_slider, sampling_method_dropdown,
                old_school_ncut_checkbox
            ],
            outputs=[output_gallery, logging_text]
        )

    with gr.Tab('Recursive Cut'):
        gr.Markdown('NCUT can be applied recursively: the eigenvectors from the previous iteration become the input features for the next iteration of NCUT.')
        gr.Markdown('__Recursive NCUT__ amplifies small object parts; see the [Documentation](https://ncut-pytorch.readthedocs.io/en/latest/how_to_get_better_segmentation/#recursive-ncut)')
        gr.Markdown('---')
        with gr.Row():
            with gr.Column(scale=5, min_width=200):
                input_gallery, submit_button, clear_images_button = make_input_images_section()
                load_images_button, example_gallery, hide_button = make_example_images_section()
                load_images_button.click(lambda: default_images, outputs=[input_gallery])
                example_gallery.visible = False
                hide_button.visible = False
                dataset_dropdown, num_images_slider, random_seed_slider, load_dataset_button = make_dataset_images_section()
                num_images_slider.value = 100
                dataset_dropdown.value = 'nielsr/CelebA-faces'
            with gr.Column(scale=5, min_width=200):
                with gr.Accordion("➡️ Recursion config", open=True):
                    l1_num_eig_slider = gr.Slider(1, 1000, step=1, label="Recursion #1: N eigenvectors", value=100, elem_id="l1_num_eig")
                    l2_num_eig_slider = gr.Slider(1, 1000, step=1, label="Recursion #2: N eigenvectors", value=50, elem_id="l2_num_eig")
                    l3_num_eig_slider = gr.Slider(1, 1000, step=1, label="Recursion #3: N eigenvectors", value=25, elem_id="l3_num_eig")
                    metric_dropdown = gr.Dropdown(["euclidean", "cosine"], label="Recursion distance metric", value="cosine", elem_id="recursion_metric")
                [
                    model_dropdown, layer_slider, node_type_dropdown, num_eig_slider,
                    affinity_focal_gamma_slider, num_sample_ncut_slider, knn_ncut_slider,
                    embedding_method_dropdown, num_sample_tsne_slider, knn_tsne_slider,
                    perplexity_slider, n_neighbors_slider, min_dist_slider,
                    sampling_method_dropdown
                ] = make_parameters_section()
                num_eig_slider.visible = False
                model_dropdown.value = 'DiNO(dinov2_vitb14_reg)'
                layer_slider.value = 6
                node_type_dropdown.value = 'attn: attention output'
                affinity_focal_gamma_slider.value = 0.25

        with gr.Row():
            with gr.Column(scale=5, min_width=200):
                gr.Markdown('### Output (Recursion #1)')
                l1_gallery = gr.Gallery(value=[], label="Recursion #1", show_label=False, elem_id="ncut_l1", columns=[3], rows=[5], object_fit="contain", height="auto")
            with gr.Column(scale=5, min_width=200):
                gr.Markdown('### Output (Recursion #2)')
                l2_gallery = gr.Gallery(value=[], label="Recursion #2", show_label=False, elem_id="ncut_l2", columns=[3], rows=[5], object_fit="contain", height="auto")
            with gr.Column(scale=5, min_width=200):
                gr.Markdown('### Output (Recursion #3)')
                l3_gallery = gr.Gallery(value=[], label="Recursion #3", show_label=False, elem_id="ncut_l3", columns=[3], rows=[5], object_fit="contain", height="auto")
        with gr.Row():
            with gr.Column(scale=5, min_width=200):
                gr.Markdown(' ')
            with gr.Column(scale=5, min_width=200):
                gr.Markdown(' ')
            with gr.Column(scale=5, min_width=200):
                # logging text box
                logging_text = gr.Textbox("Logging information", label="Logging", elem_id="logging", type="text", placeholder="Logging information")

        true_placeholder = gr.Checkbox(label="True placeholder", value=True, elem_id="true_placeholder")
        true_placeholder.visible = False
        false_placeholder = gr.Checkbox(label="False placeholder", value=False, elem_id="false_placeholder")
        false_placeholder.visible = False
        number_placeholder = gr.Number(0, label="Number placeholder", elem_id="number_placeholder")
        number_placeholder.visible = False

        clear_images_button.click(lambda: ([], [], [], []), outputs=[input_gallery, l1_gallery, l2_gallery, l3_gallery])
        submit_button.click(
            run_fn,
            inputs=[
                input_gallery, model_dropdown, layer_slider, l1_num_eig_slider, node_type_dropdown,
                affinity_focal_gamma_slider, num_sample_ncut_slider, knn_ncut_slider,
                embedding_method_dropdown, num_sample_tsne_slider, knn_tsne_slider,
                perplexity_slider, n_neighbors_slider, min_dist_slider, sampling_method_dropdown,
                false_placeholder, number_placeholder, true_placeholder,
                l2_num_eig_slider, l3_num_eig_slider, metric_dropdown,
            ],
            outputs=[l1_gallery, l2_gallery, l3_gallery, logging_text]
        )

    with gr.Tab('Video'):
        with gr.Row():
            with gr.Column(scale=5, min_width=200):
                video_input_gallery, submit_button, clear_images_button, max_frame_number = make_input_video_section()
                # load_video_button = make_example_video_section()
            with gr.Column(scale=5, min_width=200):
                video_output_gallery = gr.Video(value=None, label="NCUT Embedding", elem_id="ncut", height="auto", show_share_button=False)
                [
                    model_dropdown, layer_slider, node_type_dropdown, num_eig_slider,
                    affinity_focal_gamma_slider, num_sample_ncut_slider, knn_ncut_slider,
                    embedding_method_dropdown, num_sample_tsne_slider, knn_tsne_slider,
                    perplexity_slider, n_neighbors_slider, min_dist_slider,
                    sampling_method_dropdown
                ] = make_parameters_section()
                num_sample_tsne_slider.value = 1000
                perplexity_slider.value = 500
                n_neighbors_slider.value = 500
                knn_tsne_slider.value = 20
                # logging text box
                logging_text = gr.Textbox("Logging information", label="Logging", elem_id="logging", type="text", placeholder="Logging information")

        load_images_button.click(lambda: (default_images, default_outputs), outputs=[input_gallery, output_gallery])
        clear_images_button.click(lambda: (None, None), outputs=[video_input_gallery, video_output_gallery])
        place_holder_false = gr.Checkbox(label="Place holder", value=False, elem_id="place_holder_false")
        place_holder_false.visible = False
        submit_button.click(
            run_fn,
            inputs=[
                video_input_gallery, model_dropdown, layer_slider, num_eig_slider, node_type_dropdown,
                affinity_focal_gamma_slider, num_sample_ncut_slider, knn_ncut_slider,
                embedding_method_dropdown, num_sample_tsne_slider, knn_tsne_slider,
                perplexity_slider, n_neighbors_slider, min_dist_slider, sampling_method_dropdown,
                place_holder_false, max_frame_number
            ],
            outputs=[video_output_gallery, logging_text]
        )

    with gr.Tab('Text'):
        gr.Markdown('=== under construction ===')
        gr.Markdown('Please see the [Documentation](https://ncut-pytorch.readthedocs.io/en/latest/gallery_llama3/) for an example of NCUT on text input.')
        gr.Markdown('---')
        gr.Markdown('![ncut](https://ncut-pytorch.readthedocs.io/en/latest/images/gallery/llama3/llama3_layer_31.jpg)')

    with gr.Tab('Compare'):
        with gr.Row():
            with gr.Column(scale=5, min_width=200):
                input_gallery, submit_button, clear_images_button = make_input_images_section()
                submit_button.visible = False
                load_images_button, example_gallery, hide_button = make_example_images_section()
                example_gallery.visible = False
                hide_button.visible = False
                dataset_dropdown, num_images_slider, random_seed_slider, load_dataset_button = make_dataset_images_section()
                load_images_button.click(lambda: default_images, outputs=input_gallery)
            with gr.Column(scale=5, min_width=200):
                gr.Markdown('### Output Model1')
                output_gallery1 = gr.Gallery(value=[], label="NCUT Embedding", show_label=False, elem_id="ncut1", columns=[3], rows=[1], object_fit="contain", height="auto")
                submit_button1 = gr.Button("🔴RUN", elem_id="submit_button1")
                [
                    model_dropdown1, layer_slider1, node_type_dropdown1, num_eig_slider1,
                    affinity_focal_gamma_slider1, num_sample_ncut_slider1, knn_ncut_slider1,
                    embedding_method_dropdown1, num_sample_tsne_slider1, knn_tsne_slider1,
                    perplexity_slider1, n_neighbors_slider1, min_dist_slider1,
                    sampling_method_dropdown1
                ] = make_parameters_section()
                model_dropdown1.value = 'DiNO(dinov2_vitb14_reg)'
                layer_slider1.value = 11
                node_type_dropdown1.value = 'block: sum of residual'
                # logging text box
                logging_text1 = gr.Textbox("Logging information", label="Logging", elem_id="logging", type="text", placeholder="Logging information")
            with gr.Column(scale=5, min_width=200):
                gr.Markdown('### Output Model2')
                output_gallery2 = gr.Gallery(value=[], label="NCUT Embedding", show_label=False, elem_id="ncut2", columns=[3], rows=[1], object_fit="contain", height="auto")
                submit_button2 = gr.Button("🔴RUN", elem_id="submit_button2")
                [
                    model_dropdown2, layer_slider2, node_type_dropdown2, num_eig_slider2,
                    affinity_focal_gamma_slider2, num_sample_ncut_slider2, knn_ncut_slider2,
                    embedding_method_dropdown2, num_sample_tsne_slider2, knn_tsne_slider2,
                    perplexity_slider2, n_neighbors_slider2, min_dist_slider2,
                    sampling_method_dropdown2
                ] = make_parameters_section()
                model_dropdown2.value = 'DiNO(dinov2_vitb14_reg)'
                layer_slider2.value = 9
                node_type_dropdown2.value = 'attn: attention output'
                # logging text box
                logging_text2 = gr.Textbox("Logging information", label="Logging", elem_id="logging", type="text", placeholder="Logging information")

        clear_images_button.click(lambda: ([], [], []), outputs=[input_gallery, output_gallery1, output_gallery2])
        submit_button1.click(
            run_fn,
            inputs=[
                input_gallery, model_dropdown1, layer_slider1, num_eig_slider1, node_type_dropdown1,
                affinity_focal_gamma_slider1, num_sample_ncut_slider1, knn_ncut_slider1,
                embedding_method_dropdown1, num_sample_tsne_slider1, knn_tsne_slider1,
                perplexity_slider1, n_neighbors_slider1, min_dist_slider1, sampling_method_dropdown1
            ],
            outputs=[output_gallery1, logging_text1]
        )
        submit_button2.click(
            run_fn,
            inputs=[
                input_gallery, model_dropdown2, layer_slider2, num_eig_slider2, node_type_dropdown2,
                affinity_focal_gamma_slider2, num_sample_ncut_slider2, knn_ncut_slider2,
                embedding_method_dropdown2, num_sample_tsne_slider2, knn_tsne_slider2,
                perplexity_slider2, n_neighbors_slider2, min_dist_slider2, sampling_method_dropdown2
            ],
            outputs=[output_gallery2, logging_text2]
        )
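
# share=True requests a temporary public gradio.live link; it has no effect when the app is hosted on Spaces.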
demo.launch(share=True)

# %%