Spaces: Running on Zero

add paper

app.py CHANGED
@@ -183,7 +183,7 @@ downscaled_outputs = default_outputs
 example_items = downscaled_images[:3] + downscaled_outputs[:3]


-def run_alignedthreemodelattnnodes(images, model, batch_size=
+def run_alignedthreemodelattnnodes(images, model, batch_size=16):

     use_cuda = torch.cuda.is_available()
     device = torch.device("cuda" if use_cuda else "cpu")
@@ -535,17 +535,17 @@ def run_fn(
     }
     # print(kwargs)
     num_images = len(images)
-    if num_images
+    if num_images >= 100:
         return super_duper_long_run(model, images, **kwargs)
     if 'diffusion' in model_name.lower():
         return super_duper_long_run(model, images, **kwargs)
     if recursion:
         return longer_run(model, images, **kwargs)
-    if num_images
+    if num_images >= 50:
         return longer_run(model, images, **kwargs)
     if old_school_ncut:
         return longer_run(model, images, **kwargs)
-    if num_images
+    if num_images >= 10:
         return long_run(model, images, **kwargs)
     if embedding_method == "UMAP":
         if perplexity >= 250 or num_sample_tsne >= 500:
@@ -1020,6 +1020,8 @@ with demo:
     gr.Markdown('---')
     gr.Markdown('**Features are aligned across models and layers.** A linear alignment transform is trained for each model/layer, learning signal comes from 1) fMRI brain activation and 2) segmentation preserving eigen-constraints.')
     gr.Markdown('NCUT is computed on the concatenated graph of all models, layers, and images. Color is **aligned** across all models and layers.')
+    gr.Markdown('')
+    gr.Markdown("To see a good pattern, you will need to load 100 images. Running out of HuggingFace GPU Quota? Try [Demo](https://ncut-pytorch.readthedocs.io/en/latest/demo/) hosted at UPenn")
     gr.Markdown('---')
     with gr.Row():
         with gr.Column(scale=5, min_width=200):
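Context for the run_fn hunk: on a ZeroGPU Space, GPU time is allotted per call, so dispatching larger image batches to a runner with a longer time budget (long_run, longer_run, super_duper_long_run) avoids a job being cut off mid-computation. Below is a minimal sketch of that dispatch pattern, assuming the three runner names from the diff are thin wrappers around a shared worker; the spaces.GPU durations and the _ncut_run helper are illustrative assumptions, not taken from app.py.

import spaces  # Hugging Face ZeroGPU package; provides the spaces.GPU decorator

def _ncut_run(model, images, **kwargs):
    # Hypothetical shared worker: in the real app.py the NCUT computation lives here.
    raise NotImplementedError

# Each wrapper requests a different GPU time budget (durations are illustrative).
@spaces.GPU(duration=60)
def long_run(model, images, **kwargs):
    return _ncut_run(model, images, **kwargs)

@spaces.GPU(duration=120)
def longer_run(model, images, **kwargs):
    return _ncut_run(model, images, **kwargs)

@spaces.GPU(duration=300)
def super_duper_long_run(model, images, **kwargs):
    return _ncut_run(model, images, **kwargs)

With wrappers like these in place, the num_images cutoffs added in the hunk above (>= 100, >= 50, >= 10) simply pick the smallest time budget expected to fit the batch.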