huzey committed on
Commit e9f5121
Parent(s): 55dc840

update cache

Files changed (1): app.py (+60 -5)
app.py CHANGED
@@ -464,20 +464,75 @@ def image_clip_feature(
     return outputs
 
 
+
+import hashlib
+import pickle
+import sys
+from collections import OrderedDict
+
+# Cache dictionary with limited size
+class LimitedSizeCache(OrderedDict):
+    def __init__(self, max_size_bytes):
+        self.max_size_bytes = max_size_bytes
+        self.current_size_bytes = 0
+        super().__init__()
+
+    def __setitem__(self, key, value):
+        item_size = self.get_item_size(value)
+        # Evict items until there is enough space
+        while self.current_size_bytes + item_size > self.max_size_bytes:
+            self.popitem(last=False)
+        super().__setitem__(key, value)
+        self.current_size_bytes += item_size
+
+    def __delitem__(self, key):
+        value = self[key]
+        super().__delitem__(key)
+        self.current_size_bytes -= self.get_item_size(value)
+
+    def get_item_size(self, value):
+        """Estimate the size of the value in bytes."""
+        return sys.getsizeof(value)
+
+# Initialize the cache with a 4GB limit
+cache = LimitedSizeCache(max_size_bytes=4 * 1024 * 1024 * 1024)  # 4GB
+
+def compute_hash(*args, **kwargs):
+    """Compute a unique hash based on the function arguments."""
+    hasher = hashlib.sha256()
+    pickled_args = pickle.dumps((args, kwargs))
+    hasher.update(pickled_args)
+    return hasher.hexdigest()
+
+
 def extract_features(images, model_name="sam", node_type="block", layer=-1):
+    # Compute the cache key
+    cache_key = compute_hash(images, model_name, node_type, layer)
+
+    # Check if the result is already in the cache
+    if cache_key in cache:
+        print("Cache hit!")
+        return cache[cache_key]
+
+
+    # Compute the result if not in cache
     if model_name == "SAM(sam_vit_b)":
         if not use_cuda:
             gr.warning("GPU not detected. Running SAM on CPU, ~30s/image.")
-        return image_sam_feature(images, node_type=node_type, layer=layer)
+        result = image_sam_feature(images, node_type=node_type, layer=layer)
     elif model_name == 'MobileSAM':
-        return image_mobilesam_feature(images, node_type=node_type, layer=layer)
+        result = image_mobilesam_feature(images, node_type=node_type, layer=layer)
     elif model_name == "DiNO(dinov2_vitb14_reg)":
-        return image_dino_feature(images, node_type=node_type, layer=layer)
+        result = image_dino_feature(images, node_type=node_type, layer=layer)
     elif model_name == "CLIP(openai/clip-vit-base-patch16)":
-        return image_clip_feature(images, node_type=node_type, layer=layer)
+        result = image_clip_feature(images, node_type=node_type, layer=layer)
     else:
         raise ValueError(f"Model {model_name} not supported.")
 
+    # Store the result in the cache
+    cache[cache_key] = result
+
+    return result
 
 def compute_ncut(
     features,
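
The first hunk adds in-process memoization for extract_features: the call arguments are pickled and hashed with SHA-256 (compute_hash), and results are kept in an OrderedDict subclass (LimitedSizeCache) that evicts its oldest entries once an estimated byte budget of 4 GB is exceeded. The sketch below reproduces that pattern in isolation so the eviction behavior is easy to see; TinyCache and arg_hash are illustrative names, not the app's code, and the budget is deliberately tiny. Unlike the class in the diff, the sketch subtracts the evicted value's size inside the eviction loop, since OrderedDict.popitem does not go through a subclass's __delitem__.

```python
import hashlib
import pickle
import sys
from collections import OrderedDict


class TinyCache(OrderedDict):
    """Insertion-ordered cache bounded by an (estimated) byte budget."""

    def __init__(self, max_size_bytes):
        super().__init__()
        self.max_size_bytes = max_size_bytes
        self.current_size_bytes = 0

    def __setitem__(self, key, value):
        item_size = sys.getsizeof(value)  # shallow estimate, like get_item_size in the diff
        # Evict oldest entries until the new value fits (or the cache is empty).
        while self and self.current_size_bytes + item_size > self.max_size_bytes:
            _, evicted = self.popitem(last=False)
            self.current_size_bytes -= sys.getsizeof(evicted)
        super().__setitem__(key, value)
        self.current_size_bytes += item_size


def arg_hash(*args, **kwargs):
    """Same idea as compute_hash: SHA-256 over the pickled call arguments."""
    return hashlib.sha256(pickle.dumps((args, kwargs))).hexdigest()


cache = TinyCache(max_size_bytes=250)  # tiny budget so eviction shows up right away

for layer in range(4):
    key = arg_hash("fake_image", model_name="MobileSAM", layer=layer)
    cache[key] = b"x" * 64  # stand-in for a cached feature tensor
    print(f"entries={len(cache)}  approx_bytes={cache.current_size_bytes}")
```

Keep in mind that sys.getsizeof is a shallow measure (it does not recurse into containers and may not account for tensor storage), so the 4 GB limit in the app is a rough bound rather than an exact memory cap.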
@@ -584,7 +639,7 @@ demo = gr.Interface(
         gr.Dropdown(["attn", "mlp", "block"], label="Node type", value="block", elem_id="node_type", info="attn: attention output, mlp: mlp output, block: sum of residual stream"),
         gr.Slider(0, 11, step=1, label="Layer", value=11, elem_id="layer", info="which layer of the image backbone features"),
         gr.Slider(1, 1000, step=1, label="Number of eigenvectors", value=100, elem_id="num_eig", info='increase for more object parts, decrease for whole object'),
-        gr.Slider(0.01, 1, step=0.01, label="Affinity focal gamma", value=0.5, elem_id="affinity_focal_gamma", info="decrease for more aggressive cleaning on the affinity matrix"),
+        gr.Slider(0.01, 1, step=0.01, label="Affinity focal gamma", value=0.3, elem_id="affinity_focal_gamma", info="decrease for more aggressive cleaning on the affinity matrix"),
     ],
     gr.Gallery(value=default_outputs, label="NCUT Embedding", show_label=False, elem_id="ncut", columns=[3], rows=[1], object_fit="contain", height="auto"),
     additional_inputs=[
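
The second hunk only lowers the default "Affinity focal gamma" slider from 0.5 to 0.3; the slider's own help text says smaller values clean the affinity matrix more aggressively. As a hedged illustration of why a lower value suppresses weak edges faster than strong ones, the snippet below assumes a temperature-style kernel, affinity = exp(-distance / gamma); the actual formula lives in the NCUT backend, not in this diff.

```python
import numpy as np

# Assumed temperature-style kernel: affinity = exp(-distance / gamma).
# Purely illustrative of the slider's "cleaning" effect, not the app's exact formula.
distances = np.array([0.1, 0.5, 1.0])  # a strong, a medium, and a weak edge

for gamma in (0.5, 0.3):
    affinity = np.exp(-distances / gamma)
    print(f"gamma={gamma}: {np.round(affinity, 3)}")
# gamma=0.5: [0.819 0.368 0.135]
# gamma=0.3: [0.717 0.189 0.036]  -> weak edges shrink much faster than strong ones
```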
 