huzey committed
Commit fda4350 · 1 Parent(s): e1001b9

update gpu

Files changed (1):
  app.py  +9 -6
app.py CHANGED
@@ -145,6 +145,7 @@ class MobileSAM(nn.Module):
 
 mobilesam = MobileSAM()
 
+@spaces.GPU(duration=30)
 def image_mobilesam_feature(
     images,
     node_type="block",
@@ -247,6 +248,7 @@ class SAM(torch.nn.Module):
 
 sam = SAM()
 
+@spaces.GPU(duration=60)
 def image_sam_feature(
     images,
     node_type="block",
@@ -327,6 +329,7 @@ class DiNOv2(torch.nn.Module):
 
 dinov2 = DiNOv2()
 
+@spaces.GPU(duration=30)
 def image_dino_feature(images, node_type="block", layer=-1):
 
     if USE_CUDA:
@@ -424,6 +427,7 @@ class CLIP(torch.nn.Module):
 
 clip = CLIP()
 
+@spaces.GPU(duration=30)
 def image_clip_feature(
     images, node_type="block", layer=-1
 ):
@@ -495,7 +499,6 @@ def compute_hash(*args, **kwargs):
     return hasher.hexdigest()
 
 
-@spaces.GPU(duration=30)
 def run_model_on_image(images, model_name="sam", node_type="block", layer=-1):
     global USE_CUDA
     USE_CUDA = True
@@ -516,12 +519,12 @@ def run_model_on_image(images, model_name="sam", node_type="block", layer=-1):
     USE_CUDA = False
     return result
 
-def extract_features(images, model_name="mobilesam", node_type="block", layer=-1):
+def extract_features(images, model_name="MobileSAM", node_type="block", layer=-1):
     resolution_dict = {
-        "mobilesam": (1024, 1024),
-        "sam(sam_vit_b)": (1024, 1024),
-        "dinov2(dinov2_vitb14_reg)": (448, 448),
-        "clip(openai/clip-vit-base-patch16)": (224, 224),
+        "MobileSAM": (1024, 1024),
+        "SAM(sam_vit_b)": (1024, 1024),
+        "DiNO(dinov2_vitb14_reg)": (448, 448),
+        "CLIP(openai/clip-vit-base-patch16)": (224, 224),
     }
     images = transform_images(images, resolution=resolution_dict[model_name])
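
With this change, each per-model feature function (image_mobilesam_feature, image_sam_feature, image_dino_feature, image_clip_feature) requests its own ZeroGPU allocation, rather than relying on a single decorator on the top-level run_model_on_image. The following is a minimal sketch of the decorator pattern, assuming a ZeroGPU Space with the spaces package installed; the model and function names below are placeholders, not taken from app.py:

    import spaces
    import torch

    model = torch.nn.Linear(8, 8)  # placeholder; app.py wraps MobileSAM/SAM/DiNOv2/CLIP

    @spaces.GPU(duration=30)  # a GPU is attached only while this call runs, for up to ~30 s
    def extract(x):
        model.cuda()
        out = model(x.cuda())
        model.cpu()
        return out.cpu()

Presumably the slower model (SAM) is given duration=60 so its allocation is not cut off mid-inference, while the lighter models keep the 30-second budget.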