keyishen committed
Commit e599954
1 Parent(s): 695e16e

add a model


laion/CLIP-ViT-H-14-laion2B-s32B-b79K

Files changed (1)
  1. app.py +12 -2
app.py CHANGED
@@ -6,6 +6,10 @@ model = CLIPModel.from_pretrained(clip_path).eval()
 processor = AutoProcessor.from_pretrained(clip_path)
 
 
+clip_path2 = "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"
+model2 = CLIPModel.from_pretrained(clip_path2).eval()
+processor2 = AutoProcessor.from_pretrained(clip_path2)
+
 async def predict(init_image, labels_level1):
     if init_image is None:
         return "", ""
@@ -16,13 +20,19 @@ async def predict(init_image, labels_level1):
     inputs = processor(
         text=split_labels, images=init_image, return_tensors="pt", padding=True
     )
-
+
+    inputs2 = processor2(
+        text=split_labels, images=init_image, return_tensors="pt", padding=True
+    )
     outputs = model(**inputs)
+    outputs2 = model2(**inputs2)
     logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
 
+    logits_per_image2 = outputs2.logits_per_image  # this is the image-text similarity score
 
     for i in range(len(split_labels)):
-        ret_str += split_labels[i] + ": " + str(logits_per_image[0][i]) + "\n"
+        ret_str += split_labels[i] + ": " + str(float(logits_per_image[0][i])) \
+            + ", " + str(float(logits_per_image2[0][i])) + "\n"
 
     return ret_str, ret_str
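
The loop above prints raw logits_per_image values from both checkpoints side by side. One caveat worth knowing: raw CLIP logits are scaled by each model's learned temperature (logit_scale), so their magnitudes are not directly comparable across checkpoints. Below is a minimal, self-contained sketch of the same two-model pattern with a per-model softmax added to put the scores on a common probability scale; clip_path, the label list, and the image path are placeholders, since the repo's originals are not shown in these hunks.

# A minimal sketch of the two-model comparison this commit sets up, with a
# softmax step added so the two checkpoints' scores share a common scale.
# clip_path, labels, and the image path are placeholders, not from the repo.
import torch
from PIL import Image
from transformers import CLIPModel, AutoProcessor

clip_path = "openai/clip-vit-base-patch32"            # placeholder for the repo's first checkpoint
clip_path2 = "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"  # the checkpoint added in this commit

model = CLIPModel.from_pretrained(clip_path).eval()
processor = AutoProcessor.from_pretrained(clip_path)
model2 = CLIPModel.from_pretrained(clip_path2).eval()
processor2 = AutoProcessor.from_pretrained(clip_path2)

labels = ["a photo of a cat", "a photo of a dog"]     # placeholder label list
image = Image.open("example.jpg")                     # placeholder image

with torch.no_grad():  # inference only, so skip gradient tracking
    inputs = processor(text=labels, images=image, return_tensors="pt", padding=True)
    inputs2 = processor2(text=labels, images=image, return_tensors="pt", padding=True)
    logits = model(**inputs).logits_per_image         # shape: (1, num_labels)
    logits2 = model2(**inputs2).logits_per_image

# Raw logits are multiplied by each model's learned logit_scale, so softmax
# them per model before comparing across checkpoints.
probs = logits.softmax(dim=-1)
probs2 = logits2.softmax(dim=-1)
for i, label in enumerate(labels):
    print(label + ": " + str(float(probs[0][i])) + ", " + str(float(probs2[0][i])))

The commit's own change of wrapping each score in float(...) serves a related purpose: it prints plain numbers rather than tensor(...) reprs. The Gradio wiring and the async predict signature from app.py are omitted from this sketch.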