assiri committed
Commit e0bdc9b · 1 Parent(s): 32ef33f

Upload 7 files

README.md CHANGED
@@ -1,10 +1,10 @@
 ---
-title: Ia Vit
-emoji: 🏃
-colorFrom: red
-colorTo: gray
+title: ViT ImageNet Classification
+emoji: 🔥
+colorFrom: pink
+colorTo: pink
 sdk: gradio
-sdk_version: 3.47.1
+sdk_version: 3.16.1
 app_file: app.py
 pinned: false
 ---
app.py ADDED
@@ -0,0 +1,56 @@
+
+from transformers import ViTFeatureExtractor, ViTForImageClassification
+
+import torch
+import gradio as gr
+
+from PIL import Image
+
+feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224')
+model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224')
+
+import os, glob
+
+examples_dir = './samples'
+example_files = glob.glob(os.path.join(examples_dir, '*.jpg'))
+
+def classify_image(image):
+
+    with torch.no_grad():
+        model.eval()
+
+        inputs = feature_extractor(images=image, return_tensors="pt")
+        outputs = model(**inputs)
+
+        logits = outputs.logits
+        prob = torch.nn.functional.softmax(logits, dim=1)
+
+        top10_prob, top10_indices = torch.topk(prob, 10)
+        top10_confidences = {}
+        for i in range(10):
+            top10_confidences[model.config.id2label[int(top10_indices[0][i])]] = float(top10_prob[0][i])
+
+        return top10_confidences  # confidences
+
+
+with gr.Blocks(title="ViT ImageNet Classification - ClassCat",
+               css=".gradio-container {background:mintcream;}"
+               ) as demo:
+    gr.HTML("""<div style="font-family:'Times New Roman', 'Serif'; font-size:16pt; font-weight:bold; text-align:center; color:royalblue;">ViT - ImageNet Classification</div>""")
+
+    with gr.Row():
+        input_image = gr.Image(type="pil", image_mode="RGB", shape=(224, 224))
+        output_label = gr.Label(label="Probabilities", num_top_classes=3)
+
+    send_btn = gr.Button("Infer")
+    send_btn.click(fn=classify_image, inputs=input_image, outputs=output_label)
+
+    with gr.Row():
+        gr.Examples(['./samples/cat.jpg'], label='Sample images : cat', inputs=input_image)
+        gr.Examples(['./samples/cheetah.jpg'], label='cheetah', inputs=input_image)
+        gr.Examples(['./samples/hotdog.jpg'], label='hotdog', inputs=input_image)
+        gr.Examples(['./samples/lion.jpg'], label='lion', inputs=input_image)
+        #gr.Examples(example_files, inputs=input_image)
+
+#demo.queue(concurrency_count=3)
+demo.launch(debug=True)
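
The inference path can also be checked without the Gradio UI. The following is a minimal standalone sketch (not part of this commit) that mirrors classify_image above; it assumes the packages from requirements.txt plus Pillow are installed and that the samples/cat.jpg added in this commit is present:

    # Hypothetical test script, not part of the commit: mirrors classify_image
    # for a quick check on one of the bundled sample images.
    import torch
    from PIL import Image
    from transformers import ViTFeatureExtractor, ViTForImageClassification

    feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224')
    model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224')
    model.eval()

    image = Image.open('./samples/cat.jpg').convert('RGB')
    with torch.no_grad():
        inputs = feature_extractor(images=image, return_tensors="pt")
        logits = model(**inputs).logits

    # Top-3 ImageNet labels with probabilities
    probs = logits.softmax(dim=1)
    top_prob, top_idx = probs.topk(3)
    for p, i in zip(top_prob[0], top_idx[0]):
        print(f"{model.config.id2label[int(i)]}: {float(p):.3f}")

The model is reloaded here rather than imported from app.py because importing that module would run demo.launch(debug=True) at import time and block on the Gradio server.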
requirements.txt ADDED
@@ -0,0 +1,2 @@
+transformers
+torch
samples/cat.jpg ADDED
samples/cheetah.jpg ADDED
samples/hotdog.jpg ADDED
samples/lion.jpg ADDED