Spaces: Runtime error
my second app!
- app.py +22 -0
- black_panter.jpg +0 -0
- requirements.txt +2 -0
app.py
ADDED
@@ -0,0 +1,22 @@
+from transformers import CLIPProcessor, CLIPModel
+import gradio as gr
+
+model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
+processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
+
+classes = ["Iron Man", "Captain America", "Thor", "Spider-Man", "Black Widow", "Black Panther", "Hulk", "Ant-Man",
+           "Peggy Carter", "Daredevil", "Star-Lord", "Wong", "Doctor Strange", "Nick Fury", "Gamora", "Jessica Jones",
+           "Nebula", "Falcon", "Winter Soldier", "Rocket", "Hawkeye"]
+text = [f"a photo of {x}" for x in classes]
+def predict(img):
+    inputs = processor(text=text, images=img, return_tensors="pt", padding=True)
+    outputs = model(**inputs)
+    logits_per_image = outputs.logits_per_image  # image-text similarity scores
+    probs = logits_per_image.softmax(dim=1).squeeze()  # softmax gives label probabilities
+    return {classes[i]: float(probs[i]) for i in range(len(probs))}
+
+title = "Marvel Heroes Classification"
+description = "Using CLIP for zero-shot classification"
+examples = ["black_panter.jpg"]
+gr.Interface(fn=predict, inputs=gr.Image(type="pil"), outputs=gr.Label(),
+             examples=examples, title=title, description=description).launch()
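To sanity-check the zero-shot scoring without starting the Gradio server, the same CLIP calls used in app.py can be run directly. This is a minimal sketch, assuming the example image added in this commit (black_panter.jpg) sits in the working directory; the short class subset is only there to keep the test quick.

# Local smoke test for the scoring logic in app.py (no Gradio UI).
# Assumes black_panter.jpg, committed alongside app.py, is in the working directory.
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

classes = ["Black Panther", "Iron Man", "Thor"]  # small subset, enough for a smoke test
prompts = [f"a photo of {x}" for x in classes]

img = Image.open("black_panter.jpg")
inputs = processor(text=prompts, images=img, return_tensors="pt", padding=True)
with torch.no_grad():
    probs = model(**inputs).logits_per_image.softmax(dim=1).squeeze()

# Print classes from most to least likely.
for name, p in sorted(zip(classes, probs.tolist()), key=lambda kv: kv[1], reverse=True):
    print(f"{name}: {p:.3f}")

If Black Panther comes out on top here, the predict function behind the Gradio interface should rank it the same way.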
black_panter.jpg
ADDED
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+transformers
+gradio
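Both dependencies are left unpinned, so the Space installs whatever transformers and gradio versions are current at build time; a later breaking release can then take a previously working app down, which is the usual story behind a Runtime error status like the one shown above. A pinned variant is a safer default; the version numbers below are illustrative assumptions, not tested values, and in practice the exact versions the app was developed against (for example from pip freeze) are the ones worth pinning.

transformers==4.30.2
gradio==3.50.2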