Create app.py
app.py
ADDED
@@ -0,0 +1,41 @@
+# Human Action Recognition
+
+## LOADING MODULES
+
+
+from transformers import pipeline
+from PIL import Image
+import requests
+import gradio as gr
+
+# Load the pretrained ViT-based human action recognition classifier
+pipe = pipeline("image-classification", "rvv-karma/Human-Action-Recognition-VIT-Base-patch16-224")
+
+def classify_image(image):
+    # Run the classifier and map each predicted label to its score for the Label output
+    predictions = pipe(image)
+    return {prediction["label"]: prediction["score"] for prediction in predictions}
+
+
+# Output:
+# [{'score': 0.9918079972267151, 'label': 'dancing'},
+#  {'score': 0.00207977625541389, 'label': 'clapping'},
+#  {'score': 0.0015223610680550337, 'label': 'running'},
+#  {'score': 0.0009153694845736027, 'label': 'fighting'},
+#  {'score': 0.0006987180095165968, 'label': 'sitting'}]
+
+
+ex = [['cat2.jpg'],
+      ['dog2.jpeg'],
+      ['cat3.jpg'],
+      ['dog.jpeg']]
+
+"""
+## RUNNING WEB UI"""
+
+image = gr.inputs.Image()
+label = gr.outputs.Label(num_top_classes=5)
+
+gr.Interface(fn=classify_image, inputs=image, outputs=label, title='Human Action Recognition',
+             height=600, width=1200, examples=ex, theme='peach').launch(debug=True)
+
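
For a quick sanity check of the classifier outside the Gradio UI, the same pipeline can be called directly on a PIL image or an image path. A minimal sketch follows; the filename dancing.jpg is a placeholder for illustration, not a file in this repo:

from transformers import pipeline
from PIL import Image

pipe = pipeline("image-classification", "rvv-karma/Human-Action-Recognition-VIT-Base-patch16-224")

image = Image.open("dancing.jpg")  # placeholder path; use any local image
predictions = pipe(image)          # list of {"label": ..., "score": ...} dicts, highest score first
print({p["label"]: p["score"] for p in predictions})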