# Download the example images used by the demo
import os
os.makedirs('images', exist_ok=True)
!wget -q -O images/jokowi.jpeg https://cdn.setneg.go.id/_multimedia/photo/20220218/5008WhatsApp_Image_2022-02-18_at_1.36.50_PM.jpeg
!wget -q -O images/megawati.jpeg https://gallery.poskota.co.id/storage/Foto/Foto_20220602_205953_hql.jpeg
!wget -q -O images/cipung.jpg https://cdn.idntimes.com/content-images/community/2022/11/rayyanza-695b5fc766d9ed00ece029dcd8177b8e-4c74e93112d56ab97dac735945a7a619_600x400.jpg
import gradio as gr
from PIL import Image
from transformers import pipeline
# Load the image-classification pipelines (age and emotion)
pipe_age = pipeline("image-classification", model="nateraw/vit-age-classifier")
pipe_emotion = pipeline("image-classification", model="ahyar002/emotion_classification")
def age_prediction(image):
    # Convert the NumPy array passed by Gradio into a PIL image
    pil_image = Image.fromarray(image)
    # Run both classifiers on the image
    predict_age = pipe_age(pil_image)
    predict_emotion = pipe_emotion(pil_image)
    # Transform the outputs into {label: score} dictionaries for gr.Label
    transformed_dict_age = {item['label']: item['score'] for item in predict_age}
    transformed_dict_emotion = {item['label']: item['score'] for item in predict_emotion}
    return transformed_dict_age, transformed_dict_emotion
demo = gr.Interface(
    age_prediction,
    inputs="image",
    outputs=[gr.Label(num_top_classes=3), gr.Label(num_top_classes=3)],
    examples=[
        os.path.join(os.path.abspath(''), "images/jokowi.jpeg"),
        os.path.join(os.path.abspath(''), "images/megawati.jpeg"),
        os.path.join(os.path.abspath(''), "images/cipung.jpg"),
    ],
)
if __name__ == "__main__":
    demo.launch(debug=True)
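A minimal sketch of exercising age_prediction outside the Gradio UI, assuming the example images above downloaded successfully (the chosen file and the top-1 printout are illustrative only):

import numpy as np
from PIL import Image

# Load one of the downloaded examples as a NumPy array, the same format Gradio passes in
img = np.array(Image.open("images/jokowi.jpeg").convert("RGB"))
ages, emotions = age_prediction(img)
# Print the highest-scoring age bracket and emotion label
print(max(ages, key=ages.get), max(emotions, key=emotions.get))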