import gradio as gr
import torch
from torch.nn import functional as F
from torchvision import transforms
from huggingface_hub import hf_hub_download
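
# Fashion-item classification demo: downloads a fine-tuned YOLOv5 checkpoint
# from the Hugging Face Hub and serves it behind a Gradio interface.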

# Fine-tuned checkpoint hosted on the Hugging Face Hub.
REPO_ID = "Kr1n3/Fashion-Items-Classification"
FILENAME = "best.pt"

# hf_hub_download fetches the weights and caches them locally.
yolov5_weights = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)

# Load the custom weights through the YOLOv5 hub entry point.
model = torch.hub.load('ultralytics/yolov5', 'custom', path=yolov5_weights, force_reload=True)
# ImageNet-style preprocessing: resize, tensorize, and normalize with the
# standard ImageNet channel means and standard deviations.
data_transform1 = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])

title = "Fashion Items Classification"

examples = [
    ['https://github.com/Kr1n3/MPC_2022/blob/main/dataset/bag_14.JPG?raw=true'],
    ['https://github.com/Kr1n3/MPC_2022/blob/main/dataset/dress_45.JPG?raw=true'],
    ['https://github.com/Kr1n3/MPC_2022/blob/main/dataset/pants_30.jpeg?raw=true'],
]

classes = ['Bag', 'Dress', 'Pants', 'Shoes', 'Skirt']

def predict(img):
    # Preprocess the PIL image and add a batch dimension: (1, 3, 224, 224).
    inp = data_transform1(img).unsqueeze(0)
    # Run inference without tracking gradients.
    with torch.no_grad():
        outputs = model(inp)
    # Convert the raw scores to probabilities and map them to class names,
    # the dictionary format expected by Gradio's 'label' output.
    pred = F.softmax(outputs[0], dim=0).cpu().numpy()
    return {classes[i]: float(pred[i]) for i in range(len(classes))}
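
# Hypothetical local smoke test (not part of the hosted demo itself):
#   from PIL import Image
#   print(predict(Image.open('bag_14.JPG').convert('RGB')))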

# `gr.Image` replaces the deprecated `gr.inputs.Image` namespace; 'label'
# renders the confidence dictionary returned by predict().
gr.Interface(
    predict,
    inputs=gr.Image(type='pil'),
    outputs='label',
    title=title,
    examples=examples,
).launch(debug=True)
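
# Dependencies assumed by this script (unpinned, illustrative only):
#   pip install gradio torch torchvision huggingface_hub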