diegulio committed
Commit 3be99bb · 1 Parent(s): 9689840

🐶🧡🐱
app.py ADDED
@@ -0,0 +1,38 @@
+ import torch
+ import gradio as gr
+ from app.model import PetClassificationModel
+ from app.backbone import Backbone
+ from app.config import CFG
+ from torchvision import transforms
+
+ # Load model
+ backbone = Backbone(CFG.MODEL, len(CFG.idx_to_class), pretrained=CFG.PRETRAINED)
+ model = PetClassificationModel(base_model=backbone.model, config=CFG)
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ # map_location lets a CPU-only host load a checkpoint that was saved on GPU
+ model.load_state_dict(torch.load('model/best_model.pt', map_location=device))
+
+ # Eval mode: disables dropout and batch-norm updates for inference
+ model.eval()
+ model.to(device)
+
+
+ pred_transforms = transforms.Compose([
+     transforms.Resize(CFG.IMG_SIZE),
+     transforms.ToTensor(),
+ ])
+
+ def predict(x):
+     x = pred_transforms(x).unsqueeze(0)  # transform and add a batch dimension
+     x = x.to(device)
+
+     with torch.no_grad():
+         prediction = torch.nn.functional.softmax(model(x)[0], dim=0)
+         confidences = {CFG.idx_to_class[i]: float(prediction[i]) for i in range(len(CFG.idx_to_class))}
+
+     return confidences
+
+ gr.Interface(fn=predict,
+              inputs=gr.Image(type="pil"),
+              outputs=gr.Label(num_top_classes=5),
+              examples=["statics/pug.jpg", "statics/poodle.jpg", "statics/cat.jpg", "statics/no.jpg"]).launch()
app/backbone.py ADDED
@@ -0,0 +1,6 @@
+ import timm
+
+ class Backbone:
+     # Thin wrapper: timm builds the base model with a classifier head sized to num_classes
+     def __init__(self, model, num_classes, pretrained=True):
+         self.model = timm.create_model(model, pretrained=pretrained, num_classes=num_classes)
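Backbone is a thin factory around timm. A small sketch (not part of the commit) showing that the classifier head comes out sized to num_classes; the 120-class count and pretrained=False are illustrative assumptions:

    import torch
    from app.backbone import Backbone

    bb = Backbone('inception_v4', num_classes=120, pretrained=False)  # hypothetical class count
    x = torch.randn(1, 3, 299, 299)  # Inception v4 expects 299x299 inputs
    print(bb.model(x).shape)  # torch.Size([1, 120])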
app/config.py ADDED
@@ -0,0 +1,15 @@
+ import pandas as pd
+
+ class CFG:
+     LABEL_PATH = 'data/labels.csv'
+
+     labels = pd.read_csv(LABEL_PATH)
+     idx_to_class = dict(enumerate(labels.breed.unique()))
+     class_to_idx = {c: i for i, c in idx_to_class.items()}
+
+
+     # Model related
+     MODEL = 'inception_v4'
+     PRETRAINED = True
+     IMG_SIZE = (299, 299)  # Depends on the base model; Inception v4 expects 299x299
+     LEARNING_RATE = 1e-3  # assumed value: configure_optimizers in app/model.py requires it
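One caveat worth knowing: pandas' unique() returns breeds in order of first appearance, so idx_to_class depends on the row order of labels.csv and must match the mapping used at training time. A toy illustration of the mapping logic (hypothetical rows, not the real label file):

    import pandas as pd

    labels = pd.DataFrame({'breed': ['pug', 'poodle', 'pug', 'beagle']})
    idx_to_class = dict(enumerate(labels.breed.unique()))
    class_to_idx = {c: i for i, c in idx_to_class.items()}
    print(idx_to_class)   # {0: 'pug', 1: 'poodle', 2: 'beagle'}
    print(class_to_idx)   # {'pug': 0, 'poodle': 1, 'beagle': 2}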
app/model.py ADDED
@@ -0,0 +1,123 @@
+
+ import torch
+ from torch import nn
+
+
+ import lightning as L
+
+ import torch.nn.functional as F
+ from torch import optim
+ from torchmetrics import Accuracy
+
+ from torch.optim.lr_scheduler import ReduceLROnPlateau
+
+
+
+ class PetClassificationModel(L.LightningModule):
+     def __init__(self, base_model, config):
+         super().__init__()
+         self.config = config
+         self.num_classes = len(self.config.idx_to_class)
+         metric = Accuracy(task="multiclass", num_classes=self.num_classes)
+         self.train_acc = metric.clone()
+         self.val_acc = metric.clone()
+         self.test_acc = metric.clone()
+         self.training_step_outputs = []
+         self.validation_step_outputs = []
+         self.test_step_outputs = []
+         self.device_ = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+         self.pretrained_model = base_model
+         out_features = self.pretrained_model.get_classifier().out_features
+         self.custom_layers = nn.Sequential(  # extra head; currently bypassed in forward()
+             nn.Linear(out_features, 512, device=self.device_),
+             nn.ReLU(),
+             nn.Dropout(),
+             nn.Linear(512, self.num_classes, device=self.device_),
+         )
+
+     def forward(self, x):
+         x = self.pretrained_model(x)
+         #x = self.custom_layers(x)
+         return x
+
+
+     def training_step(self, batch, batch_idx):
+         x, y = batch
+         logits = self.forward(x)  # -> logits
+         loss = F.cross_entropy(logits, y)
+         self.log_dict({'train_loss': loss})
+         self.training_step_outputs.append({'loss': loss, 'logits': logits, 'y': y})
+         return loss
+
+     def on_train_epoch_end(self):
+         # Concat batches
+         outputs = self.training_step_outputs
+         logits = torch.cat([x['logits'] for x in outputs])
+         y = torch.cat([x['y'] for x in outputs])
+         self.train_acc(logits, y)
+         self.log_dict({
+             'train_acc': self.train_acc,
+             },
+             on_step=False,
+             on_epoch=True,
+             prog_bar=True)
+         self.training_step_outputs.clear()
+
+     def validation_step(self, batch, batch_idx):
+         x, y = batch
+         logits = self.forward(x)
+         loss = F.cross_entropy(logits, y)
+         self.log_dict({'val_loss': loss})
+         self.validation_step_outputs.append({'loss': loss, 'logits': logits, 'y': y})
+         return loss
+
+     def on_validation_epoch_end(self):
+         # Concat batches
+         outputs = self.validation_step_outputs
+         logits = torch.cat([x['logits'] for x in outputs])
+         y = torch.cat([x['y'] for x in outputs])
+         self.val_acc(logits, y)
+         self.log_dict({
+             'val_acc': self.val_acc,
+             },
+             on_step=False,
+             on_epoch=True,
+             prog_bar=True)
+         self.validation_step_outputs.clear()
+
+     def test_step(self, batch, batch_idx):
+         x, y = batch
+         logits = self.forward(x)
+         loss = F.cross_entropy(logits, y)
+         self.log_dict({'test_loss': loss})
+         self.test_step_outputs.append({'loss': loss, 'logits': logits, 'y': y})
+         return loss
+
+     def on_test_epoch_end(self):
+         # Concat batches
+         outputs = self.test_step_outputs
+         logits = torch.cat([x['logits'] for x in outputs])
+         y = torch.cat([x['y'] for x in outputs])
+         self.test_acc(logits, y)
+         self.log_dict({
+             'test_acc': self.test_acc,
+             },
+             on_step=False,
+             on_epoch=True,
+             prog_bar=True)
+         self.test_step_outputs.clear()
+
+     def predict_step(self, batch, batch_idx=0):
+         x, _ = batch  # labels are ignored at prediction time
+         return self(x)
+
+     def configure_optimizers(self):
+         optimizer = optim.Adam(self.parameters(), lr=self.config.LEARNING_RATE)
+         lr_scheduler = ReduceLROnPlateau(optimizer, mode='min', patience=3)
+         lr_scheduler_dict = {
+             "scheduler": lr_scheduler,
+             "interval": "epoch",
+             "monitor": "val_loss",
+         }
+         return {'optimizer': optimizer, 'lr_scheduler': lr_scheduler_dict}
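The commit ships only the trained checkpoint, but the LightningModule above is ready for training. A hypothetical end-to-end sketch with dummy tensors standing in for a real DataLoader (it relies on the LEARNING_RATE attribute in app/config.py):

    import torch
    import lightning as L
    from torch.utils.data import DataLoader, TensorDataset
    from app.backbone import Backbone
    from app.config import CFG
    from app.model import PetClassificationModel

    backbone = Backbone(CFG.MODEL, len(CFG.idx_to_class), pretrained=CFG.PRETRAINED)
    model = PetClassificationModel(base_model=backbone.model, config=CFG)

    # Dummy batch just to exercise the loop; real training would use the pet images
    xs = torch.randn(8, 3, *CFG.IMG_SIZE)
    ys = torch.randint(0, len(CFG.idx_to_class), (8,))
    loader = DataLoader(TensorDataset(xs, ys), batch_size=4)

    trainer = L.Trainer(max_epochs=1, accelerator="auto")
    trainer.fit(model, train_dataloaders=loader, val_dataloaders=loader)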
data/labels.csv ADDED
The diff for this file is too large to render. See raw diff
 
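Since the labels.csv diff won't render, a quick local peek at the file (the 'breed' column name comes from app/config.py):

    import pandas as pd

    labels = pd.read_csv('data/labels.csv')
    print(labels.breed.nunique(), "unique breeds")
    print(labels.head())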
model/best_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cfcd4a15303233e194dfa4ba9945be1a1bfcb004f5e05677a53b5684ccf3933
+ size 166425282
requirements.txt ADDED
@@ -0,0 +1,21 @@
+ gradio==3.50.2
+ gradio_client==0.6.1
+ huggingface-hub==0.18.0
+ lightning==2.1.0
+ lightning-utilities==0.9.0
+ mypy-extensions==1.0.0
+ numpy==1.25.2
+ pandas==2.1.1
+ Pillow==10.1.0
+ python-dateutil==2.8.2
+ python-multipart==0.0.6
+ pytorch-lightning==2.1.0
+ rpds-py==0.10.6
+ safetensors==0.4.0
+ scikit-learn==1.3.1
+ scipy==1.9.3
+ timm==0.9.7
+ torch==2.1.0
+ torchmetrics==1.2.0
+ torchvision==0.16.0
+
statics/cat.jpg ADDED
statics/no.jpg ADDED
statics/poodle.jpg ADDED
statics/pug.jpg ADDED