Commit cb5412b
1 Parent(s): df0efe6

initial commit

.gitignore ADDED
@@ -0,0 +1,3 @@
+ venv/
+ venv
+ __pycache__
app.py ADDED
@@ -0,0 +1,34 @@
+ import torch
+ import torchvision
+ from torchvision.models import efficientnet_v2_s, EfficientNet_V2_S_Weights
+
+ from torch import nn
+
+ from PIL import Image
+ from model import create_effnet_v2_model
+
+ class_names = ['Honda', 'Hyundai', 'Toyota']
+
+ effnet_v2, transforms = create_effnet_v2_model(num_classes=len(class_names), weights_path="efficient_net_s_carvision_3.pth")
+
+ def predict(model, image_path, device):
+     # Preprocess the image with the EfficientNet_V2_S transforms and move it to the target device.
+     image = Image.open(image_path).convert("RGB")
+     image = transforms(image).unsqueeze(0)
+     image = image.to(device)
+
+     # Run the forward pass in eval mode with gradients disabled.
+     model.to(device)
+     model.eval()
+     with torch.inference_mode():
+         output = model(image)
+         probs = torch.softmax(output, dim=1)
+
+     # Map each class name to its predicted probability.
+     pred_labels_and_probs = {class_names[i]: float(probs[0, i]) for i in range(len(class_names))}
+
+     return pred_labels_and_probs
+
+ print(predict(effnet_v2, "examples/Toyota_Tacoma_2017_36_18_270_35_6_75_70_212_19_RWD_5_4_Pickup_xQa.jpg", torch.device("cpu")))
+
+ # print(predict(effnet_v2, "test.jpg", torch.device("cuda:0")))
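
A quick sanity check for the script above (not part of this commit) is to loop predict over every image bundled in the examples/ directory. The sketch below assumes it is appended to the bottom of app.py, so it reuses the effnet_v2 model and predict function defined there; the only extra import is pathlib.

# Sketch: run the classifier over all bundled example images (assumed appended to app.py).
from pathlib import Path

for image_path in sorted(Path("examples").glob("*.jpg")):
    probs = predict(effnet_v2, str(image_path), torch.device("cpu"))
    top_class = max(probs, key=probs.get)  # class name with the highest probability
    print(f"{image_path.name}: {top_class} ({probs[top_class]:.3f})")
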
efficient_net_s_carvision_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ca71eab330a5ea4163fea0751b4c6551bfaf82461ebb751e6128cf3afb4b738
+ size 81639653
examples/Honda_Civic_2011_16_15_140_18_4_68_55_175_25_FWD_5_2_2dr_azJ.jpg ADDED
examples/Honda_Civic_2011_16_15_140_18_4_68_55_175_25_FWD_5_2_2dr_zFd.jpg ADDED
examples/Honda_Pilot_2014_31_18_250_35_6_78_70_191_nan_FWD_8_4_SUV_HUH.jpg ADDED
examples/Hyundai_Accent_2011_13_14_110_16_4_66_57_159_27_FWD_5_2_2dr_EtC.jpg ADDED
examples/Hyundai_Sonata_2015_23_16_170_16_4_73_58_191_28_FWD_5_4_4dr_Qxx.jpg ADDED
examples/Hyundai_Sonata_2017_21_16_180_24_4_73_58_191_25_FWD_5_4_4dr_CNL.jpg ADDED
examples/Toyota_Camry_2019_28_16_200_25_4_72_56_192_51_FWD_5_4_4dr_CHX.jpg ADDED
examples/Toyota_Tacoma_2017_36_18_270_35_6_75_70_212_19_RWD_5_4_Pickup_xQa.jpg ADDED
examples/Toyota_Yaris_2012_14_15_100_15_4_66_59_153_30_FWD_5_2_2dr_Fzo.jpg ADDED
model.py ADDED
@@ -0,0 +1,21 @@
+ import torch
+ import torchvision
+
+ from torchvision.models import efficientnet_v2_s, EfficientNet_V2_S_Weights
+ from torch import nn
+
+
+ def create_effnet_v2_model(weights_path, num_classes=3):
+     # Reuse the pretrained weights' preprocessing transforms for inference.
+     weights = EfficientNet_V2_S_Weights.DEFAULT
+     transforms = weights.transforms()
+
+     # Build the backbone, replace the classifier head, and load the fine-tuned weights.
+     model = efficientnet_v2_s()
+     model.classifier = nn.Sequential(
+         nn.Dropout(0.0),
+         nn.Linear(in_features=1280, out_features=num_classes)
+     )
+     model.load_state_dict(torch.load(weights_path, map_location=torch.device("cpu")))
+
+     return model, transforms
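
model.py only builds and loads the network, so a small smoke test (again not part of the commit) can confirm that the committed checkpoint loads and that the new classifier head emits one logit per class. The 384x384 dummy input below is an assumption that mirrors the default EfficientNet_V2_S transforms; for real images the transforms returned by create_effnet_v2_model handle the resizing.

import torch

from model import create_effnet_v2_model

# Load the fine-tuned 3-class model from the checkpoint committed above.
model, transforms = create_effnet_v2_model(weights_path="efficient_net_s_carvision_3.pth", num_classes=3)

model.eval()
with torch.inference_mode():
    dummy = torch.randn(1, 3, 384, 384)  # one fake RGB image; the size is an assumption
    logits = model(dummy)

print(logits.shape)  # expected: torch.Size([1, 3])
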