Jfink09 committed on
Commit
12300c6
·
1 Parent(s): e850c77

Upload resnet50_deepfundus.py

DeepFundus trained on ResNet50.

Files changed (1)
  1. resnet50_deepfundus.py +603 -0
resnet50_deepfundus.py ADDED
@@ -0,0 +1,603 @@
+ # -*- coding: utf-8 -*-
+ """ResNet50_DeepFundus.ipynb
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+     https://colab.research.google.com/drive/1pd56CapAEjZ8AHAW5bi0uMm6ZzJlOpDZ
+ """
+
+ ######################################################### Use block of code if dataset is on GitHub #######################################################
+ # import os
+ # import requests
+ # import zipfile
+ # from pathlib import Path
+
+ # # Setup path to data folder
+ # data_path = Path("data/")
+ # image_path = data_path / "deepfundus"
+
+ # # If the image folder doesn't exist, download it and prepare it...
+ # if image_path.is_dir():
+ #     print(f"{image_path} directory exists.")
+ # else:
+ #     print(f"Did not find {image_path} directory, creating one...")
+ #     image_path.mkdir(parents=True, exist_ok=True)
+
+ # # Download fundus data
+ # with open(data_path / "deepfundus.zip", "wb") as f:
+ #     request = requests.get("https://github.com/jfink09/DeepFundus/raw/main/deepfundus.zip")
+ #     print("Downloading fundus data...")
+ #     f.write(request.content)
+
+ # # Unzip fundus data
+ # with zipfile.ZipFile(data_path / "deepfundus.zip", "r") as zip_ref:
+ #     print("Unzipping fundus data...")
+ #     zip_ref.extractall(image_path)
+
+ # # Remove zip file
+ # os.remove(data_path / "deepfundus.zip")
+
+ ######################################### Use commented out code if dataset was downloaded from GitHub ######################################################
+ # # Setup train and testing paths
+ # train_dir = image_path / "train"
+ # test_dir = image_path / "test"
+
+ # train_dir, test_dir
+ from pathlib import Path
+
+ # Setup train and testing paths
+ train_dir = Path("drive/MyDrive/data/train")
+ test_dir = Path("drive/MyDrive/data/test")
+
+ train_dir, test_dir
+
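+ # (Added note) These paths assume Google Drive is already mounted in Colab;
+ # drive.mount() is only called further down in this script, so you may need to
+ # run it first — a minimal sketch:
+ # from google.colab import drive
+ # drive.mount('/content/drive')
+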
+ from torchvision import datasets, transforms
+
+ # Create simple transform
+ data_transform = transforms.Compose([
+     transforms.Resize((64, 64)),
+     transforms.ToTensor(),
+ ])
+
+ # Use ImageFolder to create dataset(s)
+ train_data = datasets.ImageFolder(root=train_dir, # target folder of images
+                                   transform=data_transform, # transforms to perform on data (images)
+                                   target_transform=None) # transforms to perform on labels (if necessary)
+
+ test_data = datasets.ImageFolder(root=test_dir,
+                                  transform=data_transform)
+
+ print(f"Train data:\n{train_data}\nTest data:\n{test_data}")
+
+ # For this notebook to run with updated APIs, we need torch 1.12+ and torchvision 0.13+
+ try:
+     import torch
+     import torchvision
+     # Compare (major, minor) tuples so the check also passes on torch 2.x
+     assert tuple(int(i) for i in torch.__version__.split(".")[:2]) >= (1, 12), "torch version should be 1.12+"
+     assert tuple(int(i) for i in torchvision.__version__.split(".")[:2]) >= (0, 13), "torchvision version should be 0.13+"
+     print(f"torch version: {torch.__version__}")
+     print(f"torchvision version: {torchvision.__version__}")
+ except (AssertionError, ImportError):
+     print("[INFO] torch/torchvision versions not as required, installing newer versions.")
+     !pip3 install -U torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113
+     import torch
+     import torchvision
+     print(f"torch version: {torch.__version__}")
+     print(f"torchvision version: {torchvision.__version__}")
+
+ # Continue with regular imports
+ import matplotlib.pyplot as plt
+ import torch
+ import torchvision
+
+ from torch import nn
+ from torchvision import transforms
+
+ # Try to get torchinfo, install it if it doesn't work
+ try:
+     from torchinfo import summary
+ except ImportError:
+     print("[INFO] Couldn't find torchinfo... installing it.")
+     !pip install -q torchinfo
+     from torchinfo import summary
+
+ # Try to import the going_modular directory, download it from GitHub if it doesn't work
+ try:
+     from going_modular.going_modular import data_setup, engine
+ except ImportError:
+     # Get the going_modular scripts
+     print("[INFO] Couldn't find going_modular scripts... downloading them from GitHub.")
+     !git clone https://github.com/jfink09/optical-funduscopic-convolutional-neural-network
+     !mv optical-funduscopic-convolutional-neural-network/going_modular .
+     !rm -rf optical-funduscopic-convolutional-neural-network
+     from going_modular.going_modular import data_setup, engine
+
+ # Setup device agnostic code
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ device
+
+ normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                  std=[0.229, 0.224, 0.225])
+
+ # Create a transforms pipeline manually (required for torchvision < 0.13)
+ manual_transforms = transforms.Compose([
+     transforms.Resize((224, 224)), # 1. Reshape all images to 224x224 (though some models may require different sizes)
+     transforms.ToTensor(), # 2. Turn image values to between 0 & 1
+     transforms.Normalize(mean=[0.485, 0.456, 0.406], # 3. A mean of [0.485, 0.456, 0.406] (across each colour channel)
+                          std=[0.229, 0.224, 0.225]) # 4. A standard deviation of [0.229, 0.224, 0.225] (across each colour channel)
+ ])
+
+
+ # Create training and testing DataLoaders as well as get a list of class names
+ train_dataloader, test_dataloader, class_names = data_setup.create_dataloaders(train_dir=train_dir,
+                                                                                test_dir=test_dir,
+                                                                                transform=manual_transforms, # resize, convert images to between 0 & 1 and normalize them
+                                                                                batch_size=32) # set mini-batch size to 32
+
+ train_dataloader, test_dataloader, class_names
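+
+ # (Added note) Optional sanity check — a minimal sketch that pulls one batch to
+ # confirm the expected shapes before training:
+ # img_batch, label_batch = next(iter(train_dataloader))
+ # print(img_batch.shape, label_batch.shape)  # expected: [32, 3, 224, 224] and [32]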
+
+ # Get a set of pretrained model weights
+ weights = torchvision.models.ResNet50_Weights.DEFAULT # .DEFAULT = best available weights from pretraining on ImageNet
+ weights
+
+ # Get the transforms used to create our pretrained weights
+ auto_transforms = weights.transforms()
+ auto_transforms
+
+ # # Create training and testing DataLoaders as well as get a list of class names
+ # train_dataloader, test_dataloader, class_names = data_setup.create_dataloaders(train_dir=train_dir,
+ #                                                                                test_dir=test_dir,
+ #                                                                                transform=auto_transforms, # perform same data transforms on our own data as the pretrained model
+ #                                                                                batch_size=32) # set mini-batch size to 32
+
+ # train_dataloader, test_dataloader, class_names
+
+ # OLD: Setup the model with pretrained weights and send it to the target device (this was prior to torchvision v0.13)
+ # model = torchvision.models.resnet50(pretrained=True).to(device) # OLD method (with pretrained=True)
+
+ # NEW: Setup the model with pretrained weights and send it to the target device (torchvision v0.13+)
+ weights = torchvision.models.ResNet50_Weights.DEFAULT # .DEFAULT = best available weights
+ model = torchvision.models.resnet50(weights=weights).to(device)
+
+ # model # uncomment to output (it's very long)
+
+ # Print a summary using torchinfo (uncomment for actual output)
+ summary(model=model,
+         input_size=(32, 3, 224, 224), # make sure this is "input_size", not "input_shape"
+         # col_names=["input_size"], # uncomment for smaller output
+         col_names=["input_size", "output_size", "num_params", "trainable"],
+         col_width=20,
+         row_settings=["var_names"])
+
+ # Set the manual seeds
+ torch.manual_seed(42)
+ torch.cuda.manual_seed(42)
+
+ # Get the length of class_names (one output unit for each class)
+ output_shape = len(class_names)
+
+ # Recreate the classifier layer (ResNet50 stores it in `.fc`, not `.classifier`) and send it to the target device
+ model.fc = torch.nn.Sequential(
+     torch.nn.Dropout(p=0.2, inplace=True),
+     torch.nn.Linear(in_features=2048,
+                     out_features=output_shape, # same number of output units as our number of classes
+                     bias=True)).to(device)
+
+ # Define loss and optimizer
+ loss_fn = nn.CrossEntropyLoss()
+ optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
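+
+ # (Added note) As written, every layer stays trainable (full fine-tuning). A
+ # minimal sketch of the feature-extractor alternative, assuming you only want
+ # to train the new head, would freeze everything except `fc`:
+ # for name, param in model.named_parameters():
+ #     if not name.startswith("fc"):
+ #         param.requires_grad = False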
+
+ # Set the random seeds
+ torch.manual_seed(42)
+ torch.cuda.manual_seed(42)
+
+ # Start the timer
+ from timeit import default_timer as timer
+ start_time = timer()
+
+ # Setup training and save the results
+ results = engine.train(model=model,
+                        train_dataloader=train_dataloader,
+                        test_dataloader=test_dataloader,
+                        optimizer=optimizer,
+                        loss_fn=loss_fn,
+                        epochs=20,
+                        device=device)
+
+ # End the timer and print out how long it took
+ end_time = timer()
+ print(f"[INFO] Total training time: {end_time-start_time:.3f} seconds")
+
+ # Get the plot_loss_curves() function from helper_functions.py, download the file if we don't have it
+ try:
+     from helper_functions import plot_loss_curves
+ except ImportError:
+     print("[INFO] Couldn't find helper_functions.py, downloading...")
+     import requests
+     with open("helper_functions.py", "wb") as f:
+         request = requests.get("https://github.com/jfink09/optical-funduscopic-convolutional-neural-network/raw/main/helper_functions.py")
+         f.write(request.content)
+     from helper_functions import plot_loss_curves
+
+ # Plot the loss curves of our model
+ plot_loss_curves(results)
+
+ from typing import List, Tuple
+
+ from PIL import Image
+
+ # 1. Take in a trained model, class names, image path, image size, a transform and target device
+ def pred_and_plot_image(model: torch.nn.Module,
+                         image_path: str,
+                         class_names: List[str],
+                         image_size: Tuple[int, int] = (224, 224),
+                         transform: torchvision.transforms.Compose = None,
+                         device: torch.device = device):
+
+     # 2. Open image
+     img = Image.open(image_path)
+
+     # 3. Create transformation for image (if one doesn't exist)
+     if transform is not None:
+         image_transform = transform
+     else:
+         image_transform = transforms.Compose([
+             transforms.Resize(image_size),
+             transforms.ToTensor(),
+             transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                  std=[0.229, 0.224, 0.225]),
+         ])
+
+     ### Predict on image ###
+
+     # 4. Make sure the model is on the target device
+     model.to(device)
+
+     # 5. Turn on model evaluation mode and inference mode
+     model.eval()
+     with torch.inference_mode():
+         # 6. Transform and add an extra dimension to image (model requires samples in [batch_size, color_channels, height, width])
+         transformed_image = image_transform(img).unsqueeze(dim=0)
+
+         # 7. Make a prediction on image with an extra dimension and send it to the target device
+         target_image_pred = model(transformed_image.to(device))
+
+     # 8. Convert logits -> prediction probabilities (using torch.softmax() for multi-class classification)
+     target_image_pred_probs = torch.softmax(target_image_pred, dim=1)
+
+     # 9. Convert prediction probabilities -> prediction labels
+     target_image_pred_label = torch.argmax(target_image_pred_probs, dim=1)
+
+     # 10. Plot image with predicted label and probability
+     plt.figure()
+     plt.imshow(img)
+     plt.title(f"Pred: {class_names[target_image_pred_label]} | Prob: {target_image_pred_probs.max():.3f}")
+     plt.axis(False)
+
+ # Get a random list of image paths from test set
+ import random
+ num_images_to_plot = 3
+ test_image_path_list = list(Path(test_dir).glob("*/*.jpg")) # get list of all image paths from test data
+ test_image_path_sample = random.sample(population=test_image_path_list, # go through all of the test image paths
+                                        k=num_images_to_plot) # randomly select 'k' image paths to pred and plot
+
+ # Make predictions on and plot the images
+ for image_path in test_image_path_sample:
+     pred_and_plot_image(model=model,
+                         image_path=image_path,
+                         class_names=class_names,
+                         # transform=weights.transforms(), # optionally pass in a specified transform from our pretrained model weights
+                         image_size=(224, 224))
+
+ data_path = Path("data/")
+ image_path = data_path / "deepfundus"
+
+ # If the image folder doesn't exist, download it and prepare it...
+ if image_path.is_dir():
+     print(f"{image_path} directory exists.")
+ else:
+     print(f"Did not find {image_path} directory, creating one...")
+     image_path.mkdir(parents=True, exist_ok=True)
+
+ # Import/install Gradio
+ try:
+     import gradio as gr
+ except ImportError:
+     !pip -q install gradio
+     import gradio as gr
+
+ print(f"Gradio version: {gr.__version__}")
+
+ from google.colab import drive
+ drive.mount('/content/drive')
+
+ # Put ResNet50 on CPU
+ model.to("cpu")
+
+ # Check the device
+ next(iter(model.parameters())).device
+
+ # 1. Setup pretrained ResNet50 weights
+ resnet50_weights = torchvision.models.ResNet50_Weights.DEFAULT
+
+ # 2. Get ResNet50 transforms
+ resnet50_transforms = resnet50_weights.transforms()
+
+ # 3. Setup pretrained model
+ resnet50 = torchvision.models.resnet50(weights=resnet50_weights) # could also use weights="DEFAULT"
+
+ # 4. Keep the base layers trainable (requires_grad=True fine-tunes the whole backbone;
+ # set it to False to freeze the base layers for feature extraction instead)
+ for param in resnet50.parameters():
+     param.requires_grad = True
+
+ # 5. Update the classifier head (stored in `.fc` for ResNet models, not `.classifier`)
+ resnet50.fc = nn.Sequential(
+     nn.Dropout(p=0.3, inplace=True), # keep dropout layer same
+     nn.Linear(in_features=2048, # keep in_features same
+               out_features=8)) # change out_features to suit our number of classes
+
+ def create_resnet50_model(num_classes: int = 8,
+                           seed: int = 42):
+     """Creates a ResNet50 feature extractor model and transforms.
+
+     Args:
+         num_classes (int, optional): number of classes in the classifier head.
+             Defaults to 8.
+         seed (int, optional): random seed value. Defaults to 42.
+
+     Returns:
+         model (torch.nn.Module): ResNet50 feature extractor model.
+         transforms (torchvision.transforms): ResNet50 image transforms.
+     """
+     # 1, 2, 3. Create ResNet50 pretrained weights, transforms and model
+     weights = torchvision.models.ResNet50_Weights.DEFAULT
+     transforms = weights.transforms()
+     model = torchvision.models.resnet50(weights=weights)
+
+     # 4. Keep all base layers trainable (set requires_grad to False to freeze them for feature extraction)
+     for param in model.parameters():
+         param.requires_grad = True
+
+     # 5. Change classifier head (`.fc` for ResNet) with random seed for reproducibility
+     torch.manual_seed(seed)
+     model.fc = nn.Sequential(
+         nn.Dropout(p=0.3, inplace=True),
+         nn.Linear(in_features=2048, # EffNetB2 in_features = 1408, EffNetB0 in_features = 1280, ResNet50 in_features = 2048
+                   out_features=num_classes),
+     )
+
+     return model, transforms
+
+ resnet50, resnet50_transforms = create_resnet50_model(num_classes=8,
+                                                       seed=42)
+
+ from torchinfo import summary
+
+ # Print ResNet50 model summary (uncomment for full output)
+ summary(resnet50,
+         input_size=(1, 3, 224, 224),
+         col_names=["input_size", "output_size", "num_params", "trainable"],
+         col_width=20,
+         row_settings=["var_names"])
+
+ # Setup DataLoaders
+ from going_modular.going_modular import data_setup
+ train_dataloader_resnet50, test_dataloader_resnet50, class_names = data_setup.create_dataloaders(train_dir=train_dir,
+                                                                                                  test_dir=test_dir,
+                                                                                                  transform=resnet50_transforms,
+                                                                                                  batch_size=32)
+
+ from going_modular.going_modular import engine
+
+ # Setup optimizer
+ optimizer = torch.optim.Adam(params=resnet50.parameters(),
+                              lr=1e-3)
+ # Setup loss function
+ loss_fn = torch.nn.CrossEntropyLoss()
+
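+ # (Added note) set_seeds() is referenced (commented out) below but never defined
+ # or imported above; a minimal local stand-in, assuming the helper_functions
+ # version isn't available:
+ def set_seeds(seed: int = 42):
+     torch.manual_seed(seed)
+     torch.cuda.manual_seed(seed)
+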
+ # Set seeds for reproducibility and train the model
+ # set_seeds()
+ resnet50_results = engine.train(model=resnet50,
+                                 train_dataloader=train_dataloader_resnet50,
+                                 test_dataloader=test_dataloader_resnet50,
+                                 epochs=10,
+                                 optimizer=optimizer,
+                                 loss_fn=loss_fn,
+                                 device=device)
+
+ from helper_functions import plot_loss_curves
+
+ plot_loss_curves(resnet50_results)
+
+ from going_modular.going_modular import utils
+
+ # Save the model
+ utils.save_model(model=resnet50,
+                  target_dir="models",
+                  model_name="pretrained_resnet50_feature_extractor_drappcompressed.pth")
+
+ from pathlib import Path
+
+ # Get the model size in bytes then convert to megabytes
+ pretrained_resnet50_model_size = Path("models/pretrained_resnet50_feature_extractor_drappcompressed.pth").stat().st_size // (1024*1024) # division converts bytes to megabytes (roughly)
+ print(f"Pretrained ResNet50 feature extractor model size: {pretrained_resnet50_model_size} MB")
+
+ # Count number of parameters in ResNet50
+ resnet50_total_params = sum(torch.numel(param) for param in resnet50.parameters())
+ resnet50_total_params
+
+ # Create a dictionary with ResNet50 statistics
+ resnet50_stats = {"test_loss": resnet50_results["test_loss"][-1],
+                   "test_acc": resnet50_results["test_acc"][-1],
+                   "number_of_parameters": resnet50_total_params,
+                   "model_size (MB)": pretrained_resnet50_model_size}
+ resnet50_stats
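+
+ # (Added note) An optional companion count — trainable parameters only, which
+ # differs from the total whenever base layers are frozen:
+ # resnet50_trainable_params = sum(p.numel() for p in resnet50.parameters() if p.requires_grad)
+ # print(f"Trainable parameters: {resnet50_trainable_params}")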
+
+ from pathlib import Path
+
+ # Get all test data paths
+ print(f"[INFO] Finding all filepaths ending with '.jpg' in directory: {test_dir}")
+ test_data_paths = list(Path(test_dir).glob("*/*.jpg"))
+ test_data_paths[:5]
+
+ import pathlib
+ import torch
+
+ from PIL import Image
+ from timeit import default_timer as timer
+ from tqdm.auto import tqdm
+ from typing import List, Dict
+
+ # 1. Create a function to return a list of dictionaries with sample, truth label, prediction, prediction probability and prediction time
+ def pred_and_store(paths: List[pathlib.Path],
+                    model: torch.nn.Module,
+                    transform: torchvision.transforms.Compose,
+                    class_names: List[str],
+                    device: str = "cuda" if torch.cuda.is_available() else "cpu") -> List[Dict]:
+
+     # 2. Create an empty list to store prediction dictionaries
+     pred_list = []
+
+     # 3. Loop through target paths
+     for path in tqdm(paths):
+
+         # 4. Create empty dictionary to store prediction information for each sample
+         pred_dict = {}
+
+         # 5. Get the sample path and ground truth class name
+         pred_dict["image_path"] = path
+         class_name = path.parent.stem
+         pred_dict["class_name"] = class_name
+
+         # 6. Start the prediction timer
+         start_time = timer()
+
+         # 7. Open image path
+         img = Image.open(path).convert('RGB')
+
+         # 8. Transform the image, add batch dimension and put image on target device
+         transformed_image = transform(img).unsqueeze(0).to(device)
+
+         # 9. Prepare model for inference by sending it to target device and turning on eval() mode
+         model.to(device)
+         model.eval()
+
+         # 10. Get prediction probability, prediction label and prediction class
+         with torch.inference_mode():
+             pred_logit = model(transformed_image) # perform inference on target sample
+             pred_prob = torch.softmax(pred_logit, dim=1) # turn logits into prediction probabilities
+             pred_label = torch.argmax(pred_prob, dim=1) # turn prediction probabilities into prediction label
+             pred_class = class_names[pred_label.cpu()] # bring the prediction label back to CPU for indexing
+
+             # 11. Make sure things in the dictionary are on CPU (required for inspecting predictions later on)
+             pred_dict["pred_prob"] = round(pred_prob.max().cpu().item(), 4)
+             pred_dict["pred_class"] = pred_class
+
+             # 12. End the timer and calculate time per pred
+             end_time = timer()
+             pred_dict["time_for_pred"] = round(end_time-start_time, 4)
+
+         # 13. Does the pred match the true label?
+         pred_dict["correct"] = class_name == pred_class
+
+         # 14. Add the dictionary to the list of preds
+         pred_list.append(pred_dict)
+
+     # 15. Return list of prediction dictionaries
+     return pred_list
+
+ # Make predictions across test dataset with ResNet50
+ resnet50_test_pred_dicts = pred_and_store(paths=test_data_paths,
+                                           model=resnet50,
+                                           transform=resnet50_transforms,
+                                           class_names=class_names,
+                                           device="cpu") # make predictions on CPU
+
+ # Inspect the first 2 prediction dictionaries
+ resnet50_test_pred_dicts[:2]
+
+ # Turn the test_pred_dicts into a DataFrame
+ import pandas as pd
+ resnet50_test_pred_df = pd.DataFrame(resnet50_test_pred_dicts)
+ resnet50_test_pred_df.head()
+
+ # Check number of correct predictions
+ resnet50_test_pred_df.correct.value_counts()
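+
+ # (Added note) value_counts() gives raw counts; overall test accuracy is simply
+ # the mean of the boolean `correct` column — an optional one-liner:
+ # print(f"Test accuracy: {resnet50_test_pred_df.correct.mean() * 100:.2f}%")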
+
+ # Find the average time per prediction
+ resnet50_average_time_per_pred = round(resnet50_test_pred_df.time_for_pred.mean(), 4)
+ print(f"ResNet50 average time per prediction: {resnet50_average_time_per_pred} seconds")
+
+ # Add ResNet50 average prediction time to stats dictionary
+ resnet50_stats["time_per_pred_cpu"] = resnet50_average_time_per_pred
+ resnet50_stats
+
+ # Turn stat dictionaries into DataFrame
+ df = pd.DataFrame([resnet50_stats])
+
+ # Add column for model names
+ df["model"] = ["ResNet50"]
+
+ # Convert accuracy to percentages
+ df["test_acc"] = round(df["test_acc"] * 100, 2)
+
+ df
+
+ # Put ResNet50 on CPU
+ resnet50.to("cpu")
+
+ # Check the device
+ next(iter(resnet50.parameters())).device
+
+ from typing import Tuple, Dict
+
+ def predict(img) -> Tuple[Dict, float]:
+     """Transforms and performs a prediction on img and returns prediction and time taken."""
+     # Start the timer
+     start_time = timer()
+
+     # Transform the target image and add a batch dimension
+     img = resnet50_transforms(img).unsqueeze(0)
+
+     # Put model into evaluation mode and turn on inference mode
+     resnet50.eval()
+     with torch.inference_mode():
+         # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
+         pred_probs = torch.softmax(resnet50(img), dim=1)
+
+     # Create a prediction label and prediction probability dictionary for each prediction class (this is the required format for Gradio's output parameter)
+     pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
+
+     # Calculate the prediction time
+     pred_time = round(timer() - start_time, 5)
+
+     # Return the prediction dictionary and prediction time
+     return pred_labels_and_probs, pred_time
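+
+ # (Added note) An optional local smoke test before wiring up Gradio, assuming
+ # at least one test image path was found above:
+ # sample_img = Image.open(test_data_paths[0]).convert("RGB")
+ # sample_preds, sample_time = predict(sample_img)
+ # print(sample_preds, sample_time)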
+
+ # Create a list of example inputs to our Gradio demo
+ example_list = [[str(filepath)] for filepath in random.sample(test_data_paths, k=4)]
+ example_list
+
+ import gradio as gr
+
+ # Create title, description and article strings
+ title = "DeepFundus 👀"
+ description = "A ResNet50 feature extractor computer vision model to classify retina pathology from optical funduscopic images."
+ article = "Created for fun."
+
+ # Create the Gradio demo
+ demo = gr.Interface(fn=predict, # mapping function from input to output
+                     inputs=gr.Image(type="pil"), # what are the inputs?
+                     outputs=[gr.Label(num_top_classes=8, label="Predictions"), # what are the outputs?
+                              gr.Number(label="Prediction time (s)")], # our fn has two outputs, therefore we have two outputs
+                     examples=example_list,
+                     title=title,
+                     description=description,
+                     article=article)
+
+ # Launch the demo!
+ demo.launch(debug=False, # print errors locally?
+             share=True) # generate a publicly shareable URL?