Emms committed on
Commit 49106b8
1 Parent(s): fb299f9

X-RAY BASE

Files changed (45)
  1. TESTS/1002_right._aug_10.jpeg +0 -0
  2. TESTS/10109_left.jpeg +0 -0
  3. TESTS/CHEST_CT_SCANS/000004 (4).png +0 -0
  4. TESTS/CHEST_CT_SCANS/000009 (4).png +0 -0
  5. TESTS/CHEST_CT_SCANS/000108 (3).png +0 -0
  6. TESTS/CHEST_CT_SCANS/000108 (8).png +0 -0
  7. TESTS/CHEST_CT_SCANS/000110.png +0 -0
  8. TESTS/CHEST_CT_SCANS/000112 (2).png +0 -0
  9. TESTS/CHEST_CT_SCANS/000115 (4).png +0 -0
  10. TESTS/CHEST_CT_SCANS/000118 (5).png +0 -0
  11. TESTS/CHEST_CT_SCANS/000120.png +0 -0
  12. TESTS/CHEST_CT_SCANS/10 - Copy - Copy.png +0 -0
  13. TESTS/CHEST_CT_SCANS/12 - Copy (2) - Copy.png +0 -0
  14. TESTS/COVID19/COVID-1.png +0 -0
  15. TESTS/COVID19/COVID-1005.png +0 -0
  16. TESTS/COVID19/COVID-101.png +0 -0
  17. TESTS/DR_0/10007_right.jpeg +0 -0
  18. TESTS/DR_0/1000_right.jpeg +0 -0
  19. TESTS/DR_0/10010_left.jpeg +0 -0
  20. TESTS/DR_0/10031_right._aug_17.jpeg +0 -0
  21. TESTS/DR_1/10030_left._aug_0._aug_6.jpeg +0 -0
  22. TESTS/DR_1/10085_left._aug_23._aug_4.jpeg +0 -0
  23. TESTS/NORMAL/IM-0001-0001.jpeg +0 -0
  24. TESTS/NORMAL/IM-0117-0001.jpeg +0 -0
  25. TESTS/NORMAL/IM-0131-0001.jpeg +0 -0
  26. TESTS/NORMAL/Normal-100.png +0 -0
  27. TESTS/NORMAL/Normal-10004.png +0 -0
  28. TESTS/PNEUMONIA/person1003_bacteria_2934.jpeg +0 -0
  29. TESTS/PNEUMONIA/person1004_bacteria_2935.jpeg +0 -0
  30. TESTS/PNEUMONIA/person100_virus_184.jpeg +0 -0
  31. Utils/CT_Scan_Utils.py +122 -0
  32. Utils/Covid19_Utils.py +116 -0
  33. Utils/DR_Utils.py +207 -0
  34. Utils/Pneumonia_Utils.py +99 -0
  35. Utils/__pycache__/CT_Scan_Utils.cpython-311.pyc +0 -0
  36. Utils/__pycache__/Covid19_Utils.cpython-311.pyc +0 -0
  37. Utils/__pycache__/DR_Utils.cpython-311.pyc +0 -0
  38. Utils/__pycache__/Pneumonia_Utils.cpython-311.pyc +0 -0
  39. app.py +164 -0
  40. app_interface.py +163 -0
  41. cs_models/DenseNet_Covid.pth.tar +3 -0
  42. cs_models/DenseNet_Pneumonia.pth.tar +3 -0
  43. cs_models/EfficientNet_CT_Scans.pth.tar +3 -0
  44. cs_models/model_DR_9.pth.tar +3 -0
  45. requirements.txt +91 -0
TESTS/1002_right._aug_10.jpeg ADDED
TESTS/10109_left.jpeg ADDED
TESTS/CHEST_CT_SCANS/000004 (4).png ADDED
TESTS/CHEST_CT_SCANS/000009 (4).png ADDED
TESTS/CHEST_CT_SCANS/000108 (3).png ADDED
TESTS/CHEST_CT_SCANS/000108 (8).png ADDED
TESTS/CHEST_CT_SCANS/000110.png ADDED
TESTS/CHEST_CT_SCANS/000112 (2).png ADDED
TESTS/CHEST_CT_SCANS/000115 (4).png ADDED
TESTS/CHEST_CT_SCANS/000118 (5).png ADDED
TESTS/CHEST_CT_SCANS/000120.png ADDED
TESTS/CHEST_CT_SCANS/10 - Copy - Copy.png ADDED
TESTS/CHEST_CT_SCANS/12 - Copy (2) - Copy.png ADDED
TESTS/COVID19/COVID-1.png ADDED
TESTS/COVID19/COVID-1005.png ADDED
TESTS/COVID19/COVID-101.png ADDED
TESTS/DR_0/10007_right.jpeg ADDED
TESTS/DR_0/1000_right.jpeg ADDED
TESTS/DR_0/10010_left.jpeg ADDED
TESTS/DR_0/10031_right._aug_17.jpeg ADDED
TESTS/DR_1/10030_left._aug_0._aug_6.jpeg ADDED
TESTS/DR_1/10085_left._aug_23._aug_4.jpeg ADDED
TESTS/NORMAL/IM-0001-0001.jpeg ADDED
TESTS/NORMAL/IM-0117-0001.jpeg ADDED
TESTS/NORMAL/IM-0131-0001.jpeg ADDED
TESTS/NORMAL/Normal-100.png ADDED
TESTS/NORMAL/Normal-10004.png ADDED
TESTS/PNEUMONIA/person1003_bacteria_2934.jpeg ADDED
TESTS/PNEUMONIA/person1004_bacteria_2935.jpeg ADDED
TESTS/PNEUMONIA/person100_virus_184.jpeg ADDED
Utils/CT_Scan_Utils.py ADDED
@@ -0,0 +1,122 @@
+ import cv2
+ from PIL import Image
+ import torch
+ import matplotlib.pyplot as plt
+ import torch.nn.functional as F  # fixed: was `torch.functional`, the nn functional API is intended
+ import torch.nn as nn
+ import numpy as np
+ import torchvision.transforms as transform
+ # !pip install efficientnet_pytorch -q
+ from efficientnet_pytorch import EfficientNet
+
+ if torch.cuda.is_available():
+     device = torch.device("cuda")
+ else:
+     device = torch.device("cpu")
+
+ val_transform = transform.Compose([transform.Resize(size=(224, 224)),
+                                    transform.ToTensor(),
+                                    transform.Normalize(mean=[0.485, 0.456, 0.406],
+                                                        std=[0.229, 0.224, 0.225])
+                                    ])
+
+ def transform_image(image, transforms):
+     # img = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
+     img = transforms(image)
+     img = img.unsqueeze(0)  # add batch dimension
+     return img
+
+ class Efficient(nn.Module):
+     def __init__(self, num_classes: int = 4):  # default now matches the 4-class head below
+         super(Efficient, self).__init__()
+         self.model = EfficientNet.from_pretrained("efficientnet-b3")
+         self.pool = nn.AdaptiveAvgPool2d((1, 1))
+         self.fc = nn.Linear(1536, 256)
+
+         self.reg_model = nn.Sequential(
+             nn.BatchNorm1d(256),
+             nn.Linear(256, 500),
+             nn.BatchNorm1d(500),
+             nn.Tanh(),
+             nn.Dropout(0.2),
+             nn.Linear(500, 100),
+             nn.BatchNorm1d(100),
+             nn.Tanh(),
+             nn.Dropout(0.2),
+             nn.Linear(100, num_classes),
+         )
+
+     def forward(self, x):
+         x = self.model.extract_features(x)
+         x = self.pool(x)
+         x = x.view(-1, 1536)
+         x = self.fc(x)
+         x = self.reg_model(x)
+         return x
+
+ class ModelGradCam(nn.Module):
+     def __init__(self, base_model):
+         super(ModelGradCam, self).__init__()
+
+         self.base_model = base_model
+         self.features_conv = self.base_model.model.extract_features
+         self.pool = self.base_model.pool
+         self.fc = self.base_model.fc
+         self.classifier = self.base_model.reg_model
+         self.gradients = None
+
+     def activations_hook(self, grad):
+         # called during backward: stores the gradient at the final feature maps
+         self.gradients = grad
+
+     def forward(self, x):
+         x = self.features_conv(x)
+         x.register_hook(self.activations_hook)
+         x = self.pool(x)
+         x = x.view(-1, 1536)
+         x = self.fc(x)
+         x = self.classifier(x)
+         return x
+
+     def get_activations_gradient(self):
+         return self.gradients
+
+     def get_activations(self, x):
+         return self.features_conv(x)
+
+
+ def plot_grad_cam(model, x_ray_image, class_names, normalized=True):
+
+     model.eval()
+     # fig, axs = plt.subplots(1, 2, figsize=(15, 10))
+
+     image = x_ray_image
+     outputs = F.softmax(model(image), dim=1)
+     _, pred = torch.max(outputs, 1)
+     outputs[0][pred.item()].backward()
+     gradients = model.get_activations_gradient()
+     pooled_gradients = torch.mean(gradients, dim=[0, 2, 3])
+     activations = model.get_activations(image).detach()
+
+     # weight each feature-map channel by its pooled gradient, then average
+     activations *= pooled_gradients.unsqueeze(-1).unsqueeze(-1)
+     heatmap = torch.mean(activations, dim=1).squeeze()
+     heatmap = np.maximum(heatmap.cpu(), 0)
+     heatmap /= torch.max(heatmap)
+
+     img = image.squeeze().permute(1, 2, 0).cpu().numpy()
+     img = img if normalized else img / 255.0
+     heatmap = cv2.resize(heatmap.numpy(), (img.shape[1], img.shape[0]))
+     heatmap = np.uint8(255 * heatmap)
+     heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
+
+     superimposed_img = heatmap * 0.0025 + img
+     outputs = outputs.tolist()[0]
+     output_dict = dict(zip(class_names, np.round(outputs, 3)))
+     return superimposed_img, class_names[pred.item()], output_dict
+     # axs[0].imshow(img)
+     # axs[1].imshow(superimposed_img)
+     # axs[0].set_title(f'Predicted: {class_names[pred.item()]}\n Confidence: {conf.item():.2f}')
+     # axs[0].axis('off')
+     # axs[1].set_title(f'Predicted: {class_names[pred.item()]}\n Confidence: {conf.item():.2f}')
+     # axs[1].axis('off')
+     # plt.show()
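For context, the utilities in CT_Scan_Utils.py are consumed by app.py further down. A minimal usage sketch (illustrative only, not part of the commit), assuming the checkpoint and a test image added in this commit:

import torch
from PIL import Image
import Utils.CT_Scan_Utils as CSU

model = CSU.Efficient()
model.load_state_dict(torch.load("cs_models/EfficientNet_CT_Scans.pth.tar", map_location="cpu"), strict=False)
cam_model = CSU.ModelGradCam(model)

image = Image.open("TESTS/CHEST_CT_SCANS/000110.png").convert("RGB")
x = CSU.transform_image(image, CSU.val_transform)
heatmap, label, probs = CSU.plot_grad_cam(
    cam_model, x,
    ["adenocarcinoma", "large.cell.carcinoma", "normal", "squamous.cell.carcinoma"])
print(label, probs)  # predicted class plus a {class: probability} dict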
Utils/Covid19_Utils.py ADDED
@@ -0,0 +1,116 @@
+ import cv2
+ from PIL import Image
+ import torch
+ import matplotlib.pyplot as plt
+ import torch.nn.functional as F  # fixed: was `torch.functional`, the nn functional API is intended
+ import torch.nn as nn
+ import numpy as np
+ import torchvision
+ import torchvision.transforms as transforms
+
+ if torch.cuda.is_available():
+     device = torch.device("cuda")
+ else:
+     device = torch.device("cpu")
+
+ mean_nums = [0.485, 0.456, 0.406]
+ std_nums = [0.229, 0.224, 0.225]
+
+ val_transform = transforms.Compose([
+     transforms.Resize((150, 150)),
+     transforms.CenterCrop(150),  # crop the central 150x150 region
+     transforms.ToTensor(),
+     transforms.Normalize(mean=mean_nums, std=std_nums)
+ ])
+
+ def transform_image(image, transforms):
+     # img = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
+     img = transforms(image)
+     img = img.unsqueeze(0)  # add batch dimension
+     return img
+
+ class DenseNet(nn.Module):
+     def __init__(self):
+         super(DenseNet, self).__init__()
+         self.base_model = torchvision.models.densenet121(weights="DEFAULT").features
+         self.pool = nn.AdaptiveAvgPool2d((1, 1))
+         self.fc = nn.Linear(1024, 1000)
+         self.classify = nn.Linear(1000, 1)
+         self.classifier = nn.Sigmoid()
+
+     def forward(self, x):
+         x = self.base_model(x)
+         x = self.pool(x)
+         x = x.view(-1, 1024)
+         x = self.fc(x)
+         x = self.classify(x)
+         x = self.classifier(x)
+         return x
+
+ class ModelGradCam(nn.Module):
+     def __init__(self, base_model):
+         super(ModelGradCam, self).__init__()
+
+         self.features_conv = base_model.base_model
+         self.pool = base_model.pool
+         self.fc = base_model.fc
+         self.classify = base_model.classify
+         self.classifier = base_model.classifier
+         self.gradients = None
+
+     def activations_hook(self, grad):
+         # called during backward: stores the gradient at the final feature maps
+         self.gradients = grad
+
+     def forward(self, x):
+         x = self.features_conv(x)
+         x.register_hook(self.activations_hook)
+         x = self.pool(x)
+         x = x.view(-1, 1024)
+         x = self.fc(x)
+         x = self.classify(x)
+         x = self.classifier(x)
+         return x
+
+     def get_activations_gradient(self):
+         return self.gradients
+
+     def get_activations(self, x):
+         return self.features_conv(x)
+
+ def plot_grad_cam(model, x_ray_image, class_names, threshold: float = 0.5, normalized=True):
+
+     model.eval()
+     # fig, axs = plt.subplots(1, 2, figsize=(15, 10))
+
+     image = x_ray_image
+     outputs = model(image).view(-1)  # single sigmoid probability
+     conf = [1 - outputs.item(), outputs.item()]
+     # conf = 1 - outputs if outputs < threshold else outputs
+     pred = torch.where(outputs > threshold, torch.tensor(1, device=device), torch.tensor(0, device=device))
+     outputs[0].backward()
+     gradients = model.get_activations_gradient()
+     pooled_gradients = torch.mean(gradients, dim=[0, 2, 3])
+     activations = model.get_activations(image).detach()
+
+     activations *= pooled_gradients.unsqueeze(-1).unsqueeze(-1)
+     heatmap = torch.mean(activations, dim=1).squeeze()
+     heatmap = np.maximum(heatmap.cpu(), 0)
+     heatmap /= torch.max(heatmap)
+
+     img = image.squeeze().permute(1, 2, 0).cpu().numpy()
+     img = img if normalized else img / 255.0
+     heatmap = cv2.resize(heatmap.numpy(), (img.shape[1], img.shape[0]))
+     heatmap = np.uint8(255 * heatmap)
+     heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
+
+     superimposed_img = heatmap * 0.0045 + img
+     output_dict = dict(zip(class_names, np.round(conf, 3)))
+     return superimposed_img, class_names[pred.item()], output_dict
+     # axs[0].imshow(img)
+     # axs[1].imshow(superimposed_img)
+     # axs[0].set_title(f'Predicted: {class_names[pred.item()]}\n Confidence: {conf.item():.3f}')
+     # axs[0].axis('off')
+     # axs[1].set_title(f'Predicted: {class_names[pred.item()]}\n Confidence: {conf.item():.3f}')
+     # axs[1].axis('off')
+     # plt.show()
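Unlike the four-class CT model, the COVID classifier emits a single sigmoid probability, so plot_grad_cam thresholds it rather than taking an argmax. A minimal sketch of just that decision rule (made-up value, illustrative only):

import torch

prob = torch.tensor([0.83])               # stand-in for the DenseNet sigmoid output
conf = [1 - prob.item(), prob.item()]     # [P(Normal), P(Covid19)]
pred = int(prob.item() > 0.5)             # 0 -> Normal, 1 -> Covid19
print(["Normal", "Covid19"][pred], conf)  # Covid19 [~0.17, ~0.83]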
Utils/DR_Utils.py ADDED
@@ -0,0 +1,207 @@
+ import cv2
+ from PIL import Image
+ import torch
+ import matplotlib.pyplot as plt
+ import torch.nn.functional as F  # fixed: was `torch.functional`, the nn functional API is intended
+ import torch.nn as nn
+ import numpy as np
+ import albumentations as A
+ from albumentations.pytorch import ToTensorV2
+ # !pip install efficientnet_pytorch -q
+ from efficientnet_pytorch import EfficientNet
+
+ if torch.cuda.is_available():
+     device = torch.device("cuda")
+ else:
+     device = torch.device("cpu")
+
+ val_transform = A.Compose(
+     [
+         A.Resize(height=300, width=300),
+         A.Normalize(
+             mean=[0.3199, 0.2240, 0.1609],
+             std=[0.3020, 0.2183, 0.1741],
+             max_pixel_value=255.0,
+         ),
+         ToTensorV2(),
+     ]
+ )
+
+ def transform_image(image_1, image_2, transforms):
+     # one fundus image per eye; each gets the same transform and a batch dimension
+     # img_1 = cv2.cvtColor(cv2.imread(image_path_1), cv2.COLOR_BGR2RGB)
+     img_1 = transforms(image=np.array(image_1))['image']
+     img_1 = img_1.unsqueeze(0)
+
+     # img_2 = cv2.cvtColor(cv2.imread(image_path_2), cv2.COLOR_BGR2RGB)
+     img_2 = transforms(image=np.array(image_2))['image']
+     img_2 = img_2.unsqueeze(0)
+     images = {'img1': img_1, 'img2': img_2}
+     return images
+
+ class BasicConv2d(nn.Module):
+     def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False):
+         super(BasicConv2d, self).__init__()
+         self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)
+         self.norm = nn.BatchNorm2d(out_channels, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+
+     def forward(self, x):
+         x = self.conv1(x)
+         x = self.norm(x)
+         return x
+
+
+ class BottleNeck(nn.Module):
+     def __init__(self, prev_channels, in_channels, out_channels, kernel_size=3, stride=2, padding=1, reduce=False):
+         super(BottleNeck, self).__init__()
+         self.reduce = reduce
+
+         self.ReduceBlock1 = BasicConv2d(prev_channels, in_channels, kernel_size=1, stride=stride, padding=0)
+         self.ReduceBlock2 = BasicConv2d(prev_channels, out_channels, kernel_size=1, stride=stride, padding=0)
+
+         self.Block1 = BasicConv2d(prev_channels, in_channels, kernel_size=1, stride=1, padding=0)
+         self.Block2 = BasicConv2d(in_channels, in_channels, kernel_size=kernel_size, stride=1, padding=padding)
+         self.Block3 = BasicConv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
+         self.relu = nn.ReLU()
+
+     def forward(self, x):
+         out = x
+         if self.reduce:
+             out = self.ReduceBlock1(x)
+             out = self.relu(out)
+             identity = self.ReduceBlock2(x)  # projection shortcut
+         else:
+             out = self.Block1(out)
+             out = self.relu(out)
+         out = self.Block2(out)
+         out = self.relu(out)
+         out = self.Block3(out)
+         if self.reduce:
+             out = self.relu(out + identity)
+
+         return out
+
+ class ConvolutionNeuralNetwork(nn.Module):
+     def __init__(self, num_classes: int = 1) -> None:  # fixed: __init__ returns None, not nn.Module
+         super(ConvolutionNeuralNetwork, self).__init__()
+         self.conv1 = BasicConv2d(3, 64, 7, 2, 3)
+         self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
+
+         self.ResBlock2a = BottleNeck(64, 64, 256, 3, 1, 1, reduce=True)
+         self.ResBlock2b = BottleNeck(256, 64, 256, 3)
+         self.ResBlock2c = BottleNeck(256, 64, 256, 3)
+
+         self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+         self.reg_model = nn.Sequential(
+             nn.BatchNorm1d(256 * 2),
+             nn.Linear(256 * 2, 500),
+             nn.BatchNorm1d(500),
+             nn.ReLU(),
+             nn.Dropout(0.2),
+             nn.Linear(500, 100),
+             nn.BatchNorm1d(100),
+             nn.ReLU(),
+             nn.Dropout(0.2),
+             nn.Linear(100, 2),
+         )
+
+     def forward(self, images):
+         # shared trunk applied to each eye, then the two feature vectors are concatenated
+         img = self.conv1(images['img1'])
+         img = self.pool1(img)
+         img = self.ResBlock2a(img)
+         img = self.ResBlock2b(img)
+         img = self.ResBlock2c(img)
+         img = self.avgpool(img)
+         img = torch.flatten(img, 1)
+
+         img1 = self.conv1(images['img2'])
+         img1 = self.pool1(img1)
+         img1 = self.ResBlock2a(img1)
+         img1 = self.ResBlock2b(img1)
+         img1 = self.ResBlock2c(img1)
+         img1 = self.avgpool(img1)
+         img1 = torch.flatten(img1, 1)
+
+         conc = torch.cat((img, img1), dim=1)
+         x = self.reg_model(conc)
+
+         return x
+
+
+ class Efficient(nn.Module):
+     def __init__(self, num_classes: int = 1):
+         super(Efficient, self).__init__()
+         self.model = EfficientNet.from_pretrained("efficientnet-b3")
+         num_features = self.model._fc.in_features
+         self.model._fc = nn.Linear(num_features, 256)
+
+         self.reg_model = nn.Sequential(
+             nn.BatchNorm1d(256 * 2),
+             nn.Linear(256 * 2, 500),
+             nn.BatchNorm1d(500),
+             nn.ReLU(),
+             nn.Dropout(0.2),
+             nn.Linear(500, 100),
+             nn.BatchNorm1d(100),
+             nn.ReLU(),
+             nn.Dropout(0.2),
+             nn.Linear(100, 2),
+         )
+
+     def forward(self, images):
+         img1 = self.model(images['img1'])
+         img2 = self.model(images['img2'])
+         conc = torch.cat((img1, img2), dim=1)
+         x = self.reg_model(conc)
+         return x
+
+ class EnsembleModel(nn.Module):
+     def __init__(self, model_cnn, model_eff):
+         super(EnsembleModel, self).__init__()
+         self.model_cnn = model_cnn
+         self.model_eff = model_eff
+         assert model_cnn.reg_model[-1].out_features == model_eff.reg_model[-1].out_features
+         # Both heads share the same number of outputs, so no extra fully connected layer is needed.
+
+     def forward(self, images):
+         model_cnn_output = self.model_cnn(images)
+         model_res_output = self.model_eff(images)
+         ensemble_output = (model_cnn_output + model_res_output) / 2.0
+         # ensemble_output = torch.cat((model_cnn_output, model_res_output), dim=1)
+         return ensemble_output
+
+ def Inf_predict_image(model: nn.Module, images, class_names):
+     model.eval()
+     # fig, axs = plt.subplots(1, 2, figsize=(15, 10))
+
+     for img in images:
+         images[img] = images[img].to(device)
+
+     predictions = model(images)
+
+     # Bucket the continuous regression outputs into integer DR grades
+     predictions[predictions < 0.5] = 0
+     predictions[(predictions >= 0.5) & (predictions < 1.5)] = 1
+     predictions[(predictions >= 1.5) & (predictions < 2.5)] = 2
+     predictions[(predictions >= 2.5) & (predictions < 3.5)] = 3
+     predictions[predictions >= 3.5] = 4
+     predictions = predictions.long().squeeze(1)
+
+     image_1 = images['img1'].squeeze().permute(1, 2, 0).cpu().numpy()
+     image_2 = images['img2'].squeeze().permute(1, 2, 0).cpu().numpy()
+
+     predicted_label1 = predictions[0][0].item()
+     predicted_label2 = predictions[0][1].item()
+
+     return class_names[predicted_label1], class_names[predicted_label2]
+     # axs[0].imshow(image_1)
+     # axs[1].imshow(image_2)
+     # axs[0].set_title(f'Predicted: ({class_names[predicted_label1]})')
+     # axs[1].set_title(f'Predicted: ({class_names[predicted_label2]})')
+     # axs[0].axis('off')
+     # axs[1].axis('off')
+
+     # plt.show()
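Inf_predict_image above treats DR grading as regression and buckets the continuous outputs into the five grades with chained comparisons. The same rule can be expressed in one call; a minimal sketch (illustrative only; torch.bucketize with right=True matches the ">=" boundaries used above):

import torch

preds = torch.tensor([[0.3, 2.7]])        # stand-in regression outputs for ('img1', 'img2')
boundaries = torch.tensor([0.5, 1.5, 2.5, 3.5])
grades = torch.bucketize(preds, boundaries, right=True)
print(grades)  # tensor([[0, 3]]) -> 'No DR' for the first eye, 'Severe' for the second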
Utils/Pneumonia_Utils.py ADDED
@@ -0,0 +1,99 @@
+ import cv2
+ from PIL import Image
+ import torch
+ import matplotlib.pyplot as plt
+ import torch.nn.functional as F  # fixed: was `torch.functional`, the nn functional API is intended
+ import torch.nn as nn
+ import numpy as np
+ import torchvision
+ import torchvision.transforms as transform
+ # !pip install efficientnet_pytorch -q
+ from efficientnet_pytorch import EfficientNet
+
+ if torch.cuda.is_available():
+     device = torch.device("cuda")
+ else:
+     device = torch.device("cpu")
+
+ val_transform = transform.Compose([transform.Resize(255),
+                                    transform.CenterCrop(224),
+                                    transform.ToTensor(),
+                                    ])
+
+ def transform_image(image, transforms):
+     # img = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
+     img = transforms(image)
+     img = img.unsqueeze(0)  # add batch dimension
+     return img
+
+ # DenseNet-161 backbone with a two-class head (Normal / Pneumonia)
+ DenseNet = torchvision.models.densenet161(weights="DEFAULT")
+ for param in DenseNet.parameters():
+     param.requires_grad = True
+ in_features = DenseNet.classifier.in_features
+ DenseNet.classifier = nn.Linear(in_features, 2)
+
+
+ class ModelGradCam(nn.Module):
+     def __init__(self, base_model):
+         super(ModelGradCam, self).__init__()
+
+         self.base_model = base_model
+         self.features_conv = self.base_model.features
+         self.pool = nn.AdaptiveAvgPool2d((1, 1))
+         self.classifier = self.base_model.classifier
+         self.gradients = None
+
+     def activations_hook(self, grad):
+         # called during backward: stores the gradient at the final feature maps
+         self.gradients = grad
+
+     def forward(self, x):
+         x = self.features_conv(x)
+         x.register_hook(self.activations_hook)
+         x = self.pool(x)
+         x = x.view(-1, 2208)  # densenet161 feature width
+         x = self.classifier(x)
+         return x
+
+     def get_activations_gradient(self):
+         return self.gradients
+
+     def get_activations(self, x):
+         return self.features_conv(x)
+
+ def plot_grad_cam(model, x_ray_image, class_names, normalized=True):
+
+     model.eval()
+     # fig, axs = plt.subplots(1, 2, figsize=(15, 10))
+
+     image = x_ray_image
+     outputs = F.softmax(model(image), dim=1)
+     _, pred = torch.max(outputs, 1)
+     outputs[0][pred.item()].backward()
+     gradients = model.get_activations_gradient()
+     pooled_gradients = torch.mean(gradients, dim=[0, 2, 3])
+     activations = model.get_activations(image).detach()
+
+     activations *= pooled_gradients.unsqueeze(-1).unsqueeze(-1)
+     heatmap = torch.mean(activations, dim=1).squeeze()
+     heatmap = np.maximum(heatmap.cpu(), 0)
+     heatmap /= torch.max(heatmap)
+
+     img = image.squeeze().permute(1, 2, 0).cpu().numpy()
+     img = img if normalized else img / 255.0
+     heatmap = cv2.resize(heatmap.numpy(), (img.shape[1], img.shape[0]))
+     heatmap = np.uint8(255 * heatmap)
+     heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
+
+     superimposed_img = heatmap * 0.0025 + img
+     outputs = outputs.tolist()[0]
+     output_dict = dict(zip(class_names, np.round(outputs, 3)))
+     return superimposed_img, class_names[pred.item()], output_dict
+     # axs[0].imshow(img)
+     # axs[1].imshow(superimposed_img)
+     # axs[0].set_title(f'Predicted: {class_names[pred.item()]}\n Confidence: {conf.item():.2f}')
+     # axs[0].axis('off')
+     # axs[1].set_title(f'Predicted: {class_names[pred.item()]}\n Confidence: {conf.item():.2f}')
+     # axs[1].axis('off')
+     # plt.show()
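All three ModelGradCam wrappers rely on the same mechanism: register_hook captures the gradient at the last convolutional feature maps during backward, and the channel-wise pooled gradients weight the activations. A standalone sketch of that pattern on tiny random tensors (illustrative only; the real code adds ReLU clipping, normalization, and colormapping):

import torch

feats = torch.randn(1, 8, 4, 4, requires_grad=True)  # stand-in feature maps
grads = {}
feats.register_hook(lambda g: grads.update(feat=g))  # same role as activations_hook
score = feats.mean()                                 # stand-in for the selected class score
score.backward()
weights = grads["feat"].mean(dim=[0, 2, 3])          # pooled gradients, one weight per channel
cam = torch.relu((weights[None, :, None, None] * feats).mean(dim=1))
print(cam.shape)  # torch.Size([1, 4, 4])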
Utils/__pycache__/CT_Scan_Utils.cpython-311.pyc ADDED
Binary file (8.12 kB)

Utils/__pycache__/Covid19_Utils.cpython-311.pyc ADDED
Binary file (7.72 kB)

Utils/__pycache__/DR_Utils.cpython-311.pyc ADDED
Binary file (12.9 kB)

Utils/__pycache__/Pneumonia_Utils.cpython-311.pyc ADDED
Binary file (6.33 kB)
 
app.py ADDED
@@ -0,0 +1,164 @@
+ import torch
+ import os
+ import gradio as gr
+ import numpy as np
+ import matplotlib.pyplot as plt
+ from PIL import Image
+ from functools import partial
+
+ import Utils.Pneumonia_Utils as PU
+ import Utils.CT_Scan_Utils as CSU
+ import Utils.Covid19_Utils as C19U
+ import Utils.DR_Utils as DRU
+
+ # Constants for model paths
+ CANCER_MODEL_PATH = 'cs_models/EfficientNet_CT_Scans.pth.tar'
+ DIABETIC_RETINOPATHY_MODEL_PATH = 'cs_models/model_DR_9.pth.tar'
+ PNEUMONIA_MODEL_PATH = 'cs_models/DenseNet_Pneumonia.pth.tar'
+ COVID_MODEL_PATH = 'cs_models/DenseNet_Covid.pth.tar'
+
+ # Constants for class labels
+ CANCER_CLASS_LABELS = ['adenocarcinoma', 'large.cell.carcinoma', 'normal', 'squamous.cell.carcinoma']
+ DIABETIC_RETINOPATHY_CLASS_LABELS = ['No DR', 'Mild', 'Moderate', 'Severe', 'Proliferative DR']
+ PNEUMONIA_CLASS_LABELS = ['Normal', 'Pneumonia']
+ COVID_CLASS_LABELS = ['Normal', 'Covid19']
+
+ if torch.cuda.is_available():
+     device = torch.device("cuda")
+ else:
+     device = torch.device("cpu")
+
+
+ def cancer_page(image, test_model):
+     x_ray_image = CSU.transform_image(image, CSU.val_transform)
+     x_ray_image = x_ray_image.to(device)
+     grad_x_ray_image, pred_label, pred_conf = CSU.plot_grad_cam(test_model,
+                                                                 x_ray_image,
+                                                                 CANCER_CLASS_LABELS,
+                                                                 normalized=True)
+     grad_x_ray_image = np.clip(grad_x_ray_image, 0, 1)
+     return grad_x_ray_image, pred_label, pred_conf
+
+
+ def covid_page(image, test_model):
+     x_ray_image = C19U.transform_image(image, C19U.val_transform)
+     x_ray_image = x_ray_image.to(device)
+     grad_x_ray_image, pred_label, pred_conf = C19U.plot_grad_cam(test_model,
+                                                                  x_ray_image,
+                                                                  COVID_CLASS_LABELS,
+                                                                  normalized=True)
+     grad_x_ray_image = np.clip(grad_x_ray_image, 0, 1)
+     return grad_x_ray_image, pred_label, pred_conf
+
+
+ def pneumonia_page(image, test_model):
+     x_ray_image = PU.transform_image(image, PU.val_transform)
+     x_ray_image = x_ray_image.to(device)
+     grad_x_ray_image, pred_label, pred_conf = PU.plot_grad_cam(test_model,
+                                                                x_ray_image,
+                                                                PNEUMONIA_CLASS_LABELS,
+                                                                normalized=True)
+     grad_x_ray_image = np.clip(grad_x_ray_image, 0, 1)
+     return grad_x_ray_image, pred_label, pred_conf
+
+ def diabetic_retinopathy_page(image_1, image_2, test_model):
+     images = DRU.transform_image(image_1, image_2, DRU.val_transform)
+     pred_label_1, pred_label_2 = DRU.Inf_predict_image(test_model,
+                                                        images,
+                                                        DIABETIC_RETINOPATHY_CLASS_LABELS)
+     return pred_label_1, pred_label_2
+
+ CSU_model = CSU.Efficient().to(device)
+ CSU_model.load_state_dict(torch.load(CANCER_MODEL_PATH, map_location=torch.device('cpu')), strict=False)
+ CSU_test_model = CSU.ModelGradCam(CSU_model).to(device)
+ CSU_images_dir = "TESTS/CHEST_CT_SCANS"
+ all_images = os.listdir(CSU_images_dir)
+ CSU_examples = [[os.path.join(CSU_images_dir, image)] for image in np.random.choice(all_images, size=4, replace=False)]
+
+ C19U_model = C19U.DenseNet().to(device)
+ C19U_model.load_state_dict(torch.load(COVID_MODEL_PATH, map_location=torch.device('cpu')), strict=False)
+ C19U_test_model = C19U.ModelGradCam(C19U_model).to(device)
+ C19U_C19_images_dir = [[os.path.join("TESTS/COVID19", image)] for image in np.random.choice(os.listdir("TESTS/COVID19"), size=2, replace=False)]
+ NORM_images_dir = [[os.path.join("TESTS/NORMAL", image)] for image in np.random.choice(os.listdir("TESTS/NORMAL"), size=2, replace=False)]
+ C19U_examples = C19U_C19_images_dir + NORM_images_dir
+
+ PU_model = PU.DenseNet.to(device)  # module-level densenet161 instance from Pneumonia_Utils
+ PU_model.load_state_dict(torch.load(PNEUMONIA_MODEL_PATH, map_location=torch.device('cpu')), strict=False)
+ PU_test_model = PU.ModelGradCam(PU_model).to(device)
+ PU_images_dir = [[os.path.join("TESTS/PNEUMONIA", image)] for image in np.random.choice(os.listdir("TESTS/PNEUMONIA"), size=2, replace=False)]
+ NORM_images_dir = [[os.path.join("TESTS/NORMAL", image)] for image in np.random.choice(os.listdir("TESTS/NORMAL"), size=2, replace=False)]
+ PU_examples = PU_images_dir + NORM_images_dir
+
+ DRU_cnn_model = DRU.ConvolutionNeuralNetwork().to(device)
+ DRU_eff_b3 = DRU.Efficient().to(device)
+ DRU_ensemble = DRU.EnsembleModel(DRU_cnn_model, DRU_eff_b3).to(device)
+ DRU_ensemble.load_state_dict(torch.load(DIABETIC_RETINOPATHY_MODEL_PATH, map_location=torch.device('cpu'))["state_dict"], strict=False)
+ DRU_test_model = DRU_ensemble
+ DRU_examples = [['TESTS/DR_1/10030_left._aug_0._aug_6.jpeg', 'TESTS/DR_0/10031_right._aug_17.jpeg']]
+
+ demo = gr.Blocks(title="X-RAY_CLASSIFIER")
+
+ with demo:
+
+     gr.Markdown(
+         """# WELCOME, Try Out the X-Ray Classifier Below
+         Select a tab to try each of the classification models."""
+     )
+
+     with gr.Tab("Chest Cancer"):
+         with gr.Row():
+             cancer_input = gr.Image(type="pil", label="Image")
+             cancer_output1 = gr.Image(type="numpy", label="Heatmap Image")
+         cancer_output2 = gr.Textbox(label="Labels Present")
+         cancer_output3 = gr.Label(label="Probabilities", show_label=False)
+         cancer_button = gr.Button("Predict")
+         cancer_examples = gr.Examples(CSU_examples, inputs=[cancer_input])
+
+     with gr.Tab("Covid19"):
+         with gr.Row():
+             covid_input = gr.Image(type="pil", label="Image")
+             covid_output1 = gr.Image(type="numpy", label="Heatmap Image")
+         covid_output2 = gr.Textbox(label="Labels Present")
+         covid_output3 = gr.Label(label="Probabilities", show_label=False)
+         covid_button = gr.Button("Predict")
+         covid_examples = gr.Examples(C19U_examples, inputs=[covid_input])
+
+     with gr.Tab("Pneumonia"):
+         with gr.Row():
+             pneumonia_input = gr.Image(type="pil", label="Image")
+             pneumonia_output1 = gr.Image(type="numpy", label="Heatmap Image")
+         pneumonia_output2 = gr.Textbox(label="Labels Present")
+         pneumonia_output3 = gr.Label(label="Probabilities", show_label=False)
+         pneumonia_button = gr.Button("Predict")
+         pneumonia_examples = gr.Examples(PU_examples, inputs=[pneumonia_input])
+
+     with gr.Tab("Diabetic Retinopathy"):
+         with gr.Row():
+             dr_input1 = gr.Image(type="pil", label="Image")
+             dr_input2 = gr.Image(type="pil", label="Image")
+         dr_output1 = gr.Textbox(label="Labels Present")
+         dr_output2 = gr.Textbox(label="Labels Present")
+         dr_button = gr.Button("Predict")
+         dr_examples = gr.Examples(DRU_examples, inputs=[dr_input1, dr_input2])
+
+     cancer_button.click(partial(cancer_page, test_model=CSU_test_model),
+                         inputs=cancer_input,
+                         outputs=[cancer_output1, cancer_output2, cancer_output3])
+
+     covid_button.click(partial(covid_page, test_model=C19U_test_model),
+                        inputs=covid_input,
+                        outputs=[covid_output1, covid_output2, covid_output3])
+
+     pneumonia_button.click(partial(pneumonia_page, test_model=PU_test_model),
+                            inputs=pneumonia_input,
+                            outputs=[pneumonia_output1, pneumonia_output2, pneumonia_output3])
+
+     dr_button.click(partial(diabetic_retinopathy_page,
+                             test_model=DRU_test_model),
+                     inputs=[dr_input1, dr_input2],
+                     outputs=[dr_output1, dr_output2])
+
+
+ if __name__ == "__main__":
+
+     demo.launch()
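Since Gradio passes only the UI inputs to a callback, app.py binds each loaded model into its handler with functools.partial. A minimal sketch of that pattern (stand-in names, illustrative only):

from functools import partial

def page(image, test_model):
    return image, test_model  # stand-in for a real prediction callback

callback = partial(page, test_model="loaded-model")
print(callback("x-ray"))  # ('x-ray', 'loaded-model')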
app_interface.py ADDED
@@ -0,0 +1,163 @@
+ import torch
+ import os
+ import gradio as gr
+ import numpy as np
+ import matplotlib.pyplot as plt
+ from PIL import Image
+ from functools import partial
+
+ import Utils.Pneumonia_Utils as PU
+ import Utils.CT_Scan_Utils as CSU
+ import Utils.Covid19_Utils as C19U
+ import Utils.DR_Utils as DRU
+
+ # Constants for model paths
+ CANCER_MODEL_PATH = 'cs_models/EfficientNet_CT_Scans.pth.tar'
+ DIABETIC_RETINOPATHY_MODEL_PATH = 'cs_models/model_DR_9.pth.tar'
+ PNEUMONIA_MODEL_PATH = 'cs_models/DenseNet_Pneumonia.pth.tar'
+ COVID_MODEL_PATH = 'cs_models/DenseNet_Covid.pth.tar'
+
+ # Constants for class labels
+ CANCER_CLASS_LABELS = ['adenocarcinoma', 'large.cell.carcinoma', 'normal', 'squamous.cell.carcinoma']
+ DIABETIC_RETINOPATHY_CLASS_LABELS = ['No DR', 'Mild', 'Moderate', 'Severe', 'Proliferative DR']
+ PNEUMONIA_CLASS_LABELS = ['Normal', 'Pneumonia']
+ COVID_CLASS_LABELS = ['Normal', 'Covid19']
+
+ if torch.cuda.is_available():
+     device = torch.device("cuda")
+ else:
+     device = torch.device("cpu")
+
+
+ def cancer_page(image, test_model):
+     x_ray_image = CSU.transform_image(image, CSU.val_transform)
+     x_ray_image = x_ray_image.to(device)
+     grad_x_ray_image, pred_label, pred_conf = CSU.plot_grad_cam(test_model,
+                                                                 x_ray_image,
+                                                                 CANCER_CLASS_LABELS,
+                                                                 normalized=True)
+     grad_x_ray_image = np.clip(grad_x_ray_image, 0, 1)
+     return grad_x_ray_image, pred_label, pred_conf
+
+
+ def covid_page(image, test_model):
+     x_ray_image = C19U.transform_image(image, C19U.val_transform)
+     x_ray_image = x_ray_image.to(device)
+     grad_x_ray_image, pred_label, pred_conf = C19U.plot_grad_cam(test_model,
+                                                                  x_ray_image,
+                                                                  COVID_CLASS_LABELS,
+                                                                  normalized=True)
+     grad_x_ray_image = np.clip(grad_x_ray_image, 0, 1)
+     return grad_x_ray_image, pred_label, pred_conf
+
+
+ def pneumonia_page(image, test_model):
+     x_ray_image = PU.transform_image(image, PU.val_transform)
+     x_ray_image = x_ray_image.to(device)
+     grad_x_ray_image, pred_label, pred_conf = PU.plot_grad_cam(test_model,
+                                                                x_ray_image,
+                                                                PNEUMONIA_CLASS_LABELS,
+                                                                normalized=True)
+     grad_x_ray_image = np.clip(grad_x_ray_image, 0, 1)
+     return grad_x_ray_image, pred_label, pred_conf
+
+ def diabetic_retinopathy_page(image_1, image_2, test_model):
+     images = DRU.transform_image(image_1, image_2, DRU.val_transform)
+     pred_label_1, pred_label_2 = DRU.Inf_predict_image(test_model,
+                                                        images,
+                                                        DIABETIC_RETINOPATHY_CLASS_LABELS)
+     return pred_label_1, pred_label_2
+
+ if __name__ == "__main__":
+
+     CSU_model = CSU.Efficient().to(device)
+     CSU_model.load_state_dict(torch.load(CANCER_MODEL_PATH, map_location=torch.device('cpu')), strict=False)
+     CSU_test_model = CSU.ModelGradCam(CSU_model).to(device)
+     CSU_images_dir = "TESTS/CHEST_CT_SCANS"
+     all_images = os.listdir(CSU_images_dir)
+     CSU_examples = [[os.path.join(CSU_images_dir, image)] for image in np.random.choice(all_images, size=4, replace=False)]
+
+     C19U_model = C19U.DenseNet().to(device)
+     C19U_model.load_state_dict(torch.load(COVID_MODEL_PATH, map_location=torch.device('cpu')), strict=False)
+     C19U_test_model = C19U.ModelGradCam(C19U_model).to(device)
+     C19U_C19_images_dir = [[os.path.join("TESTS/COVID19", image)] for image in np.random.choice(os.listdir("TESTS/COVID19"), size=2, replace=False)]
+     NORM_images_dir = [[os.path.join("TESTS/NORMAL", image)] for image in np.random.choice(os.listdir("TESTS/NORMAL"), size=2, replace=False)]
+     C19U_examples = C19U_C19_images_dir + NORM_images_dir
+
+     PU_model = PU.DenseNet.to(device)  # module-level densenet161 instance from Pneumonia_Utils
+     PU_model.load_state_dict(torch.load(PNEUMONIA_MODEL_PATH, map_location=torch.device('cpu')), strict=False)
+     PU_test_model = PU.ModelGradCam(PU_model).to(device)
+     PU_images_dir = [[os.path.join("TESTS/PNEUMONIA", image)] for image in np.random.choice(os.listdir("TESTS/PNEUMONIA"), size=2, replace=False)]
+     NORM_images_dir = [[os.path.join("TESTS/NORMAL", image)] for image in np.random.choice(os.listdir("TESTS/NORMAL"), size=2, replace=False)]
+     PU_examples = PU_images_dir + NORM_images_dir
+
+     DRU_cnn_model = DRU.ConvolutionNeuralNetwork().to(device)
+     DRU_eff_b3 = DRU.Efficient().to(device)
+     DRU_ensemble = DRU.EnsembleModel(DRU_cnn_model, DRU_eff_b3).to(device)
+     DRU_ensemble.load_state_dict(torch.load(DIABETIC_RETINOPATHY_MODEL_PATH, map_location=torch.device('cpu'))["state_dict"], strict=False)
+     DRU_test_model = DRU_ensemble
+     DRU_examples = [['TESTS/DR_1/10030_left._aug_0._aug_6.jpeg', 'TESTS/DR_0/10031_right._aug_17.jpeg']]
+
+     cancer_interface = gr.Interface(
+         fn=partial(cancer_page, test_model=CSU_test_model),
+         inputs=gr.Image(type="pil", label="Image"),
+         outputs=[
+             gr.Image(type="numpy", label="Heatmap Image"),
+             gr.Textbox(label="Labels Present"),
+             gr.Label(label="Probabilities", show_label=False)
+         ],
+         examples=CSU_examples,
+         cache_examples=False,
+         allow_flagging="never",
+         title="Chest Cancer Detection System"
+     )
+
+     covid_interface = gr.Interface(
+         fn=partial(covid_page, test_model=C19U_test_model),
+         inputs=gr.Image(type="pil", label="Image"),
+         outputs=[
+             gr.Image(type="numpy", label="Heatmap Image"),
+             gr.Textbox(label="Labels Present"),
+             gr.Label(label="Probabilities", show_label=False)
+         ],
+         examples=C19U_examples,
+         cache_examples=False,
+         allow_flagging="never",
+         title="Covid Detection System"
+     )
+
+     pneumonia_interface = gr.Interface(
+         fn=partial(pneumonia_page, test_model=PU_test_model),
+         inputs=gr.Image(type="pil", label="Image"),
+         outputs=[
+             gr.Image(type="numpy", label="Heatmap Image"),
+             gr.Textbox(label="Labels Present"),
+             gr.Label(label="Probabilities", show_label=False)
+         ],
+         examples=PU_examples,
+         cache_examples=False,
+         allow_flagging="never",
+         title="Pneumonia Detection System"
+     )
+
+     diabetic_retinopathy_interface = gr.Interface(
+         fn=partial(diabetic_retinopathy_page, test_model=DRU_test_model),
+         inputs=[gr.Image(type="pil", label="Image"), gr.Image(type="pil", label="Image")],
+         outputs=[
+             gr.Textbox(label="Labels Present"),
+             gr.Textbox(label="Labels Present")
+         ],
+         examples=DRU_examples,
+         cache_examples=False,
+         allow_flagging="never",
+         title="Diabetic Retinopathy System"
+     )
+
+     demo = gr.TabbedInterface(
+         [cancer_interface,
+          covid_interface,
+          pneumonia_interface,
+          diabetic_retinopathy_interface],
+         ["Chest Cancer", "Covid19", "Pneumonia", "Diabetic Retinopathy"])
+
+     demo.launch(share=True)
cs_models/DenseNet_Covid.pth.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2bb47891bf00093d9d4d8aaf4c5462bf0a90113c57576ab58f8a66c75075523e
+ size 32421783
cs_models/DenseNet_Pneumonia.pth.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e272eec89833a606b59a3c9a7a005a87d19e16395e225de5702d65e08fcb7cf4
+ size 107168245
cs_models/EfficientNet_CT_Scans.pth.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a29db772edcf0669168d68e25da57d125ad13c55a242828768e42f3e109e5b06
+ size 51806517
cs_models/model_DR_9.pth.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5867469cd55865529bbdfb9a0fa01bff09797bbe23132cbb1d32585b8f2d15cc
+ size 144866177
requirements.txt ADDED
@@ -0,0 +1,91 @@
+ aiofiles==23.2.1
+ albumentations==1.3.1
+ altair==5.1.1
+ annotated-types==0.5.0
+ anyio==3.7.1
+ attrs==23.1.0
+ certifi==2023.7.22
+ charset-normalizer==3.2.0
+ click==8.1.7
+ cmake==3.27.5
+ contourpy==1.1.1
+ cycler==0.11.0
+ efficientnet-pytorch==0.7.1
+ fastapi==0.103.1
+ ffmpy==0.3.1
+ filelock==3.12.4
+ fonttools==4.42.1
+ fsspec==2023.9.2
+ gradio==3.44.4
+ gradio_client==0.5.1
+ h11==0.14.0
+ httpcore==0.18.0
+ httpx==0.25.0
+ huggingface-hub==0.17.2
+ idna==3.4
+ imageio==2.31.3
+ importlib-resources==6.1.0
+ Jinja2==3.1.2
+ joblib==1.3.2
+ jsonschema==4.19.1
+ jsonschema-specifications==2023.7.1
+ kiwisolver==1.4.5
+ lazy_loader==0.3
+ lit==16.0.6
+ MarkupSafe==2.1.3
+ matplotlib==3.8.0
+ mpmath==1.3.0
+ networkx==3.1
+ numpy==1.26.0
+ nvidia-cublas-cu11==11.10.3.66
+ nvidia-cuda-cupti-cu11==11.7.101
+ nvidia-cuda-nvrtc-cu11==11.7.99
+ nvidia-cuda-runtime-cu11==11.7.99
+ nvidia-cudnn-cu11==8.5.0.96
+ nvidia-cufft-cu11==10.9.0.58
+ nvidia-curand-cu11==10.2.10.91
+ nvidia-cusolver-cu11==11.4.0.1
+ nvidia-cusparse-cu11==11.7.4.91
+ nvidia-nccl-cu11==2.14.3
+ nvidia-nvtx-cu11==11.7.91
+ opencv-python==4.8.0.76
+ opencv-python-headless==4.8.0.76
+ orjson==3.9.7
+ packaging==23.1
+ pandas==2.1.1
+ Pillow==10.0.1
+ pydantic==2.3.0
+ pydantic_core==2.6.3
+ pydub==0.25.1
+ pyparsing==3.1.1
+ python-dateutil==2.8.2
+ python-multipart==0.0.6
+ pytz==2023.3.post1
+ PyWavelets==1.4.1
+ PyYAML==6.0.1
+ qudida==0.0.4
+ referencing==0.30.2
+ requests==2.31.0
+ rpds-py==0.10.3
+ scikit-image==0.21.0
+ scikit-learn==1.3.1
+ scipy==1.11.2
+ semantic-version==2.10.0
+ six==1.16.0
+ sniffio==1.3.0
+ starlette==0.27.0
+ sympy==1.12
+ threadpoolctl==3.2.0
+ tifffile==2023.9.18
+ toolz==0.12.0
+ torch==2.0.1
+ torchvision==0.15.2
+ tornado==6.3.3
+ tqdm==4.66.1
+ triton==2.0.0
+ typing_extensions==4.8.0
+ tzdata==2023.3
+ urllib3==2.0.5
+ uvicorn==0.23.2
+ websockets==11.0.3
+ wntr==1.0.0