biplab2008 committed on
Commit 75280f1 · verified · 1 Parent(s): 53b038e

Update app.py

Files changed (1)
  1. app.py +254 -0
app.py CHANGED
@@ -5,6 +5,260 @@ import numpy as np
  import torch
  from PIL import Image
  import torchvision.transforms as transforms
+ from typing import NamedTuple, List, Callable, Tuple, Optional
+ from torch import nn
+ import torch.nn.functional as F
+
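+ # conv3D_output_size is referenced by CNN3D_Mike below but is not defined
+ # anywhere in this diff. A minimal sketch, assuming the standard Conv3d shape
+ # formula (out = floor((n + 2*pad - kernel) / stride) + 1 per dimension):
+ def conv3D_output_size(img_size, padding, kernel_size, stride):
+     # img_size, padding, kernel_size, stride: 3-tuples for (frames, height, width)
+     return tuple(
+         int(np.floor((img_size[i] + 2 * padding[i] - (kernel_size[i] - 1) - 1) / stride[i] + 1))
+         for i in range(3)
+     )
+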
+ class LinData(NamedTuple):
+     in_dim : int                      # input dimension
+     hidden_layers : List[int]         # layer widths, including the output layer
+     activations : List[Optional[Callable[[torch.Tensor], torch.Tensor]]]  # per-layer activations
+     bns : List[bool]                  # per-layer batch-norm flags
+     dropouts : List[Optional[float]]  # per-layer dropout probabilities
+
+ class CNNData(NamedTuple):
+     in_dim : int                      # input channels, e.g. 1 or 3
+     n_f : List[int]                   # filters per conv block
+     kernel_size : List[Tuple]         # kernel sizes, e.g. [(5,5,5), (3,3,3), (3,3,3)]
+     activations : List[Optional[Callable[[torch.Tensor], torch.Tensor]]]  # per-block activations
+     bns : List[bool]                  # batch normalization flags, e.g. [True, True, False]
+     dropouts : List[Optional[float]]  # dropout probabilities, e.g. [0.5, 0, 0]
+     paddings : List[Optional[Tuple]]  # e.g. [(0,0,0), (0,0,0), (0,0,0)]
+     strides : List[Optional[Tuple]]   # e.g. [(1,1,1), (1,1,1), (1,1,1)]
+
+
+ class NetData(NamedTuple):
+     cnn3d : CNNData
+     lin : LinData
+
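+ # Illustrative config (an assumption, not part of this commit's runtime path;
+ # lin.in_dim must match the flattened CNN output width for a real model):
+ #   cnn_cfg = CNNData(in_dim=1, n_f=[32, 48],
+ #                     kernel_size=[(5,5,5), (3,3,3)],
+ #                     activations=[nn.ReLU(), nn.ReLU()],
+ #                     bns=[True, True], dropouts=[0.2, None],
+ #                     paddings=[(0,0,0), (0,0,0)], strides=[(2,2,2), (2,2,2)])
+ #   lin_cfg = LinData(in_dim=1024, hidden_layers=[256, 1],
+ #                     activations=[nn.ReLU(), None],
+ #                     bns=[True, False], dropouts=[0.2, None])
+ #   model = CNN3D(NetData(cnn3d=cnn_cfg, lin=lin_cfg))
+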
+ class CNN3D_Mike(nn.Module):
+     def __init__(self, t_dim=30, img_x=256, img_y=342, drop_p=0, fc_hidden1=256, fc_hidden2=256):
+         super(CNN3D_Mike, self).__init__()
+         # video dimensions
+         self.t_dim = t_dim
+         self.img_x = img_x
+         self.img_y = img_y
+         # fully connected layer hidden nodes
+         self.fc_hidden1, self.fc_hidden2 = fc_hidden1, fc_hidden2
+         self.drop_p = drop_p
+         self.ch1, self.ch2 = 32, 48
+         self.k1, self.k2 = (5, 5, 5), (3, 3, 3)    # 3d kernel sizes
+         self.s1, self.s2 = (2, 2, 2), (2, 2, 2)    # 3d strides
+         self.pd1, self.pd2 = (0, 0, 0), (0, 0, 0)  # 3d paddings
+         # compute conv1 & conv2 output shapes
+         self.conv1_outshape = conv3D_output_size((self.t_dim, self.img_x, self.img_y), self.pd1, self.k1, self.s1)
+         self.conv2_outshape = conv3D_output_size(self.conv1_outshape, self.pd2, self.k2, self.s2)
+         self.conv1 = nn.Conv3d(in_channels=1, out_channels=self.ch1, kernel_size=self.k1, stride=self.s1,
+                                padding=self.pd1)
+         self.bn1 = nn.BatchNorm3d(self.ch1)
+         self.conv2 = nn.Conv3d(in_channels=self.ch1, out_channels=self.ch2, kernel_size=self.k2, stride=self.s2,
+                                padding=self.pd2)
+         self.bn2 = nn.BatchNorm3d(self.ch2)
+         self.relu = nn.ReLU(inplace=True)
+         self.drop = nn.Dropout3d(self.drop_p)
+         self.pool = nn.MaxPool3d(2)  # defined but unused in forward
+         self.fc1 = nn.Linear(self.ch2 * self.conv2_outshape[0] * self.conv2_outshape[1] * self.conv2_outshape[2],
+                              self.fc_hidden1)  # fully connected hidden layer
+         self.fc2 = nn.Linear(self.fc_hidden1, self.fc_hidden2)
+         self.fc3 = nn.Linear(self.fc_hidden2, 1)  # output layer: a single unit
+
+     def forward(self, x_3d):
+         # x_3d: (batch, 1, t_dim, img_x, img_y)
+         # Conv block 1
+         x = self.conv1(x_3d)
+         x = self.bn1(x)
+         x = self.relu(x)
+         x = self.drop(x)
+         # Conv block 2
+         x = self.conv2(x)
+         x = self.bn2(x)
+         x = self.relu(x)
+         x = self.drop(x)
+         # Flatten, then FC 1 and 2
+         x = x.view(x.size(0), -1)
+         x = F.relu(self.fc1(x))
+         x = F.relu(self.fc2(x))
+         x = F.dropout(x, p=self.drop_p, training=self.training)
+         # output layer, no activation
+         x = self.fc3(x)
+
+         return x
+
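+ # Quick shape check (illustrative; assumes input layout batch x channels x frames x H x W):
+ #   m = CNN3D_Mike()
+ #   y = m(torch.zeros(2, 1, 30, 256, 342))   # -> torch.Size([2, 1])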
+
+
+ class CNNLayers(nn.Module):
+
+     def __init__(self, args):
+         super(CNNLayers, self).__init__()
+
+         self.in_dim = args.in_dim            # input channels, e.g. 1 or 3
+         self.n_f = args.n_f                  # e.g. [32, 64]
+         self.kernel_size = args.kernel_size  # e.g. [(5,5,5), (3,3,3)]
+         self.activations = args.activations  # e.g. [nn.ReLU(), nn.ReLU()]
+         self.bns = args.bns                  # e.g. [True, True]
+         self.dropouts = args.dropouts        # e.g. [0.5, None]
+         self.paddings = args.paddings        # e.g. [(0,0,0), (0,0,0)]
+         self.strides = args.strides          # e.g. [(1,1,1), (1,1,1)]
+
+         assert len(self.n_f) == len(self.activations) == len(self.bns) == len(self.dropouts), \
+             'dimension mismatch: all per-block lists must have the same length!'
+
+         # generate layers as a sequence of conv blocks
+         self._get_layers()
+
+     def _get_layers(self):
+         layers = nn.ModuleList()
+         in_channels = self.in_dim
+
+         for idx, chans in enumerate(self.n_f):
+             sub_layers = nn.ModuleList()
+             sub_layers.append(nn.Conv3d(in_channels=in_channels,
+                                         out_channels=chans,
+                                         kernel_size=self.kernel_size[idx],
+                                         stride=self.strides[idx],
+                                         padding=self.paddings[idx]))
+             if self.bns[idx]: sub_layers.append(nn.BatchNorm3d(num_features=chans))
+             if self.dropouts[idx]: sub_layers.append(nn.Dropout3d(p=self.dropouts[idx]))
+             if self.activations[idx]: sub_layers.append(self.activations[idx])
+
+             layers.append(nn.Sequential(*sub_layers))
+             in_channels = chans
+
+         self.layers = nn.Sequential(*layers)
+
+     @staticmethod
+     def get_activation(activation):
+         # map an activation name to a module (unused when callables are passed directly)
+         if activation == 'relu':
+             activation = nn.ReLU()
+         elif activation == 'leakyrelu':
+             activation = nn.LeakyReLU(negative_slope=0.1)
+         elif activation == 'selu':
+             activation = nn.SELU()
+         return activation
+
+     def forward(self, x):
+         x = self.layers(x)
+         return x
+
+
+
+ class CNN3D(nn.Module):
+
+     def __init__(self, args):
+         super(CNN3D, self).__init__()
+         # check datatype
+         if not isinstance(args, NetData):
+             raise TypeError("args must be a NetData instance")
+
+         self.cnn3d = CNNLayers(args.cnn3d)
+         self.lin = LinLayers(args.lin)
+         self.in_dim = args.lin.in_dim
+
+     def forward(self, x):
+         # 3d cnn feature extractor
+         x = self.cnn3d(x)
+         # flatten to the classifier's input width
+         x = x.view(-1, self.in_dim)
+         # feedforward head
+         x = self.lin(x)
+         return x
+
+
+
+ class LinLayers(nn.Module):
+
+     def __init__(self, args):
+         super(LinLayers, self).__init__()
+
+         in_dim = args.in_dim                # e.g. 16
+         hidden_layers = args.hidden_layers  # e.g. [512, 256, 128, 2]
+         activations = args.activations      # e.g. [nn.LeakyReLU(0.2), nn.LeakyReLU(0.2), nn.LeakyReLU(0.2), None]
+         batchnorms = args.bns               # e.g. [True, True, True, False]
+         dropouts = args.dropouts            # e.g. [None, 0.2, 0.2, None]
+
+         assert len(hidden_layers) == len(activations) == len(batchnorms) == len(dropouts), 'dimension mismatch!'
+
+         layers = nn.ModuleList()
+
+         if hidden_layers:
+             old_dim = in_dim
+             for idx, layer in enumerate(hidden_layers):
+                 sub_layers = nn.ModuleList()
+                 sub_layers.append(nn.Linear(old_dim, layer))
+                 if batchnorms[idx]: sub_layers.append(nn.BatchNorm1d(num_features=layer))
+                 if activations[idx]: sub_layers.append(activations[idx])
+                 if dropouts[idx]: sub_layers.append(nn.Dropout(p=dropouts[idx]))
+                 old_dim = layer
+
+                 layers.append(nn.Sequential(*sub_layers))
+         else:
+             # single-layer fallback: the original branch referenced an undefined
+             # out_dim, so fail loudly instead of raising a NameError here
+             raise ValueError('hidden_layers must be a non-empty list')
+
+         self.layers = nn.Sequential(*layers)
+
+     def forward(self, x):
+         x = self.layers(x)
+         return x
+
+ '''
+ def _check_dimensions(self):
+     if isinstance(self.hidden_layers, list):
+         assert len(self.hidden_layers) == len(self.activations)
+         assert len(self.hidden_layers) == len(self.batchnorms)
+         assert len(self.hidden_layers) == len(self.dropouts)
+ '''
 
 
  def load_model():