yvonne1123 committed
Commit 6348c0b · Parent(s): de67b00

Upload 28 files
.gitattributes CHANGED
@@ -53,3 +53,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ Model/Epoch_3/backdoor_trans_model.m filter=lfs diff=lfs merge=lfs -text
+ Model/Epoch_3/trans_model.m filter=lfs diff=lfs merge=lfs -text
Model/.DS_Store ADDED
Binary file (6.15 kB).
 
Model/Epoch_1/.DS_Store ADDED
Binary file (6.15 kB).
 
Model/Epoch_1/Contravis.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80abd0724f0f0f42c1eea30b15c6916ec33bd88c20c3b6db9cb96b6360939a9a
+ size 7930357
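
Every ADDED entry that shows a three-line diff like the one above is not the binary itself but a Git LFS pointer: the .gitattributes filter rules route these paths through LFS, so the repository stores only the spec version, the SHA-256 object ID, and the byte size, and "git lfs pull" fetches the real content. A minimal sketch of reading such a pointer in Python (the helper name and example path are illustrative):

def read_lfs_pointer(path):
    # Each pointer line is "key value"; see https://git-lfs.github.com/spec/v1
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# On a checkout without the LFS objects pulled:
# read_lfs_pointer("Model/Epoch_1/Contravis.pth")
# -> {"version": "https://git-lfs.github.com/spec/v1",
#     "oid": "sha256:80abd072...", "size": "7930357"}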
Model/Epoch_1/bgimg.png ADDED

Git LFS Details

  • SHA256: a425e3f143eb4221615df582ea3eaff2bc288751f0aa4830dc18e43e7b3122d1
  • Pointer size: 130 Bytes
  • Size of remote file: 31.6 kB
Model/Epoch_1/embedding.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82411fec08a5d83dea74a46587081b831025d81b9231cc64494433f0c08d018c
+ size 400128
Model/Epoch_1/index.json ADDED
The diff for this file is too large to render.
 
Model/Epoch_1/scale.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53375c26a4b27cb14b0a11f9b22481ffb6f1f1070b14e4ca160d35a72f2477a8
+ size 160
Model/Epoch_1/subject_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e2cf7d8dd4501fa4cabed807349f89108c7a9cda34388142d9d820f552f4162
+ size 44773837
Model/Epoch_1/test_data.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46d44752b0ad1a4039fd7f631bda1b08a10f72b5e2cab909382327de484a936c
+ size 20480128
Model/Epoch_1/train_data.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a58e5ca267fbea233672f26e0a4bfdb4238ef80e8906649b14723cc138ba31dd
+ size 102400128
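
The same per-epoch layout repeats below for Epoch_2 and Epoch_3. Once the LFS objects are pulled, the Epoch_1 artifacts load with ordinary numpy/torch calls. A hedged sketch (file names come from this commit; the shape comments are only inferences from the pointer byte sizes, e.g. 102400128 = 128-byte .npy header + 50000 x 512 float32, and should be checked against what np.load actually returns):

import numpy as np
import torch

base = "Model/Epoch_1"
train = np.load(base + "/train_data.npy")  # size is consistent with float32 (50000, 512)
test = np.load(base + "/test_data.npy")    # size is consistent with float32 (10000, 512)
emb = np.load(base + "/embedding.npy")     # size is consistent with float32 (50000, 2)
scale = np.load(base + "/scale.npy")
print(train.shape, test.shape, emb.shape, scale.shape)

# Assumed, not confirmed by the diff: subject_model.pth holds a state_dict for
# the ResNet defined in Model/model.py below.
state = torch.load(base + "/subject_model.pth", map_location="cpu")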
Model/Epoch_2/.DS_Store ADDED
Binary file (6.15 kB).
 
Model/Epoch_2/bgimg.png ADDED

Git LFS Details

  • SHA256: da8473430b127f5316f767e2b81b8b07fd59a66d889d342fc7f0a5a7ad516d0b
  • Pointer size: 130 Bytes
  • Size of remote file: 29.4 kB
Model/Epoch_2/embedding.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb926ab79019d0cc4c77bbd39e9c1589ab5b331a51e45d5930cc141bbccf5e21
+ size 400128
Model/Epoch_2/index.json ADDED
The diff for this file is too large to render.
 
Model/Epoch_2/scale.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b6d12a2ed2914c38c3dc686b3d1a834185bd6a56472a972258246ccd7a77e87
+ size 160
Model/Epoch_2/subject_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e2cf7d8dd4501fa4cabed807349f89108c7a9cda34388142d9d820f552f4162
+ size 44773837
Model/Epoch_2/test_data.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46d44752b0ad1a4039fd7f631bda1b08a10f72b5e2cab909382327de484a936c
+ size 20480128
Model/Epoch_2/train_data.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a58e5ca267fbea233672f26e0a4bfdb4238ef80e8906649b14723cc138ba31dd
+ size 102400128
Model/Epoch_3/backdoor_trans_model.m ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e50290afb8a789c82e2628b1c7b789c379f041df1bdc12e199b4037460ed115
+ size 1057743
Model/Epoch_3/bgimg.png ADDED

Git LFS Details

  • SHA256: a425e3f143eb4221615df582ea3eaff2bc288751f0aa4830dc18e43e7b3122d1
  • Pointer size: 130 Bytes
  • Size of remote file: 31.6 kB
Model/Epoch_3/embedding.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82411fec08a5d83dea74a46587081b831025d81b9231cc64494433f0c08d018c
+ size 400128
Model/Epoch_3/index.json ADDED
The diff for this file is too large to render.
 
Model/Epoch_3/scale.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53375c26a4b27cb14b0a11f9b22481ffb6f1f1070b14e4ca160d35a72f2477a8
+ size 160
Model/Epoch_3/subject_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e2cf7d8dd4501fa4cabed807349f89108c7a9cda34388142d9d820f552f4162
+ size 44773837
Model/Epoch_3/test_data.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46d44752b0ad1a4039fd7f631bda1b08a10f72b5e2cab909382327de484a936c
+ size 20480128
Model/Epoch_3/train_data.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a58e5ca267fbea233672f26e0a4bfdb4238ef80e8906649b14723cc138ba31dd
+ size 102400128
Model/Epoch_3/trans_model.m ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16d80809e3e35accbff490fe9d996ca6b2fdb915ad05facdc155415711e15a01
+ size 1057743
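
Because each pointer records the object's SHA-256 and size, a pulled file can be verified locally with nothing but the standard library. A small sketch, using the oid and size from the Model/Epoch_3/trans_model.m entry above:

import hashlib
import os

def verify_lfs_object(path, oid_hex, size):
    # Hash the file in 1 MiB chunks and compare against the pointer metadata.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == oid_hex and os.path.getsize(path) == size

assert verify_lfs_object(
    "Model/Epoch_3/trans_model.m",
    "16d80809e3e35accbff490fe9d996ca6b2fdb915ad05facdc155415711e15a01",
    1057743,
)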
Model/model.py ADDED
@@ -0,0 +1,453 @@
+ import torch
+ import torch.nn as nn
+ import os
+
+
+ __all__ = [
+     "ResNet",
+     "resnet18_with_dropout",
+     "resnet18",
+     # "dropout_resnet18" exists only in commented-out code below, so it is not exported
+ ]
+
+
+ def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
+     """3x3 convolution with padding"""
+     return nn.Conv2d(
+         in_planes,
+         out_planes,
+         kernel_size=3,
+         stride=stride,
+         padding=dilation,
+         groups=groups,
+         bias=False,
+         dilation=dilation,
+     )
+
+
+ def conv1x1(in_planes, out_planes, stride=1):
+     """1x1 convolution"""
+     return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
+
+ class BasicBlock(nn.Module):
+     expansion = 1
+
+     def __init__(
+         self,
+         inplanes,
+         planes,
+         stride=1,
+         downsample=None,
+         groups=1,
+         base_width=64,
+         dilation=1,
+         norm_layer=None,
+     ):
+         super(BasicBlock, self).__init__()
+         if norm_layer is None:
+             norm_layer = nn.BatchNorm2d
+         if groups != 1 or base_width != 64:
+             raise ValueError("BasicBlock only supports groups=1 and base_width=64")
+         if dilation > 1:
+             raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
+         # Both self.conv1 and self.downsample layers downsample the input when stride != 1
+         self.conv1 = conv3x3(inplanes, planes, stride)
+         self.bn1 = norm_layer(planes)
+         self.relu = nn.ReLU(inplace=True)
+         self.conv2 = conv3x3(planes, planes)
+         self.bn2 = norm_layer(planes)
+         self.downsample = downsample
+         self.stride = stride
+
+
+     def forward(self, x):
+         identity = x
+
+         out = self.conv1(x)
+         out = self.bn1(out)
+         out = self.relu(out)
+
+         out = self.conv2(out)
+         out = self.bn2(out)
+
+         if self.downsample is not None:
+             identity = self.downsample(x)
+
+         out += identity
+         out = self.relu(out)
+
+         return out
+
+ class BasicBlock_withDropout(nn.Module):
+     expansion = 1
+
+     def __init__(
+         self,
+         inplanes,
+         planes,
+         stride=1,
+         downsample=None,
+         groups=1,
+         base_width=64,
+         dilation=1,
+         norm_layer=None,
+     ):
+         super(BasicBlock_withDropout, self).__init__()
+         if norm_layer is None:
+             norm_layer = nn.BatchNorm2d
+         if groups != 1 or base_width != 64:
+             raise ValueError("BasicBlock only supports groups=1 and base_width=64")
+         if dilation > 1:
+             raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
+         # Both self.conv1 and self.downsample layers downsample the input when stride != 1
+         self.dropout = nn.Dropout(p=0.5)  # NOTE: defined but never applied in forward(); dropout actually happens in ResNet.fc
+         self.conv1 = conv3x3(inplanes, planes, stride)
+         self.bn1 = norm_layer(planes)
+         self.relu = nn.ReLU(inplace=True)
+         self.conv2 = conv3x3(planes, planes)
+         self.bn2 = norm_layer(planes)
+         self.downsample = downsample
+         self.stride = stride
+         # print('with_dropout', self.with_dropout)
+
+     def forward(self, x):
+         identity = x
+
+         out = self.conv1(x)
+         out = self.bn1(out)
+         out = self.relu(out)
+
+
+         out = self.conv2(out)
+         out = self.bn2(out)
+
+         if self.downsample is not None:
+             identity = self.downsample(x)
+
+         out += identity
+         out = self.relu(out)
+
+         return out
+
+
+ class Bottleneck(nn.Module):
+     expansion = 4
+
+     def __init__(
+         self,
+         inplanes,
+         planes,
+         stride=1,
+         downsample=None,
+         groups=1,
+         base_width=64,
+         dilation=1,
+         norm_layer=None,
+     ):
+         super(Bottleneck, self).__init__()
+         if norm_layer is None:
+             norm_layer = nn.BatchNorm2d
+         width = int(planes * (base_width / 64.0)) * groups
+         # Both self.conv2 and self.downsample layers downsample the input when stride != 1
+         self.conv1 = conv1x1(inplanes, width)
+         self.bn1 = norm_layer(width)
+         self.conv2 = conv3x3(width, width, stride, groups, dilation)
+         self.bn2 = norm_layer(width)
+         self.conv3 = conv1x1(width, planes * self.expansion)
+         self.bn3 = norm_layer(planes * self.expansion)
+         self.relu = nn.ReLU(inplace=True)
+         self.downsample = downsample
+         self.stride = stride
+
+     def forward(self, x):
+         identity = x
+
+         out = self.conv1(x)
+         out = self.bn1(out)
+         out = self.relu(out)
+
+         out = self.conv2(out)
+         out = self.bn2(out)
+         out = self.relu(out)
+
+         out = self.conv3(out)
+         out = self.bn3(out)
+
+         if self.downsample is not None:
+             identity = self.downsample(x)
+
+         out += identity
+         out = self.relu(out)
+
+         return out
+
+
+ class ResNet(nn.Module):
+     def __init__(
+         self,
+         block,
+         layers,
+         with_dropout,
+         num_classes=10,
+         zero_init_residual=False,
+         groups=1,
+         width_per_group=64,
+         replace_stride_with_dilation=None,
+         norm_layer=None,
+
+     ):
+         super(ResNet, self).__init__()
+         if norm_layer is None:
+             norm_layer = nn.BatchNorm2d
+         self._norm_layer = norm_layer
+
+         self.inplanes = 64
+         self.dilation = 1
+         if replace_stride_with_dilation is None:
+             # each element in the tuple indicates if we should replace
+             # the 2x2 stride with a dilated convolution instead
+             replace_stride_with_dilation = [False, False, False]
+         if len(replace_stride_with_dilation) != 3:
+             raise ValueError(
+                 "replace_stride_with_dilation should be None "
+                 "or a 3-element tuple, got {}".format(replace_stride_with_dilation)
+             )
+
+         self.with_dropout = with_dropout
+         self.groups = groups
+         self.base_width = width_per_group
+
+         # CIFAR10: kernel_size 7 -> 3, stride 2 -> 1, padding 3 -> 1
+         self.conv1 = nn.Conv2d(
+             3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False
+         )
+         # END
+
+         self.bn1 = norm_layer(self.inplanes)
+         self.relu = nn.ReLU(inplace=True)
+
+         self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+         self.layer1 = self._make_layer(block, 64, layers[0])
+         self.layer2 = self._make_layer(
+             block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]
+         )
+         self.layer3 = self._make_layer(
+             block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]
+         )
+         self.layer4 = self._make_layer(
+             block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]
+         )
+         self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+         self.fc = nn.Linear(512 * block.expansion, num_classes)
+
+         if self.with_dropout:
+             self.fc = nn.Sequential(nn.Flatten(), nn.Dropout(0.5), nn.Linear(512 * block.expansion, num_classes))
+
+
+
+         for m in self.modules():
+             if isinstance(m, nn.Conv2d):
+                 nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
+             elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
+                 nn.init.constant_(m.weight, 1)
+                 nn.init.constant_(m.bias, 0)
+
+         # Zero-initialize the last BN in each residual branch,
+         # so that the residual branch starts with zeros, and each residual block behaves like an identity.
+         # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
+         if zero_init_residual:
+             for m in self.modules():
+                 if isinstance(m, Bottleneck):
+                     nn.init.constant_(m.bn3.weight, 0)
+                 elif isinstance(m, BasicBlock):
+                     nn.init.constant_(m.bn2.weight, 0)
+
+     def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
+         norm_layer = self._norm_layer
+         downsample = None
+         previous_dilation = self.dilation
+         if dilate:
+             self.dilation *= stride
+             stride = 1
+         if stride != 1 or self.inplanes != planes * block.expansion:
+             downsample = nn.Sequential(
+                 conv1x1(self.inplanes, planes * block.expansion, stride),
+                 norm_layer(planes * block.expansion),
+             )
+
+         layers = []
+         layers.append(
+             block(
+                 self.inplanes,
+                 planes,
+                 stride,
+                 downsample,
+                 self.groups,
+                 self.base_width,
+                 previous_dilation,
+                 norm_layer,
+             )
+         )
+         self.inplanes = planes * block.expansion
+         for _ in range(1, blocks):
+             layers.append(
+                 block(
+                     self.inplanes,
+                     planes,
+                     groups=self.groups,
+                     base_width=self.base_width,
+                     dilation=self.dilation,
+                     norm_layer=norm_layer,
+                 )
+             )
+
+         return nn.Sequential(*layers)
+
+     def forward(self, x):
+         x = self.conv1(x)
+         x = self.bn1(x)
+         x = self.relu(x)
+         x = self.maxpool(x)
+
+         x = self.layer1(x)
+
+         x = self.layer2(x)
+
+         x = self.layer3(x)
+
+         x = self.layer4(x)
+
+         x = self.avgpool(x)
+         x = x.reshape(x.size(0), -1)
+         x = self.fc(x)
+
+         return x
+
+     def feature(self, x):
+         x = self.conv1(x)
+         x = self.bn1(x)
+         x = self.relu(x)
+         x = self.maxpool(x)
+
+         x = self.layer1(x)
+         x = self.layer2(x)
+         x = self.layer3(x)
+         x = self.layer4(x)
+
+         x = self.avgpool(x)
+         x = x.reshape(x.size(0), -1)
+         return x
+     def prediction(self, x):
+         x = self.fc(x)
+
+         return x
+
+     # def gap(self, x):
+     #     x = self.conv1(x)
+     #     x = self.bn1(x)
+     #     x = self.relu(x)
+     #     x = self.maxpool(x)
+
+     #     x = self.layer1(x)
+     #     x = self.layer2(x)
+     #     x = self.layer3(x)
+     #     x = self.layer4(x)
+
+     #     x = self.avgpool(x)
+     #     x = x.reshape(x.size(0), -1)
+     #     return x
+
+
+ def _resnet(arch, block, layers, pretrained, progress, device, with_dropout, **kwargs):
+     model = ResNet(block, layers, with_dropout, **kwargs)
+     if pretrained:
+         script_dir = os.path.dirname(__file__)
+         state_dict = torch.load(
+             script_dir + "/state_dicts/" + arch + ".pt", map_location=device
+         )
+         model.load_state_dict(state_dict)
+     return model
+
+
+ def resnet18_with_dropout(pretrained=False, progress=True, device="cpu", **kwargs):
+     """Constructs a ResNet-18 model with dropout before the final Linear layer.
+     Args:
+         pretrained (bool): If True, loads weights from state_dicts/resnet18.pt next to this file
+         progress (bool): unused here; kept for torchvision API compatibility
+     """
+     return _resnet(
+         "resnet18", BasicBlock_withDropout, [2, 2, 2, 2], pretrained, progress, device, with_dropout=True, **kwargs
+     )
+
+ def resnet18(pretrained=False, progress=True, device="cpu", **kwargs):
+     """Constructs a ResNet-18 model.
+     Args:
+         pretrained (bool): If True, loads weights from state_dicts/resnet18.pt next to this file
+         progress (bool): unused here; kept for torchvision API compatibility
+     """
+     return _resnet(
+         "resnet18", BasicBlock, [2, 2, 2, 2], pretrained, progress, device, with_dropout=False, **kwargs
+     )
+
+
+ def resnet34(pretrained=False, progress=True, device="cpu", **kwargs):
+     """Constructs a ResNet-34 model.
+     Args:
+         pretrained (bool): If True, loads weights from state_dicts/resnet34.pt next to this file
+         progress (bool): unused here; kept for torchvision API compatibility
+     """
+     return _resnet(
+         "resnet34", BasicBlock, [3, 4, 6, 3], pretrained, progress, device, with_dropout=False, **kwargs  # fix: with_dropout is a required arg of _resnet
+     )
+
+
+ def resnet50(pretrained=False, progress=True, device="cpu", **kwargs):
+     """Constructs a ResNet-50 model.
+     Args:
+         pretrained (bool): If True, loads weights from state_dicts/resnet50.pt next to this file
+         progress (bool): unused here; kept for torchvision API compatibility
+     """
+     return _resnet(
+         "resnet50", Bottleneck, [3, 4, 6, 3], pretrained, progress, device, with_dropout=False, **kwargs  # fix: with_dropout is a required arg of _resnet
+     )
+
+ # class dropout_residual(nn.Module):
+ #     def __init__(self, input_channels, num_channels, dropout_rate, dropout_type, init_dict, use_1x1conv=False, strides=1, **kwargs):
+ #         super().__init__(**kwargs)
+ #         self.conv1 = Dropout_Conv2D(input_channels, num_channels, kernel_size=3, padding=1, stride=strides, dropout_rate=dropout_rate, dropout_type=dropout_type, init_dict=init_dict)
+ #         self.conv2 = Dropout_Conv2D(num_channels, num_channels, kernel_size=3, padding=1, dropout_rate=dropout_rate, dropout_type=dropout_type, init_dict=init_dict)
+
+ #         if use_1x1conv:
+ #             self.conv3 = Dropout_Conv2D(input_channels, num_channels, kernel_size=1, stride=strides, dropout_rate=dropout_rate, dropout_type=dropout_type)
+ #         else:
+ #             self.conv3 = None
+
+ #         self.bn1 = nn.BatchNorm2d(num_channels)
+ #         self.bn2 = nn.BatchNorm2d(num_channels)
+
+ # def dropout_resnet_block(input_channels, num_channels, num_residuals, dropout_rate, dropout_type, init_dict, first_block=False):
+ #     blk = []
+ #     for i in range(num_residuals):
+ #         if i == 0 and not first_block:
+ #             blk.append(dropout_residual(input_channels, num_channels, dropout_rate=dropout_rate, dropout_type=dropout_type, init_dict=init_dict, use_1x1conv=True, strides=2))
+ #         else:
+ #             blk.append(dropout_residual(num_channels, num_channels, dropout_rate=dropout_rate, dropout_type=dropout_type, init_dict=init_dict))
+ #     return blk
+
+ # def dropout_resnet18(dropout_rate=0.5, dropout_type="w", init_dict=dict()):
+ #     b1 = nn.Sequential(
+ #         Dropout_Conv2D(1, 64, kernel_size=7, stride=2, padding=3, dropout_rate=dropout_rate, dropout_type=dropout_type, init_dict=init_dict),
+ #         nn.BatchNorm2d(64),
+ #         nn.ReLU(),
+ #         nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+ #     )
+ #     b2 = nn.Sequential(*dropout_resnet_block(64, 64, 2, dropout_rate=dropout_rate, dropout_type=dropout_type, init_dict=init_dict, first_block=True))
+ #     b3 = nn.Sequential(*dropout_resnet_block(64, 128, 2, dropout_rate=dropout_rate, dropout_type=dropout_type, init_dict=init_dict))
+ #     b4 = nn.Sequential(*dropout_resnet_block(128, 256, 2, dropout_rate=dropout_rate, dropout_type=dropout_type, init_dict=init_dict))
+ #     b5 = nn.Sequential(*dropout_resnet_block(256, 512, 2, dropout_rate=dropout_rate, dropout_type=dropout_type, init_dict=init_dict))
+
+ #     return nn.Sequential(b1, b2, b3, b4, b5,
+ #                          nn.AdaptiveAvgPool2d((1,1)),
+ #                          nn.Flatten(),
+ #                          Dropout_Linear(512, 20, dropout_rate=dropout_rate, dropout_type=dropout_type, init_dict=init_dict))
+
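
For reference, a minimal usage sketch of the constructors in Model/model.py (assuming the repository root is on sys.path; the input is sized for CIFAR-10, which is what the 3x3 conv1 stem above targets):

import torch
from Model.model import resnet18, resnet18_with_dropout

model = resnet18(num_classes=10)  # plain CIFAR-style ResNet-18
model.eval()

x = torch.randn(4, 3, 32, 32)     # a batch of CIFAR-10-sized images
logits = model(x)                 # -> shape (4, 10)
feats = model.feature(x)          # -> shape (4, 512), penultimate features
assert torch.allclose(model.prediction(feats), logits)

# The with-dropout variant routes the final features through nn.Dropout(0.5).
model_d = resnet18_with_dropout(num_classes=10)

# Assumed, not confirmed by this commit: subject_model.pth stores a compatible state_dict.
# model.load_state_dict(torch.load("Model/Epoch_1/subject_model.pth", map_location="cpu"))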