
englert committed
Commit 95db469 · 1 Parent(s): e059c3c

update app.py, add resnet50.py

Files changed (3)
  1. app.py +10 -2
  2. model +0 -3
  3. resnet50.py +355 -0
app.py CHANGED
@@ -8,10 +8,18 @@ import numpy as np
 import gradio as gr
 import torch
 
+from resnet50 import resnet18
 from sampling_util import furthest_neighbours
 from video_reader import video_reader
 
-model = torch.load("model").eval()
+model = resnet18(
+    output_dim=0,
+    nmb_prototypes=0,
+    eval_mode=True,
+    hidden_mlp=0,
+    normalize=False)
+model.load_state_dict(torch.load("model.pt"))
+model.eval()
 avg_pool = torch.nn.AdaptiveAvgPool2d((1, 1))
 
 
@@ -77,4 +85,4 @@ demo = gr.Interface(
     inputs=[gr.inputs.Video(label="Upload Video File"), gr.inputs.Number(label="Downsample size")],
     outputs=gr.outputs.File(label="Zip"))
 
-demo.launch()
+demo.launch()
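
For context, a minimal sketch (not part of the commit) of how the rebuilt backbone pairs with the avg_pool defined above; the constructor arguments and checkpoint name come from the diff, while the frame batch and its shape are assumptions:

import torch
from resnet50 import resnet18

model = resnet18(output_dim=0, nmb_prototypes=0, eval_mode=True,
                 hidden_mlp=0, normalize=False)
model.load_state_dict(torch.load("model.pt", map_location="cpu"))
model.eval()

avg_pool = torch.nn.AdaptiveAvgPool2d((1, 1))

with torch.no_grad():
    frames = torch.randn(4, 3, 224, 224)      # hypothetical batch of video frames
    fmap = model.forward_backbone(frames)     # eval_mode=True returns the conv feature map
    feats = torch.flatten(avg_pool(fmap), 1)  # (4, 2048) pooled frame embeddings
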
model DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:61fdeced8efaa14180ef11f528c92cd8dff7077490f967aef32c5ec7d7e8d15c
-size 55943397
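
The deleted file was a Git LFS pointer to a fully pickled model object; the commit replaces it with the state-dict checkpoint model.pt loaded above. A minimal sketch (assumed, not shown in the commit) of how such a checkpoint would be produced:

import torch
from resnet50 import resnet18

model = resnet18(output_dim=0, nmb_prototypes=0, eval_mode=True,
                 hidden_mlp=0, normalize=False)
# ... train or copy weights into the model ...
# a state dict stores only tensors; the architecture is rebuilt in code at load
# time instead of being unpickled, which survives refactors of the class module
torch.save(model.state_dict(), "model.pt")
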
resnet50.py ADDED
@@ -0,0 +1,355 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+#
+
+import torch
+import torch.nn as nn
+
+
+def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
+    """3x3 convolution with padding"""
+    return nn.Conv2d(
+        in_planes,
+        out_planes,
+        kernel_size=3,
+        stride=stride,
+        padding=dilation,
+        groups=groups,
+        bias=False,
+        dilation=dilation,
+    )
+
+
+def conv1x1(in_planes, out_planes, stride=1):
+    """1x1 convolution"""
+    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
+
+
+class BasicBlock(nn.Module):
+    expansion = 1
+    __constants__ = ["downsample"]
+
+    def __init__(
+        self,
+        inplanes,
+        planes,
+        stride=1,
+        downsample=None,
+        groups=1,
+        base_width=64,
+        dilation=1,
+        norm_layer=None,
+    ):
+        super(BasicBlock, self).__init__()
+        if norm_layer is None:
+            norm_layer = nn.BatchNorm2d
+        if groups != 1 or base_width != 64:
+            raise ValueError("BasicBlock only supports groups=1 and base_width=64")
+        if dilation > 1:
+            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
+        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
+        self.conv1 = conv3x3(inplanes, planes, stride)
+        self.bn1 = norm_layer(planes)
+        self.relu = nn.ReLU(inplace=True)
+        self.conv2 = conv3x3(planes, planes)
+        self.bn2 = norm_layer(planes)
+        self.downsample = downsample
+        self.stride = stride
+
+    def forward(self, x):
+        identity = x
+
+        out = self.conv1(x)
+        out = self.bn1(out)
+        out = self.relu(out)
+
+        out = self.conv2(out)
+        out = self.bn2(out)
+
+        if self.downsample is not None:
+            identity = self.downsample(x)
+
+        out += identity
+        out = self.relu(out)
+
+        return out
+
+
+class Bottleneck(nn.Module):
+    expansion = 4
+    __constants__ = ["downsample"]
+
+    def __init__(
+        self,
+        inplanes,
+        planes,
+        stride=1,
+        downsample=None,
+        groups=1,
+        base_width=64,
+        dilation=1,
+        norm_layer=None,
+    ):
+        super(Bottleneck, self).__init__()
+        if norm_layer is None:
+            norm_layer = nn.BatchNorm2d
+        width = int(planes * (base_width / 64.0)) * groups
+        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
+        self.conv1 = conv1x1(inplanes, width)
+        self.bn1 = norm_layer(width)
+        self.conv2 = conv3x3(width, width, stride, groups, dilation)
+        self.bn2 = norm_layer(width)
+        self.conv3 = conv1x1(width, planes * self.expansion)
+        self.bn3 = norm_layer(planes * self.expansion)
+        self.relu = nn.ReLU(inplace=True)
+        self.downsample = downsample
+        self.stride = stride
+
+    def forward(self, x):
+        identity = x
+
+        out = self.conv1(x)
+        out = self.bn1(out)
+        out = self.relu(out)
+
+        out = self.conv2(out)
+        out = self.bn2(out)
+        out = self.relu(out)
+
+        out = self.conv3(out)
+        out = self.bn3(out)
+
+        if self.downsample is not None:
+            identity = self.downsample(x)
+
+        out += identity
+        out = self.relu(out)
+
+        return out
+
+
+class ResNet(nn.Module):
+    def __init__(
+        self,
+        block,
+        layers,
+        zero_init_residual=False,
+        groups=1,
+        widen=1,
+        width_per_group=64,
+        replace_stride_with_dilation=None,
+        norm_layer=None,
+        normalize=False,
+        output_dim=0,
+        hidden_mlp=0,
+        nmb_prototypes=0,
+        eval_mode=False,
+    ):
+        super(ResNet, self).__init__()
+        if norm_layer is None:
+            norm_layer = nn.BatchNorm2d
+        self._norm_layer = norm_layer
+
+        self.eval_mode = eval_mode
+        self.padding = nn.ConstantPad2d(1, 0.0)
+
+        self.inplanes = width_per_group * widen
+        self.dilation = 1
+        if replace_stride_with_dilation is None:
+            # each element in the tuple indicates if we should replace
+            # the 2x2 stride with a dilated convolution instead
+            replace_stride_with_dilation = [False, False, False]
+        if len(replace_stride_with_dilation) != 3:
+            raise ValueError(
+                "replace_stride_with_dilation should be None "
+                "or a 3-element tuple, got {}".format(replace_stride_with_dilation)
+            )
+        self.groups = groups
+        self.base_width = width_per_group
+
+        # change padding 3 -> 2 compared to the original torchvision code because a padding layer was added
+        num_out_filters = width_per_group * widen
+        self.conv1 = nn.Conv2d(
+            3, num_out_filters, kernel_size=3, stride=1, padding=1, bias=False
+        )
+        self.bn1 = norm_layer(num_out_filters)
+        self.relu = nn.ReLU(inplace=True)
+        # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+        self.layer1 = self._make_layer(block, num_out_filters, layers[0])
+        num_out_filters *= 2
+        self.layer2 = self._make_layer(
+            block, num_out_filters, layers[1], stride=2, dilate=replace_stride_with_dilation[0]
+        )
+        num_out_filters *= 2
+        self.layer3 = self._make_layer(
+            block, num_out_filters, layers[2], stride=2, dilate=replace_stride_with_dilation[1]
+        )
+        num_out_filters *= 2
+        self.layer4 = self._make_layer(
+            block, num_out_filters, layers[3], stride=2, dilate=replace_stride_with_dilation[2]
+        )
+        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+
+        # normalize output features
+        self.l2norm = normalize
+
+        # projection head
+        if output_dim == 0:
+            self.projection_head = None
+        elif hidden_mlp == 0:
+            self.projection_head = nn.Linear(num_out_filters * block.expansion, output_dim)
+        else:
+            self.projection_head = nn.Sequential(
+                nn.Linear(num_out_filters * block.expansion, hidden_mlp),
+                nn.BatchNorm1d(hidden_mlp),
+                nn.ReLU(inplace=True),
+                nn.Linear(hidden_mlp, output_dim),
+            )
+
+        # prototype layer
+        self.prototypes = None
+        if isinstance(nmb_prototypes, list):
+            self.prototypes = MultiPrototypes(output_dim, nmb_prototypes)
+        elif nmb_prototypes > 0:
+            self.prototypes = nn.Linear(output_dim, nmb_prototypes, bias=False)
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
+            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+
+        # Zero-initialize the last BN in each residual branch,
+        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
+        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
+        if zero_init_residual:
+            for m in self.modules():
+                if isinstance(m, Bottleneck):
+                    nn.init.constant_(m.bn3.weight, 0)
+                elif isinstance(m, BasicBlock):
+                    nn.init.constant_(m.bn2.weight, 0)
+
+    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
+        norm_layer = self._norm_layer
+        downsample = None
+        previous_dilation = self.dilation
+        if dilate:
+            self.dilation *= stride
+            stride = 1
+        if stride != 1 or self.inplanes != planes * block.expansion:
+            downsample = nn.Sequential(
+                conv1x1(self.inplanes, planes * block.expansion, stride),
+                norm_layer(planes * block.expansion),
+            )
+
+        layers = []
+        layers.append(
+            block(
+                self.inplanes,
+                planes,
+                stride,
+                downsample,
+                self.groups,
+                self.base_width,
+                previous_dilation,
+                norm_layer,
+            )
+        )
+        self.inplanes = planes * block.expansion
+        for _ in range(1, blocks):
+            layers.append(
+                block(
+                    self.inplanes,
+                    planes,
+                    groups=self.groups,
+                    base_width=self.base_width,
+                    dilation=self.dilation,
+                    norm_layer=norm_layer,
+                )
+            )
+
+        return nn.Sequential(*layers)
+
+    def forward_backbone(self, x):
+        x = self.padding(x)
+
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = self.relu(x)
+        # x = self.maxpool(x)
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        x = self.layer4(x)
+
+        if self.eval_mode:
+            return x
+
+        x = self.avgpool(x)
+        x = torch.flatten(x, 1)
+
+        return x
+
+    def forward_head(self, x):
+        if self.projection_head is not None:
+            x = self.projection_head(x)
+
+        if self.l2norm:
+            x = nn.functional.normalize(x, dim=1, p=2)
+
+        if self.prototypes is not None:
+            return x, self.prototypes(x)
+        return x
+
+    def forward(self, inputs):
+        if not isinstance(inputs, list):
+            inputs = [inputs]
+        idx_crops = torch.cumsum(torch.unique_consecutive(
+            torch.tensor([inp.shape[-1] for inp in inputs]),
+            return_counts=True,
+        )[1], 0)
+        start_idx = 0
+        for end_idx in idx_crops:
+            _out = self.forward_backbone(torch.cat(inputs[start_idx: end_idx]).cuda(non_blocking=True))
+            if start_idx == 0:
+                output = _out
+            else:
+                output = torch.cat((output, _out))
+            start_idx = end_idx
+        return self.forward_head(output)
+
+
+class MultiPrototypes(nn.Module):
+    def __init__(self, output_dim, nmb_prototypes):
+        super(MultiPrototypes, self).__init__()
+        self.nmb_heads = len(nmb_prototypes)
+        for i, k in enumerate(nmb_prototypes):
+            self.add_module("prototypes" + str(i), nn.Linear(output_dim, k, bias=False))
+
+    def forward(self, x):
+        out = []
+        for i in range(self.nmb_heads):
+            out.append(getattr(self, "prototypes" + str(i))(x))
+        return out
+
+
+def resnet18(**kwargs):
+    # NB: despite the name, this builds a [2, 2, 2, 2] network from Bottleneck
+    # blocks; the canonical ResNet-18 uses BasicBlock instead.
+    return ResNet(Bottleneck, [2, 2, 2, 2], **kwargs)
+
+
+def resnet50(**kwargs):
+    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
+
+
+def resnet50w2(**kwargs):
+    return ResNet(Bottleneck, [3, 4, 6, 3], widen=2, **kwargs)
+
+
+def resnet50w4(**kwargs):
+    return ResNet(Bottleneck, [3, 4, 6, 3], widen=4, **kwargs)
+
+
+def resnet50w5(**kwargs):
+    return ResNet(Bottleneck, [3, 4, 6, 3], widen=5, **kwargs)
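
A minimal sketch (not from the commit) of the multi-crop forward pass: ResNet.forward groups same-resolution crops with unique_consecutive and runs each group as a single batch. It assumes a CUDA device, since forward moves inputs to the GPU with .cuda(non_blocking=True); the crop counts, resolutions, and head sizes below are illustrative, not values from this repo:

import torch
from resnet50 import resnet50

# projection-head and prototype sizes are assumptions for the example
model = resnet50(output_dim=128, hidden_mlp=2048, nmb_prototypes=3000).cuda().eval()

# two global 224x224 crops and four local 96x96 crops per image; crops that
# share a resolution are concatenated and forwarded as one batch internally
crops = [torch.randn(8, 3, 224, 224) for _ in range(2)] \
      + [torch.randn(8, 3, 96, 96) for _ in range(4)]

with torch.no_grad():
    embeddings, scores = model(crops)
print(embeddings.shape)  # torch.Size([48, 128])
print(scores.shape)      # torch.Size([48, 3000])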