Update core/networks.py
core/networks.py  +360 -355  CHANGED
@@ -1,355 +1,360 @@
# Copyright (C) 2021 * Ltd. All rights reserved.
# author : Sanghyeon Jo <[email protected]>

import math

import torch
import torch.nn as nn
import torch.nn.functional as F

from torchvision import models
import torch.utils.model_zoo as model_zoo

from .arch_resnet import resnet
from .arch_resnest import resnest
from .abc_modules import ABC_Model

from .deeplab_utils import ASPP, Decoder
from .aff_utils import PathIndex
from .puzzle_utils import tile_features, merge_features

from tools.ai.torch_utils import resize_for_tensors

#######################################################################
# Normalization
#######################################################################
from .sync_batchnorm.batchnorm import SynchronizedBatchNorm2d

class FixedBatchNorm(nn.BatchNorm2d):
    def forward(self, x):
        return F.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias, training=False, eps=self.eps)

def group_norm(features):
    return nn.GroupNorm(4, features)
#######################################################################

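# A minimal usage sketch (illustrative only, shapes assumed): FixedBatchNorm
# always normalizes with the stored running statistics, so BN layers built
# from it stay frozen even when the surrounding model is put in train() mode.
"""
bn = FixedBatchNorm(64)
bn.train()                       # the training flag is ignored by forward()
x = torch.randn(2, 64, 8, 8)
y = bn(x)                        # uses running_mean/running_var, never batch stats
"""
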
class Backbone(nn.Module, ABC_Model):
    def __init__(self, model_name, num_classes=20, mode='fix', segmentation=False):
        super().__init__()

        self.mode = mode

        if self.mode == 'fix':
            self.norm_fn = FixedBatchNorm
        else:
            self.norm_fn = nn.BatchNorm2d

        if 'resnet' in model_name:
            self.model = resnet.ResNet(resnet.Bottleneck, resnet.layers_dic[model_name], strides=(2, 2, 2, 1),
                                       batch_norm_fn=self.norm_fn)

            state_dict = model_zoo.load_url(resnet.urls_dic[model_name])
            state_dict.pop('fc.weight')
            state_dict.pop('fc.bias')

            self.model.load_state_dict(state_dict)
        else:
            if segmentation:
                dilation, dilated = 4, True
            else:
                dilation, dilated = 2, False

            # look up the ResNeSt constructor by name (equivalent to eval("resnest." + model_name), without eval)
            self.model = getattr(resnest, model_name)(pretrained=True, dilated=dilated, dilation=dilation,
                                                      norm_layer=self.norm_fn)

            del self.model.avgpool
            del self.model.fc

        self.stage1 = nn.Sequential(self.model.conv1,
                                    self.model.bn1,
                                    self.model.relu,
                                    self.model.maxpool)
        self.stage2 = nn.Sequential(self.model.layer1)
        self.stage3 = nn.Sequential(self.model.layer2)
        self.stage4 = nn.Sequential(self.model.layer3)
        self.stage5 = nn.Sequential(self.model.layer4)

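# Hypothetical construction sketch: 'resnet50' is assumed to be a key of
# resnet.layers_dic/resnet.urls_dic. With strides=(2, 2, 2, 1) the backbone has
# an output stride of 16, so a 224x224 input yields 14x14 stage5 features.
"""
backbone = Backbone('resnet50', mode='fix')
x = torch.randn(1, 3, 224, 224)
for stage in [backbone.stage1, backbone.stage2, backbone.stage3,
              backbone.stage4, backbone.stage5]:
    x = stage(x)
print(x.shape)                   # torch.Size([1, 2048, 14, 14])
"""
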
class Classifier(Backbone):
    # NOTE: state_path is a trailing keyword argument; placing it second and
    # positional would shift num_classes/mode into the wrong slots of
    # Backbone.__init__ and break subclasses' positional super() calls.
    # It is currently unused.
    def __init__(self, model_name, num_classes=20, mode='fix', state_path=None):
        super().__init__(model_name, num_classes, mode)

        self.classifier = nn.Conv2d(2048, num_classes, 1, bias=False)
        self.num_classes = num_classes

        self.initialize([self.classifier])

    def forward(self, x, with_cam=False):
        x = self.stage1(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.stage5(x)

        if with_cam:
            features = self.classifier(x)
            logits = self.global_average_pooling_2d(features)
            return logits, features
        else:
            x = self.global_average_pooling_2d(x, keepdims=True)
            logits = self.classifier(x).view(-1, self.num_classes)
            return logits

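# Sketch of the two forward modes (sizes assumed): with_cam=True returns the
# per-class activation maps before pooling, which is what CAM extraction needs.
"""
model = Classifier('resnet50', num_classes=20)
images = torch.randn(2, 3, 224, 224)
logits = model(images)                            # (2, 20)
logits, features = model(images, with_cam=True)   # features: (2, 20, 14, 14)
cams = F.relu(features)                           # one common CAM post-processing step
"""
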
class Classifier_For_Positive_Pooling(Backbone):
    def __init__(self, model_name, num_classes=20, mode='fix'):
        super().__init__(model_name, num_classes, mode)

        self.classifier = nn.Conv2d(2048, num_classes, 1, bias=False)
        self.num_classes = num_classes

        self.initialize([self.classifier])

    def forward(self, x, with_cam=False):
        x = self.stage1(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.stage5(x)

        if with_cam:
            features = self.classifier(x)
            logits = self.global_average_pooling_2d(features)
            return logits, features
        else:
            x = self.global_average_pooling_2d(x, keepdims=True)
            logits = self.classifier(x).view(-1, self.num_classes)
            return logits

class Classifier_For_Puzzle(Classifier):
    def __init__(self, model_name, num_classes=20, mode='fix'):
        super().__init__(model_name, num_classes, mode)

    def forward(self, x, num_pieces=1, level=-1):
        batch_size = x.size()[0]

        output_dic = {}
        # stages 1-5 are the backbone blocks; "stage6" is the 1x1 classifier head
        layers = [self.stage1, self.stage2, self.stage3, self.stage4, self.stage5, self.classifier]

        for l, layer in enumerate(layers, 1):
            if level == l:
                x = tile_features(x, num_pieces)

            x = layer(x)
            output_dic['stage%d' % l] = x

        output_dic['logits'] = self.global_average_pooling_2d(output_dic['stage6'])

        for l in range(1, len(layers) + 1):
            if l >= level:
                output_dic['stage%d' % l] = merge_features(output_dic['stage%d' % l], num_pieces, batch_size)

        if level is not None:
            output_dic['merged_logits'] = self.global_average_pooling_2d(output_dic['stage6'])

        return output_dic

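# Illustrative sketch (shapes assumed): with level=1 and num_pieces=4 the input
# is tiled into four sub-images before stage1; 'logits' is computed on the
# tiled batch, while 'merged_logits' comes from features merged back together.
"""
model = Classifier_For_Puzzle('resnet50', num_classes=20)
images = torch.randn(2, 3, 224, 224)
out = model(images, num_pieces=4, level=1)
out['logits'].shape          # (8, 20): one row per tile
out['merged_logits'].shape   # (2, 20): tiles merged back to full images
"""
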
class AffinityNet(Backbone):
    def __init__(self, model_name, path_index=None):
        super().__init__(model_name, None, 'fix')

        if '50' in model_name:
            fc_edge1_features = 64
        else:
            fc_edge1_features = 128

        self.fc_edge1 = nn.Sequential(
            nn.Conv2d(fc_edge1_features, 32, 1, bias=False),
            nn.GroupNorm(4, 32),
            nn.ReLU(inplace=True),
        )
        self.fc_edge2 = nn.Sequential(
            nn.Conv2d(256, 32, 1, bias=False),
            nn.GroupNorm(4, 32),
            nn.ReLU(inplace=True),
        )
        self.fc_edge3 = nn.Sequential(
            nn.Conv2d(512, 32, 1, bias=False),
            nn.GroupNorm(4, 32),
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
            nn.ReLU(inplace=True),
        )
        self.fc_edge4 = nn.Sequential(
            nn.Conv2d(1024, 32, 1, bias=False),
            nn.GroupNorm(4, 32),
            nn.Upsample(scale_factor=4, mode='bilinear', align_corners=False),
            nn.ReLU(inplace=True),
        )
        self.fc_edge5 = nn.Sequential(
            nn.Conv2d(2048, 32, 1, bias=False),
            nn.GroupNorm(4, 32),
            nn.Upsample(scale_factor=4, mode='bilinear', align_corners=False),
            nn.ReLU(inplace=True),
        )
        self.fc_edge6 = nn.Conv2d(160, 1, 1, bias=True)

        self.backbone = nn.ModuleList([self.stage1, self.stage2, self.stage3, self.stage4, self.stage5])
        self.edge_layers = nn.ModuleList([self.fc_edge1, self.fc_edge2, self.fc_edge3, self.fc_edge4, self.fc_edge5, self.fc_edge6])

        if path_index is not None:
            self.path_index = path_index
            self.n_path_lengths = len(self.path_index.path_indices)
            for i, pi in enumerate(self.path_index.path_indices):
                self.register_buffer("path_indices_" + str(i), torch.from_numpy(pi))

    def train(self, mode=True):
        # keep the frozen backbone in eval mode even while the edge head trains
        super().train(mode)
        self.backbone.eval()

    def forward(self, x, with_affinity=False):
        x1 = self.stage1(x).detach()
        x2 = self.stage2(x1).detach()
        x3 = self.stage3(x2).detach()
        x4 = self.stage4(x3).detach()
        x5 = self.stage5(x4).detach()

        edge1 = self.fc_edge1(x1)
        edge2 = self.fc_edge2(x2)
        edge3 = self.fc_edge3(x3)[..., :edge2.size(2), :edge2.size(3)]
        edge4 = self.fc_edge4(x4)[..., :edge2.size(2), :edge2.size(3)]
        edge5 = self.fc_edge5(x5)[..., :edge2.size(2), :edge2.size(3)]

        edge = self.fc_edge6(torch.cat([edge1, edge2, edge3, edge4, edge5], dim=1))

        if with_affinity:
            return edge, self.to_affinity(torch.sigmoid(edge))
        else:
            return edge

    def get_edge(self, x, image_size=512, stride=4):
        feat_size = (x.size(2) - 1) // stride + 1, (x.size(3) - 1) // stride + 1

        x = F.pad(x, [0, image_size - x.size(3), 0, image_size - x.size(2)])
        edge_out = self.forward(x)
        edge_out = edge_out[..., :feat_size[0], :feat_size[1]]
        # expects a 2-image batch (original and horizontally flipped); the two
        # predictions are averaged after un-flipping the second one
        edge_out = torch.sigmoid(edge_out[0] / 2 + edge_out[1].flip(-1) / 2)

        return edge_out

    """
    aff = self.to_affinity(torch.sigmoid(edge_out))
    pos_aff_loss = (-1) * torch.log(aff + 1e-5)
    neg_aff_loss = (-1) * torch.log(1. + 1e-5 - aff)
    """
    def to_affinity(self, edge):
        aff_list = []
        edge = edge.view(edge.size(0), -1)

        for i in range(self.n_path_lengths):
            ind = self._buffers["path_indices_" + str(i)]
            ind_flat = ind.view(-1)
            dist = torch.index_select(edge, dim=-1, index=ind_flat)
            dist = dist.view(dist.size(0), ind.size(0), ind.size(1), ind.size(2))
            # the affinity along a path is limited by its strongest edge response
            aff = torch.squeeze(1 - F.max_pool2d(dist, (dist.size(2), 1)), dim=2)
            aff_list.append(aff)
        aff_cat = torch.cat(aff_list, dim=1)
        return aff_cat

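# Usage sketch with assumed/hypothetical arguments: path_index is expected to
# provide path_indices at the edge-map resolution (stride 4, so 512/4 = 128);
# the PathIndex arguments below are illustrative, not confirmed by this file.
"""
path_index = PathIndex(radius=10, default_size=(128, 128))   # arguments assumed
model = AffinityNet('resnet50', path_index)
images = torch.randn(2, 3, 512, 512)
edge, aff = model(images, with_affinity=True)
pos_aff_loss = -torch.log(aff + 1e-5)        # mirrors the commented block above
neg_aff_loss = -torch.log(1. + 1e-5 - aff)
"""
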
class DeepLabv3_Plus(Backbone):
    def __init__(self, model_name, num_classes=21, mode='fix', use_group_norm=False):
        super().__init__(model_name, num_classes, mode, segmentation=False)

        if use_group_norm:
            norm_fn_for_extra_modules = group_norm
        else:
            norm_fn_for_extra_modules = self.norm_fn

        self.aspp = ASPP(output_stride=16, norm_fn=norm_fn_for_extra_modules)
        self.decoder = Decoder(num_classes, 256, norm_fn_for_extra_modules)

    def forward(self, x, with_cam=False):
        inputs = x

        x = self.stage1(x)
        x = self.stage2(x)
        x_low_level = x

        x = self.stage3(x)
        x = self.stage4(x)
        x = self.stage5(x)

        x = self.aspp(x)
        x = self.decoder(x, x_low_level)
        x = resize_for_tensors(x, inputs.size()[2:], align_corners=True)

        return x

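# Forward sketch (sizes assumed): the decoder output is resized back to the
# input resolution inside forward(), so dense per-pixel scores come out directly.
"""
model = DeepLabv3_Plus('resnet50', num_classes=21)
images = torch.randn(1, 3, 512, 512)
logits = model(images)           # (1, 21, 512, 512)
masks = logits.argmax(dim=1)     # (1, 512, 512) label map
"""
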
class Seg_Model(Backbone):
    def __init__(self, model_name, num_classes=21):
        super().__init__(model_name, num_classes, mode='fix', segmentation=False)

        self.classifier = nn.Conv2d(2048, num_classes, 1, bias=False)

    def forward(self, inputs):
        x = self.stage1(inputs)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.stage5(x)

        logits = self.classifier(x)
        # logits = resize_for_tensors(logits, inputs.size()[2:], align_corners=False)

        return logits

class CSeg_Model(Backbone):
    def __init__(self, model_name, num_classes=21):
        super().__init__(model_name, num_classes, 'fix')

        if '50' in model_name:
            fc_edge1_features = 64
        else:
            fc_edge1_features = 128

        self.fc_edge1 = nn.Sequential(
            nn.Conv2d(fc_edge1_features, 32, 1, bias=False),
            nn.GroupNorm(4, 32),
            nn.ReLU(inplace=True),
        )
        self.fc_edge2 = nn.Sequential(
            nn.Conv2d(256, 32, 1, bias=False),
            nn.GroupNorm(4, 32),
            nn.ReLU(inplace=True),
        )
        self.fc_edge3 = nn.Sequential(
            nn.Conv2d(512, 32, 1, bias=False),
            nn.GroupNorm(4, 32),
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
            nn.ReLU(inplace=True),
        )
        self.fc_edge4 = nn.Sequential(
            nn.Conv2d(1024, 32, 1, bias=False),
            nn.GroupNorm(4, 32),
            nn.Upsample(scale_factor=4, mode='bilinear', align_corners=False),
            nn.ReLU(inplace=True),
        )
        self.fc_edge5 = nn.Sequential(
            nn.Conv2d(2048, 32, 1, bias=False),
            nn.GroupNorm(4, 32),
            nn.Upsample(scale_factor=4, mode='bilinear', align_corners=False),
            nn.ReLU(inplace=True),
        )
        self.fc_edge6 = nn.Conv2d(160, num_classes, 1, bias=True)

    def forward(self, x):
        x1 = self.stage1(x)
        x2 = self.stage2(x1)
        x3 = self.stage3(x2)
        x4 = self.stage4(x3)
        x5 = self.stage5(x4)

        edge1 = self.fc_edge1(x1)
        edge2 = self.fc_edge2(x2)
        edge3 = self.fc_edge3(x3)[..., :edge2.size(2), :edge2.size(3)]
        edge4 = self.fc_edge4(x4)[..., :edge2.size(2), :edge2.size(3)]
        edge5 = self.fc_edge5(x5)[..., :edge2.size(2), :edge2.size(3)]

        logits = self.fc_edge6(torch.cat([edge1, edge2, edge3, edge4, edge5], dim=1))
        # logits = resize_for_tensors(logits, x.size()[2:], align_corners=True)

        return logits
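
# Seg_Model and CSeg_Model return logits at feature resolution (their resize
# calls are commented out above); a caller is expected to upsample them, e.g.
# with resize_for_tensors as used elsewhere in this file (shapes assumed):
"""
seg = Seg_Model('resnet50', num_classes=21)
images = torch.randn(1, 3, 512, 512)
logits = seg(images)                             # (1, 21, 32, 32) at stride 16
masks = resize_for_tensors(logits, images.size()[2:], align_corners=False)
pred = masks.argmax(dim=1)                       # (1, 512, 512)
"""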