# configs/setr/setr_vit-l_naive_8xb2-160k_ade20k-512x512.py
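# This config is assembled from MMEngine-style base files: any key set
# below overrides the value inherited from the corresponding base.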
_base_ = [
    '../_base_/models/setr_naive.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
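# 512x512 training crops; the data preprocessor pads and normalizes each
# batch to this size.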
crop_size = (512, 512)
data_preprocessor = dict(size=crop_size)
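# SyncBN shares batch-norm statistics across GPUs, which matters here
# because the per-GPU batch size is only 2.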
norm_cfg = dict(type='SyncBN', requires_grad=True)
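# ViT-L/16 backbone initialized from a local checkpoint via `init_cfg`;
# `pretrained=None` disables the older model-level pretrained field.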
model = dict(
    data_preprocessor=data_preprocessor,
    pretrained=None,
    backbone=dict(
        img_size=(512, 512),
        drop_rate=0.,
        init_cfg=dict(
            type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')),
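    # ADE20K has 150 semantic classes.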
    decode_head=dict(num_classes=150),
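    # Three auxiliary SETR-UP heads supervise intermediate transformer
    # outputs: in_index 0-2 select the shallower of the backbone's output
    # stages, while the deepest stage feeds the main decode head. The
    # backbone's out_indices are inherited from the setr_naive base config.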
    auxiliary_head=[
        dict(
            type='SETRUPHead',
            in_channels=1024,
            channels=256,
            in_index=0,
            num_classes=150,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'),
            num_convs=2,
            kernel_size=1,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='SETRUPHead',
            in_channels=1024,
            channels=256,
            in_index=1,
            num_classes=150,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'),
            num_convs=2,
            kernel_size=1,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='SETRUPHead',
            in_channels=1024,
            channels=256,
            in_index=2,
            num_classes=150,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'),
            num_convs=2,
            kernel_size=1,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))
    ],
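    # Slide inference: 512x512 windows with stride 341, i.e. 171 px (about
    # one third of a window) of overlap; overlapping logits are averaged.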
    test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(341, 341)),
)
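# Optimizer overrides applied on top of schedule_160k.py (assumed to
# define SGD there): lr is set to 0.01 and weight decay is disabled.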
optimizer = dict(lr=0.01, weight_decay=0.0)
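# 'head' matches the decode and auxiliary heads by parameter name; they
# are randomly initialized, so they get a 10x learning-rate multiplier
# relative to the pretrained backbone.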
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=optimizer,
    paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)}))
# 8 GPUs x 2 samples per GPU -> effective batch size 16
train_dataloader = dict(batch_size=2)
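# Evaluation uses batch size 1: slide inference processes full-resolution
# validation images, which vary in size, one at a time.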
val_dataloader = dict(batch_size=1)
test_dataloader = val_dataloader