TTP/mmpretrain/configs/convnext/convnext-small_32xb128_in1k-384px.py

# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import read_base
from mmpretrain.engine import EMAHook

with read_base():
    from .._base_.datasets.imagenet_bs64_swin_384 import *
    from .._base_.default_runtime import *
    from .._base_.models.convnext_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *
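
# NOTE: in mmengine's new-style Python configs, the base files imported above
# define `model`, `train_dataloader`, `optim_wrapper`, `auto_scale_lr`, etc.
# as module-level variables; the `.update(...)` calls below override settings
# inherited from those bases.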

# dataset setting
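# `batch_size` is per GPU; with the 32 GPUs this config is named for
# (32xb128), the total batch size is 32 x 128 = 4096. The dataset base
# (imagenet_bs64_swin_384) uses 384x384 inputs with a default batch size
# of 64, overridden here.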
train_dataloader.update(batch_size=128)

# schedule setting
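# lr=4e-3 is the peak AdamW learning rate for the full 4096-sample batch;
# gradients are clipped to a max L2 norm of 5.0.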
optim_wrapper.update(
    optimizer=dict(lr=4e-3),
    clip_grad=dict(max_norm=5.0),
)

# runtime setting
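# EMAHook maintains an exponential moving average of the model weights during
# training. With MMEngine's ExponentialMovingAverage the update is roughly
#   averaged = (1 - momentum) * averaged + momentum * source,
# so momentum=4e-5 gives a very slowly drifting average.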
custom_hooks = [dict(type=EMAHook, momentum=4e-5, priority='ABOVE_NORMAL')]

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (32 GPUs) x (128 samples per GPU)
auto_scale_lr.update(base_batch_size=4096)
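
# When LR auto-scaling is enabled (e.g. via the `--auto-scale-lr` flag of
# mmpretrain's tools/train.py), the LR is scaled linearly with the actual
# total batch size:
#   scaled_lr = 4e-3 * actual_total_batch_size / 4096
# Example launch on 32 GPUs (a sketch, assuming mmpretrain's standard
# tools/dist_train.sh entry point):
#   bash tools/dist_train.sh \
#       configs/convnext/convnext-small_32xb128_in1k-384px.py 32 \
#       --auto-scale-lr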