zhong-al committed
Commit · 2c26ac8
1 Parent(s): aa912e2

Add model + config files
Browse files
- .gitattributes +1 -0
- __init__.py +0 -0
- cfg.py +13 -0
- checkpoint_epoch_00075.pyth +3 -0
- config.yml +492 -0
- configuration_x3d.py +9 -0
- helpers/cfg.py +1286 -0
- helpers/head.py +146 -0
- helpers/norm.py +110 -0
- helpers/resnet.py +927 -0
- helpers/stem.py +320 -0
- modeling_x3d.py +15 -0
- x3d.py +350 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+checkpoint_epoch_00075.pyth filter=lfs diff=lfs merge=lfs -text
__init__.py
ADDED
File without changes
cfg.py
ADDED
@@ -0,0 +1,13 @@
+#!/usr/bin/env python3
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+
+from x3d_model.helpers.cfg import get_cfg
+
+def load_config(path_to_config=None):
+    # Setup cfg.
+    cfg = get_cfg()
+
+    # Load config from file.
+    if path_to_config is not None:
+        cfg.merge_from_file(path_to_config)
+    return cfg
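
For orientation, load_config layers an optional YAML file over the defaults that helpers/cfg.py builds with fvcore's CfgNode. A minimal usage sketch against the files in this commit (the printed values are the ones config.yml sets):

    from x3d_model.cfg import load_config

    # Merge this repo's YAML over the defaults defined in helpers/cfg.py.
    cfg = load_config("config.yml")
    print(cfg.MODEL.MODEL_NAME)   # X3D
    print(cfg.MODEL.NUM_CLASSES)  # 8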
checkpoint_epoch_00075.pyth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66ea6f31835ec44a91c7df23e304f429872c091b34a5447cd62ad7f1d1b3837e
+size 43662374
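
The three lines above are a Git LFS pointer, not the weights themselves; the ~43 MB .pyth file is fetched by git-lfs at checkout. A hedged sketch of inspecting the checkpoint once downloaded, assuming .pyth is a regular torch.save archive and that the weights sit under a SlowFast-style "model_state" key (both are assumptions here):

    import torch

    # Load on CPU so no GPU is required for inspection.
    checkpoint = torch.load("checkpoint_epoch_00075.pyth", map_location="cpu")
    # "model_state" follows the SlowFast checkpoint convention (assumption).
    state_dict = checkpoint.get("model_state", checkpoint)
    print(len(state_dict), "entries")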
config.yml
ADDED
@@ -0,0 +1,492 @@
+AUG:
+  AA_TYPE: rand-m9-mstd0.5-inc1
+  COLOR_JITTER: 0.4
+  ENABLE: false
+  GEN_MASK_LOADER: false
+  INTERPOLATION: bicubic
+  MASK_FRAMES: false
+  MASK_RATIO: 0.0
+  MASK_TUBE: false
+  MASK_WINDOW_SIZE:
+  - 8
+  - 7
+  - 7
+  MAX_MASK_PATCHES_PER_BLOCK: null
+  NUM_SAMPLE: 1
+  RE_COUNT: 1
+  RE_MODE: pixel
+  RE_PROB: 0.25
+  RE_SPLIT: false
+AVA:
+  ANNOTATION_DIR: /mnt/vol/gfsai-flash3-east/ai-group/users/haoqifan/ava/frame_list/
+  BGR: false
+  DETECTION_SCORE_THRESH: 0.9
+  EXCLUSION_FILE: ava_val_excluded_timestamps_v2.2.csv
+  FRAME_DIR: /mnt/fair-flash3-east/ava_trainval_frames.img/
+  FRAME_LIST_DIR: /mnt/vol/gfsai-flash3-east/ai-group/users/haoqifan/ava/frame_list/
+  FULL_TEST_ON_VAL: false
+  GROUNDTRUTH_FILE: ava_val_v2.2.csv
+  IMG_PROC_BACKEND: cv2
+  LABEL_MAP_FILE: ava_action_list_v2.2_for_activitynet_2019.pbtxt
+  TEST_FORCE_FLIP: false
+  TEST_LISTS:
+  - val.csv
+  TEST_PREDICT_BOX_LISTS:
+  - ava_val_predicted_boxes.csv
+  TRAIN_GT_BOX_LISTS:
+  - ava_train_v2.2.csv
+  TRAIN_LISTS:
+  - train.csv
+  TRAIN_PCA_JITTER_ONLY: true
+  TRAIN_PREDICT_BOX_LISTS: []
+  TRAIN_USE_COLOR_AUGMENTATION: false
+BENCHMARK:
+  LOG_PERIOD: 100
+  NUM_EPOCHS: 5
+  SHUFFLE: true
+BN:
+  GLOBAL_SYNC: false
+  NORM_TYPE: sync_batchnorm
+  NUM_BATCHES_PRECISE: 200
+  NUM_SPLITS: 1
+  NUM_SYNC_DEVICES: 1
+  USE_PRECISE_STATS: true
+  WEIGHT_DECAY: 0.0
+CONTRASTIVE:
+  BN_MLP: false
+  BN_SYNC_MLP: false
+  DELTA_CLIPS_MAX: .inf
+  DELTA_CLIPS_MIN: -.inf
+  DIM: 128
+  INTERP_MEMORY: false
+  KNN_ON: true
+  LENGTH: 239975
+  LOCAL_SHUFFLE_BN: true
+  MEM_TYPE: 1d
+  MLP_DIM: 2048
+  MOCO_MULTI_VIEW_QUEUE: false
+  MOMENTUM: 0.5
+  MOMENTUM_ANNEALING: false
+  NUM_CLASSES_DOWNSTREAM: 400
+  NUM_MLP_LAYERS: 1
+  PREDICTOR_DEPTHS: []
+  QUEUE_LEN: 65536
+  SEQUENTIAL: false
+  SIMCLR_DIST_ON: true
+  SWAV_QEUE_LEN: 0
+  T: 0.07
+  TYPE: mem
+DATA:
+  COLOR_RND_GRAYSCALE: 0.0
+  DECODING_BACKEND: torchvision
+  DECODING_SHORT_SIZE: 256
+  DUMMY_LOAD: false
+  ENSEMBLE_METHOD: max
+  IN22K_TRAINVAL: false
+  IN22k_VAL_IN1K: ''
+  INPUT_CHANNEL_NUM:
+  - 3
+  INV_UNIFORM_SAMPLE: true
+  IN_VAL_CROP_RATIO: 0.875
+  LOADER_CHUNK_OVERALL_SIZE: 0
+  LOADER_CHUNK_SIZE: 0
+  MEAN:
+  - 0.45
+  - 0.45
+  - 0.45
+  MULTI_LABEL: true
+  NUM_FRAMES: 16
+  PATH_LABEL_SEPARATOR: ' '
+  PATH_PREFIX: kabr/KABR/dataset/image
+  PATH_TO_DATA_DIR: kabr/KABR/annotation
+  PATH_TO_PRELOAD_IMDB: ''
+  RANDOM_FLIP: true
+  REVERSE_INPUT_CHANNEL: true
+  SAMPLING_RATE: 5
+  SKIP_ROWS: 0
+  SSL_BLUR_SIGMA_MAX:
+  - 0.0
+  - 2.0
+  SSL_BLUR_SIGMA_MIN:
+  - 0.0
+  - 0.1
+  SSL_COLOR_BRI_CON_SAT:
+  - 0.2
+  - 0.2
+  - 0.2
+  SSL_COLOR_HUE: 0.1
+  SSL_COLOR_JITTER: true
+  SSL_MOCOV2_AUG: false
+  STD:
+  - 0.225
+  - 0.225
+  - 0.225
+  TARGET_FPS: 30
+  TEST_CROP_SIZE: 300
+  TIME_DIFF_PROB: 0.0
+  TRAIN_CROP_NUM_SPATIAL: 1
+  TRAIN_CROP_NUM_TEMPORAL: 1
+  TRAIN_CROP_SIZE: 300
+  TRAIN_JITTER_ASPECT_RELATIVE: []
+  TRAIN_JITTER_FPS: 0.0
+  TRAIN_JITTER_MOTION_SHIFT: false
+  TRAIN_JITTER_SCALES:
+  - 300
+  - 400
+  TRAIN_JITTER_SCALES_RELATIVE: []
+  TRAIN_PCA_EIGVAL:
+  - 0.225
+  - 0.224
+  - 0.229
+  TRAIN_PCA_EIGVEC:
+  - - -0.5675
+    - 0.7192
+    - 0.4009
+  - - -0.5808
+    - -0.0045
+    - -0.814
+  - - -0.5836
+    - -0.6948
+    - 0.4203
+  USE_OFFSET_SAMPLING: false
+DATA_LOADER:
+  ENABLE_MULTI_THREAD_DECODE: false
+  NUM_WORKERS: 8
+  PIN_MEMORY: true
+DEMO:
+  BUFFER_SIZE: 0
+  CLIP_VIS_SIZE: 10
+  COMMON_CLASS_NAMES:
+  - watch (a person)
+  - talk to (e.g., self, a person, a group)
+  - listen to (a person)
+  - touch (an object)
+  - carry/hold (an object)
+  - walk
+  - sit
+  - lie/sleep
+  - bend/bow (at the waist)
+  COMMON_CLASS_THRES: 0.7
+  DETECTRON2_CFG: COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml
+  DETECTRON2_THRESH: 0.9
+  DETECTRON2_WEIGHTS: detectron2://COCO-Detection/faster_rcnn_R_50_FPN_3x/137849458/model_final_280758.pkl
+  DISPLAY_HEIGHT: 0
+  DISPLAY_WIDTH: 0
+  ENABLE: false
+  FPS: 30
+  GT_BOXES: ''
+  INPUT_FORMAT: BGR
+  INPUT_VIDEO: kabr/KABR/dataset/video/G0103.mp4
+  LABEL_FILE_PATH: kabr/KABR/annotation/classes.json
+  NUM_CLIPS_SKIP: 1
+  NUM_VIS_INSTANCES: 1
+  OUTPUT_FILE: kabr/KABR/dataset/predict/G0103.mp4
+  OUTPUT_FPS: -1
+  PREDS_BOXES: ''
+  SLOWMO: 1
+  STARTING_SECOND: 900
+  THREAD_ENABLE: false
+  UNCOMMON_CLASS_THRES: 0.3
+  VIS_MODE: thres
+  WEBCAM: -1
+DETECTION:
+  ALIGNED: true
+  ENABLE: false
+  ROI_XFORM_RESOLUTION: 7
+  SPATIAL_SCALE_FACTOR: 16
+DIST_BACKEND: nccl
+LOG_MODEL_INFO: true
+LOG_PERIOD: 10
+MASK:
+  DECODER_DEPTH: 0
+  DECODER_EMBED_DIM: 512
+  DECODER_SEP_POS_EMBED: false
+  DEC_KV_KERNEL: []
+  DEC_KV_STRIDE: []
+  ENABLE: false
+  HEAD_TYPE: separate
+  MAE_ON: false
+  MAE_RND_MASK: false
+  NORM_PRED_PIXEL: true
+  PER_FRAME_MASKING: false
+  PRED_HOG: false
+  PRETRAIN_DEPTH:
+  - 15
+  SCALE_INIT_BY_DEPTH: false
+  TIME_STRIDE_LOSS: true
+MIXUP:
+  ALPHA: 0.8
+  CUTMIX_ALPHA: 1.0
+  ENABLE: false
+  LABEL_SMOOTH_VALUE: 0.1
+  PROB: 1.0
+  SWITCH_PROB: 0.5
+MODEL:
+  ACT_CHECKPOINT: false
+  ARCH: x3d
+  DETACH_FINAL_FC: false
+  DROPCONNECT_RATE: 0.0
+  DROPOUT_RATE: 0.5
+  FC_INIT_STD: 0.01
+  FP16_ALLREDUCE: false
+  FROZEN_BN: false
+  HEAD_ACT: sigmoid
+  LOSS_FUNC: EQL
+  MODEL_NAME: X3D
+  MULTI_PATHWAY_ARCH:
+  - slowfast
+  NUM_CLASSES: 8
+  SINGLE_PATHWAY_ARCH:
+  - 2d
+  - c2d
+  - i3d
+  - slow
+  - x3d
+  - mvit
+  - maskmvit
+MULTIGRID:
+  BN_BASE_SIZE: 8
+  DEFAULT_B: 0
+  DEFAULT_S: 0
+  DEFAULT_T: 0
+  EPOCH_FACTOR: 1.5
+  EVAL_FREQ: 3
+  LONG_CYCLE: false
+  LONG_CYCLE_FACTORS:
+  - - 0.25
+    - 0.7071067811865476
+  - - 0.5
+    - 0.7071067811865476
+  - - 0.5
+    - 1
+  - - 1
+    - 1
+  LONG_CYCLE_SAMPLING_RATE: 0
+  SHORT_CYCLE: false
+  SHORT_CYCLE_FACTORS:
+  - 0.5
+  - 0.7071067811865476
+MVIT:
+  CLS_EMBED_ON: true
+  DEPTH: 16
+  DIM_MUL: []
+  DIM_MUL_IN_ATT: false
+  DROPOUT_RATE: 0.0
+  DROPPATH_RATE: 0.1
+  EMBED_DIM: 96
+  HEAD_INIT_SCALE: 1.0
+  HEAD_MUL: []
+  LAYER_SCALE_INIT_VALUE: 0.0
+  MLP_RATIO: 4.0
+  MODE: conv
+  NORM: layernorm
+  NORM_STEM: false
+  NUM_HEADS: 1
+  PATCH_2D: false
+  PATCH_KERNEL:
+  - 3
+  - 7
+  - 7
+  PATCH_PADDING:
+  - 2
+  - 4
+  - 4
+  PATCH_STRIDE:
+  - 2
+  - 4
+  - 4
+  POOL_FIRST: false
+  POOL_KVQ_KERNEL: null
+  POOL_KV_STRIDE: []
+  POOL_KV_STRIDE_ADAPTIVE: null
+  POOL_Q_STRIDE: []
+  QKV_BIAS: true
+  REL_POS_SPATIAL: false
+  REL_POS_TEMPORAL: false
+  REL_POS_ZERO_INIT: false
+  RESIDUAL_POOLING: false
+  REV:
+    BUFFER_LAYERS: []
+    ENABLE: false
+    PRE_Q_FUSION: avg
+    RESPATH_FUSE: concat
+    RES_PATH: conv
+  SEPARATE_QKV: false
+  SEP_POS_EMBED: false
+  USE_ABS_POS: true
+  USE_FIXED_SINCOS_POS: false
+  USE_MEAN_POOLING: false
+  ZERO_DECAY_POS_CLS: true
+NONLOCAL:
+  GROUP:
+  - - 1
+  - - 1
+  - - 1
+  - - 1
+  INSTANTIATION: dot_product
+  LOCATION:
+  - - []
+  - - []
+  - - []
+  - - []
+  POOL:
+  - - - 1
+      - 2
+      - 2
+    - - 1
+      - 2
+      - 2
+  - - - 1
+      - 2
+      - 2
+    - - 1
+      - 2
+      - 2
+  - - - 1
+      - 2
+      - 2
+    - - 1
+      - 2
+      - 2
+  - - - 1
+      - 2
+      - 2
+    - - 1
+      - 2
+      - 2
+NUM_GPUS: 8
+NUM_SHARDS: 1
+OUTPUT_DIR: kabr/KABR/logs/x3d-l-kabr
+RESNET:
+  DEPTH: 50
+  INPLACE_RELU: true
+  NUM_BLOCK_TEMP_KERNEL:
+  - - 3
+  - - 4
+  - - 6
+  - - 3
+  NUM_GROUPS: 1
+  SPATIAL_DILATIONS:
+  - - 1
+  - - 1
+  - - 1
+  - - 1
+  SPATIAL_STRIDES:
+  - - 1
+  - - 2
+  - - 2
+  - - 2
+  STRIDE_1X1: false
+  TRANS_FUNC: x3d_transform
+  WIDTH_PER_GROUP: 64
+  ZERO_INIT_FINAL_BN: true
+  ZERO_INIT_FINAL_CONV: false
+RNG_SEED: 0
+SHARD_ID: 0
+SLOWFAST:
+  ALPHA: 8
+  BETA_INV: 8
+  FUSION_CONV_CHANNEL_RATIO: 2
+  FUSION_KERNEL_SZ: 5
+SOLVER:
+  BASE_LR: 0.05
+  BASE_LR_SCALE_NUM_SHARDS: true
+  BETAS:
+  - 0.9
+  - 0.999
+  CLIP_GRAD_L2NORM: null
+  CLIP_GRAD_VAL: null
+  COSINE_AFTER_WARMUP: false
+  COSINE_END_LR: 0.0
+  DAMPENING: 0.0
+  GAMMA: 0.1
+  LARS_ON: false
+  LAYER_DECAY: 1.0
+  LRS: []
+  LR_POLICY: cosine
+  MAX_EPOCH: 120
+  MOMENTUM: 0.9
+  NESTEROV: true
+  OPTIMIZING_METHOD: sgd
+  STEPS: []
+  STEP_SIZE: 1
+  WARMUP_EPOCHS: 35.0
+  WARMUP_FACTOR: 0.1
+  WARMUP_START_LR: 0.01
+  WEIGHT_DECAY: 5.0e-05
+  ZERO_WD_1D_PARAM: false
+TASK: ''
+TENSORBOARD:
+  CATEGORIES_PATH: ''
+  CLASS_NAMES_PATH: kabr/KABR/annotation/classes.json
+  CONFUSION_MATRIX:
+    ENABLE: true
+    FIGSIZE:
+    - 8
+    - 8
+    SUBSET_PATH: kabr/KABR/annotation/classes.txt
+  ENABLE: true
+  HISTOGRAM:
+    ENABLE: true
+    FIGSIZE:
+    - 8
+    - 8
+    SUBSET_PATH: kabr/KABR/annotation/classes.txt
+    TOPK: 3
+  LOG_DIR: ''
+  MODEL_VIS:
+    ACTIVATIONS: true
+    COLORMAP: Pastel2
+    ENABLE: true
+    GRAD_CAM:
+      COLORMAP: viridis
+      ENABLE: true
+      LAYER_LIST:
+      - s5/pathway0_res14
+      USE_TRUE_LABEL: false
+    INPUT_VIDEO: true
+    LAYER_LIST:
+    - s5/pathway0_res14
+    MODEL_WEIGHTS: true
+    TOPK_PREDS: 1
+  PREDICTIONS_PATH: ''
+  WRONG_PRED_VIS:
+    ENABLE: false
+    SUBSET_PATH: ''
+    TAG: Incorrectly classified videos.
+TEST:
+  BATCH_SIZE: 64
+  CHECKPOINT_FILE_PATH: ''
+  CHECKPOINT_TYPE: pytorch
+  DATASET: charades
+  ENABLE: false
+  NUM_ENSEMBLE_VIEWS: 2
+  NUM_SPATIAL_CROPS: 1
+  NUM_TEMPORAL_CLIPS: []
+  SAVE_RESULTS_PATH: kabr/KABR/logs/x3d-l-kabr/results.txt
+TRAIN:
+  AUTO_RESUME: true
+  BATCH_SIZE: 64
+  CHECKPOINT_CLEAR_NAME_PATTERN: []
+  CHECKPOINT_EPOCH_RESET: true
+  CHECKPOINT_FILE_PATH: slowfast/projects/x3d/x3d_l.pyth
+  CHECKPOINT_INFLATE: false
+  CHECKPOINT_IN_INIT: false
+  CHECKPOINT_PERIOD: 5
+  CHECKPOINT_TYPE: pytorch
+  DATASET: charades
+  ENABLE: true
+  EVAL_PERIOD: 5
+  KILL_LOSS_EXPLOSION_FACTOR: 0.0
+  MIXED_PRECISION: false
+VIS_MASK:
+  ENABLE: false
+X3D:
+  BN_LIN5: false
+  BOTTLENECK_FACTOR: 2.25
+  CHANNELWISE_3x3x3: true
+  DEPTH_FACTOR: 5.0
+  DIM_C1: 12
+  DIM_C5: 2048
+  SCALE_RES2: false
+  WIDTH_FACTOR: 2.0
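
This YAML overrides only a subset of the helpers/cfg.py defaults shown below: the X3D-L multipliers (WIDTH_FACTOR 2.0, DEPTH_FACTOR 5.0), an 8-class sigmoid head with the EQL loss for multi-label KABR annotations, and the kabr/KABR dataset paths. A short sketch of how the overrides are applied (get_cfg is the factory that cfg.py imports from helpers/cfg.py):

    from x3d_model.helpers.cfg import get_cfg

    cfg = get_cfg()                     # library defaults
    assert cfg.MODEL.NUM_CLASSES == 400
    cfg.merge_from_file("config.yml")   # this file's overrides
    assert cfg.MODEL.NUM_CLASSES == 8
    assert cfg.X3D.WIDTH_FACTOR == 2.0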
configuration_x3d.py
ADDED
@@ -0,0 +1,9 @@
+from transformers import PretrainedConfig
+from x3d_model.cfg import load_config
+
+class X3DConfig(PretrainedConfig):
+    model_type = "x3d"
+
+    def __init__(self, path: str = None, **kwargs):
+        super().__init__(**kwargs)
+        self.cfg = load_config(path)
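
X3DConfig wraps the full SlowFast-style CfgNode inside a transformers PretrainedConfig, presumably so modeling_x3d.py can consume it. A minimal instantiation sketch (note, as a caveat, that a nested CfgNode may not survive PretrainedConfig's JSON save/load round-trip, so save_pretrained behavior is an open question here):

    from x3d_model.configuration_x3d import X3DConfig

    # path=None keeps the helpers/cfg.py defaults; a YAML path merges overrides.
    config = X3DConfig(path="config.yml")
    print(config.model_type)      # x3d
    print(config.cfg.MODEL.ARCH)  # x3d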
helpers/cfg.py
ADDED
@@ -0,0 +1,1286 @@
1 |
+
#!/usr/bin/env python3
|
2 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
3 |
+
|
4 |
+
"""Configs."""
|
5 |
+
import math
|
6 |
+
|
7 |
+
from fvcore.common.config import CfgNode
|
8 |
+
|
9 |
+
# -----------------------------------------------------------------------------
|
10 |
+
# Config definition
|
11 |
+
# -----------------------------------------------------------------------------
|
12 |
+
_C = CfgNode()
|
13 |
+
|
14 |
+
# -----------------------------------------------------------------------------
|
15 |
+
# Contrastive Model (for MoCo, SimCLR, SwAV, BYOL)
|
16 |
+
# -----------------------------------------------------------------------------
|
17 |
+
|
18 |
+
_C.CONTRASTIVE = CfgNode()
|
19 |
+
|
20 |
+
# temperature used for contrastive losses
|
21 |
+
_C.CONTRASTIVE.T = 0.07
|
22 |
+
|
23 |
+
# output dimension for the loss
|
24 |
+
_C.CONTRASTIVE.DIM = 128
|
25 |
+
|
26 |
+
# number of training samples (for kNN bank)
|
27 |
+
_C.CONTRASTIVE.LENGTH = 239975
|
28 |
+
|
29 |
+
# the length of MoCo's and MemBanks' queues
|
30 |
+
_C.CONTRASTIVE.QUEUE_LEN = 65536
|
31 |
+
|
32 |
+
# momentum for momentum encoder updates
|
33 |
+
_C.CONTRASTIVE.MOMENTUM = 0.5
|
34 |
+
|
35 |
+
# wether to anneal momentum to value above with cosine schedule
|
36 |
+
_C.CONTRASTIVE.MOMENTUM_ANNEALING = False
|
37 |
+
|
38 |
+
# either memorybank, moco, simclr, byol, swav
|
39 |
+
_C.CONTRASTIVE.TYPE = "mem"
|
40 |
+
|
41 |
+
# wether to interpolate memorybank in time
|
42 |
+
_C.CONTRASTIVE.INTERP_MEMORY = False
|
43 |
+
|
44 |
+
# 1d or 2d (+temporal) memory
|
45 |
+
_C.CONTRASTIVE.MEM_TYPE = "1d"
|
46 |
+
|
47 |
+
# number of classes for online kNN evaluation
|
48 |
+
_C.CONTRASTIVE.NUM_CLASSES_DOWNSTREAM = 400
|
49 |
+
|
50 |
+
# use an MLP projection with these num layers
|
51 |
+
_C.CONTRASTIVE.NUM_MLP_LAYERS = 1
|
52 |
+
|
53 |
+
# dimension of projection and predictor MLPs
|
54 |
+
_C.CONTRASTIVE.MLP_DIM = 2048
|
55 |
+
|
56 |
+
# use BN in projection/prediction MLP
|
57 |
+
_C.CONTRASTIVE.BN_MLP = False
|
58 |
+
|
59 |
+
# use synchronized BN in projection/prediction MLP
|
60 |
+
_C.CONTRASTIVE.BN_SYNC_MLP = False
|
61 |
+
|
62 |
+
# shuffle BN only locally vs. across machines
|
63 |
+
_C.CONTRASTIVE.LOCAL_SHUFFLE_BN = True
|
64 |
+
|
65 |
+
# Wether to fill multiple clips (or just the first) into queue
|
66 |
+
_C.CONTRASTIVE.MOCO_MULTI_VIEW_QUEUE = False
|
67 |
+
|
68 |
+
# if sampling multiple clips per vid they need to be at least min frames apart
|
69 |
+
_C.CONTRASTIVE.DELTA_CLIPS_MIN = -math.inf
|
70 |
+
|
71 |
+
# if sampling multiple clips per vid they can be max frames apart
|
72 |
+
_C.CONTRASTIVE.DELTA_CLIPS_MAX = math.inf
|
73 |
+
|
74 |
+
# if non empty, use predictors with depth specified
|
75 |
+
_C.CONTRASTIVE.PREDICTOR_DEPTHS = []
|
76 |
+
|
77 |
+
# Wether to sequentially process multiple clips (=lower mem usage) or batch them
|
78 |
+
_C.CONTRASTIVE.SEQUENTIAL = False
|
79 |
+
|
80 |
+
# Wether to perform SimCLR loss across machines (or only locally)
|
81 |
+
_C.CONTRASTIVE.SIMCLR_DIST_ON = True
|
82 |
+
|
83 |
+
# Length of queue used in SwAV
|
84 |
+
_C.CONTRASTIVE.SWAV_QEUE_LEN = 0
|
85 |
+
|
86 |
+
# Wether to run online kNN evaluation during training
|
87 |
+
_C.CONTRASTIVE.KNN_ON = True
|
88 |
+
|
89 |
+
|
90 |
+
# ---------------------------------------------------------------------------- #
|
91 |
+
# Batch norm options
|
92 |
+
# ---------------------------------------------------------------------------- #
|
93 |
+
_C.BN = CfgNode()
|
94 |
+
|
95 |
+
# Precise BN stats.
|
96 |
+
_C.BN.USE_PRECISE_STATS = False
|
97 |
+
|
98 |
+
# Number of samples use to compute precise bn.
|
99 |
+
_C.BN.NUM_BATCHES_PRECISE = 200
|
100 |
+
|
101 |
+
# Weight decay value that applies on BN.
|
102 |
+
_C.BN.WEIGHT_DECAY = 0.0
|
103 |
+
|
104 |
+
# Norm type, options include `batchnorm`, `sub_batchnorm`, `sync_batchnorm`
|
105 |
+
_C.BN.NORM_TYPE = "batchnorm"
|
106 |
+
|
107 |
+
# Parameter for SubBatchNorm, where it splits the batch dimension into
|
108 |
+
# NUM_SPLITS splits, and run BN on each of them separately independently.
|
109 |
+
_C.BN.NUM_SPLITS = 1
|
110 |
+
|
111 |
+
# Parameter for NaiveSyncBatchNorm, where the stats across `NUM_SYNC_DEVICES`
|
112 |
+
# devices will be synchronized. `NUM_SYNC_DEVICES` cannot be larger than number of
|
113 |
+
# devices per machine; if global sync is desired, set `GLOBAL_SYNC`.
|
114 |
+
# By default ONLY applies to NaiveSyncBatchNorm3d; consider also setting
|
115 |
+
# CONTRASTIVE.BN_SYNC_MLP if appropriate.
|
116 |
+
_C.BN.NUM_SYNC_DEVICES = 1
|
117 |
+
|
118 |
+
# Parameter for NaiveSyncBatchNorm. Setting `GLOBAL_SYNC` to True synchronizes
|
119 |
+
# stats across all devices, across all machines; in this case, `NUM_SYNC_DEVICES`
|
120 |
+
# must be set to None.
|
121 |
+
# By default ONLY applies to NaiveSyncBatchNorm3d; consider also setting
|
122 |
+
# CONTRASTIVE.BN_SYNC_MLP if appropriate.
|
123 |
+
_C.BN.GLOBAL_SYNC = False
|
124 |
+
|
125 |
+
# ---------------------------------------------------------------------------- #
|
126 |
+
# Training options.
|
127 |
+
# ---------------------------------------------------------------------------- #
|
128 |
+
_C.TRAIN = CfgNode()
|
129 |
+
|
130 |
+
# If True Train the model, else skip training.
|
131 |
+
_C.TRAIN.ENABLE = True
|
132 |
+
|
133 |
+
# Kill training if loss explodes over this ratio from the previous 5 measurements.
|
134 |
+
# Only enforced if > 0.0
|
135 |
+
_C.TRAIN.KILL_LOSS_EXPLOSION_FACTOR = 0.0
|
136 |
+
|
137 |
+
# Dataset.
|
138 |
+
_C.TRAIN.DATASET = "kinetics"
|
139 |
+
|
140 |
+
# Total mini-batch size.
|
141 |
+
_C.TRAIN.BATCH_SIZE = 64
|
142 |
+
|
143 |
+
# Evaluate model on test data every eval period epochs.
|
144 |
+
_C.TRAIN.EVAL_PERIOD = 10
|
145 |
+
|
146 |
+
# Save model checkpoint every checkpoint period epochs.
|
147 |
+
_C.TRAIN.CHECKPOINT_PERIOD = 10
|
148 |
+
|
149 |
+
# Resume training from the latest checkpoint in the output directory.
|
150 |
+
_C.TRAIN.AUTO_RESUME = True
|
151 |
+
|
152 |
+
# Path to the checkpoint to load the initial weight.
|
153 |
+
_C.TRAIN.CHECKPOINT_FILE_PATH = ""
|
154 |
+
|
155 |
+
# Checkpoint types include `caffe2` or `pytorch`.
|
156 |
+
_C.TRAIN.CHECKPOINT_TYPE = "pytorch"
|
157 |
+
|
158 |
+
# If True, perform inflation when loading checkpoint.
|
159 |
+
_C.TRAIN.CHECKPOINT_INFLATE = False
|
160 |
+
|
161 |
+
# If True, reset epochs when loading checkpoint.
|
162 |
+
_C.TRAIN.CHECKPOINT_EPOCH_RESET = False
|
163 |
+
|
164 |
+
# If set, clear all layer names according to the pattern provided.
|
165 |
+
_C.TRAIN.CHECKPOINT_CLEAR_NAME_PATTERN = () # ("backbone.",)
|
166 |
+
|
167 |
+
# If True, use FP16 for activations
|
168 |
+
_C.TRAIN.MIXED_PRECISION = False
|
169 |
+
|
170 |
+
# if True, inflate some params from imagenet model.
|
171 |
+
_C.TRAIN.CHECKPOINT_IN_INIT = False
|
172 |
+
|
173 |
+
# ---------------------------------------------------------------------------- #
|
174 |
+
# Augmentation options.
|
175 |
+
# ---------------------------------------------------------------------------- #
|
176 |
+
_C.AUG = CfgNode()
|
177 |
+
|
178 |
+
# Whether to enable randaug.
|
179 |
+
_C.AUG.ENABLE = False
|
180 |
+
|
181 |
+
# Number of repeated augmentations to used during training.
|
182 |
+
# If this is greater than 1, then the actual batch size is
|
183 |
+
# TRAIN.BATCH_SIZE * AUG.NUM_SAMPLE.
|
184 |
+
_C.AUG.NUM_SAMPLE = 1
|
185 |
+
|
186 |
+
# Not used if using randaug.
|
187 |
+
_C.AUG.COLOR_JITTER = 0.4
|
188 |
+
|
189 |
+
# RandAug parameters.
|
190 |
+
_C.AUG.AA_TYPE = "rand-m9-mstd0.5-inc1"
|
191 |
+
|
192 |
+
# Interpolation method.
|
193 |
+
_C.AUG.INTERPOLATION = "bicubic"
|
194 |
+
|
195 |
+
# Probability of random erasing.
|
196 |
+
_C.AUG.RE_PROB = 0.25
|
197 |
+
|
198 |
+
# Random erasing mode.
|
199 |
+
_C.AUG.RE_MODE = "pixel"
|
200 |
+
|
201 |
+
# Random erase count.
|
202 |
+
_C.AUG.RE_COUNT = 1
|
203 |
+
|
204 |
+
# Do not random erase first (clean) augmentation split.
|
205 |
+
_C.AUG.RE_SPLIT = False
|
206 |
+
|
207 |
+
# Whether to generate input mask during image processing.
|
208 |
+
_C.AUG.GEN_MASK_LOADER = False
|
209 |
+
|
210 |
+
# If True, masking mode is "tube". Default is "cube".
|
211 |
+
_C.AUG.MASK_TUBE = False
|
212 |
+
|
213 |
+
# If True, masking mode is "frame". Default is "cube".
|
214 |
+
_C.AUG.MASK_FRAMES = False
|
215 |
+
|
216 |
+
# The size of generated masks.
|
217 |
+
_C.AUG.MASK_WINDOW_SIZE = [8, 7, 7]
|
218 |
+
|
219 |
+
# The ratio of masked tokens out of all tokens. Also applies to MViT supervised training
|
220 |
+
_C.AUG.MASK_RATIO = 0.0
|
221 |
+
|
222 |
+
# The maximum number of a masked block. None means no maximum limit. (Used only in image MaskFeat.)
|
223 |
+
_C.AUG.MAX_MASK_PATCHES_PER_BLOCK = None
|
224 |
+
|
225 |
+
# ---------------------------------------------------------------------------- #
|
226 |
+
# Masked pretraining visualization options.
|
227 |
+
# ---------------------------------------------------------------------------- #
|
228 |
+
_C.VIS_MASK = CfgNode()
|
229 |
+
|
230 |
+
# Whether to do visualization.
|
231 |
+
_C.VIS_MASK.ENABLE = False
|
232 |
+
|
233 |
+
# ---------------------------------------------------------------------------- #
|
234 |
+
# MipUp options.
|
235 |
+
# ---------------------------------------------------------------------------- #
|
236 |
+
_C.MIXUP = CfgNode()
|
237 |
+
|
238 |
+
# Whether to use mixup.
|
239 |
+
_C.MIXUP.ENABLE = False
|
240 |
+
|
241 |
+
# Mixup alpha.
|
242 |
+
_C.MIXUP.ALPHA = 0.8
|
243 |
+
|
244 |
+
# Cutmix alpha.
|
245 |
+
_C.MIXUP.CUTMIX_ALPHA = 1.0
|
246 |
+
|
247 |
+
# Probability of performing mixup or cutmix when either/both is enabled.
|
248 |
+
_C.MIXUP.PROB = 1.0
|
249 |
+
|
250 |
+
# Probability of switching to cutmix when both mixup and cutmix enabled.
|
251 |
+
_C.MIXUP.SWITCH_PROB = 0.5
|
252 |
+
|
253 |
+
# Label smoothing.
|
254 |
+
_C.MIXUP.LABEL_SMOOTH_VALUE = 0.1
|
255 |
+
|
256 |
+
# ---------------------------------------------------------------------------- #
|
257 |
+
# Testing options
|
258 |
+
# ---------------------------------------------------------------------------- #
|
259 |
+
_C.TEST = CfgNode()
|
260 |
+
|
261 |
+
# If True test the model, else skip the testing.
|
262 |
+
_C.TEST.ENABLE = True
|
263 |
+
|
264 |
+
# Dataset for testing.
|
265 |
+
_C.TEST.DATASET = "kinetics"
|
266 |
+
|
267 |
+
# Total mini-batch size
|
268 |
+
_C.TEST.BATCH_SIZE = 8
|
269 |
+
|
270 |
+
# Path to the checkpoint to load the initial weight.
|
271 |
+
_C.TEST.CHECKPOINT_FILE_PATH = ""
|
272 |
+
|
273 |
+
# Number of clips to sample from a video uniformly for aggregating the
|
274 |
+
# prediction results.
|
275 |
+
_C.TEST.NUM_ENSEMBLE_VIEWS = 10
|
276 |
+
|
277 |
+
# Number of crops to sample from a frame spatially for aggregating the
|
278 |
+
# prediction results.
|
279 |
+
_C.TEST.NUM_SPATIAL_CROPS = 3
|
280 |
+
|
281 |
+
# Checkpoint types include `caffe2` or `pytorch`.
|
282 |
+
_C.TEST.CHECKPOINT_TYPE = "pytorch"
|
283 |
+
# Path to saving prediction results file.
|
284 |
+
_C.TEST.SAVE_RESULTS_PATH = ""
|
285 |
+
|
286 |
+
_C.TEST.NUM_TEMPORAL_CLIPS = []
|
287 |
+
# -----------------------------------------------------------------------------
|
288 |
+
# ResNet options
|
289 |
+
# -----------------------------------------------------------------------------
|
290 |
+
_C.RESNET = CfgNode()
|
291 |
+
|
292 |
+
# Transformation function.
|
293 |
+
_C.RESNET.TRANS_FUNC = "bottleneck_transform"
|
294 |
+
|
295 |
+
# Number of groups. 1 for ResNet, and larger than 1 for ResNeXt).
|
296 |
+
_C.RESNET.NUM_GROUPS = 1
|
297 |
+
|
298 |
+
# Width of each group (64 -> ResNet; 4 -> ResNeXt).
|
299 |
+
_C.RESNET.WIDTH_PER_GROUP = 64
|
300 |
+
|
301 |
+
# Apply relu in a inplace manner.
|
302 |
+
_C.RESNET.INPLACE_RELU = True
|
303 |
+
|
304 |
+
# Apply stride to 1x1 conv.
|
305 |
+
_C.RESNET.STRIDE_1X1 = False
|
306 |
+
|
307 |
+
# If true, initialize the gamma of the final BN of each block to zero.
|
308 |
+
_C.RESNET.ZERO_INIT_FINAL_BN = False
|
309 |
+
|
310 |
+
# If true, initialize the final conv layer of each block to zero.
|
311 |
+
_C.RESNET.ZERO_INIT_FINAL_CONV = False
|
312 |
+
|
313 |
+
# Number of weight layers.
|
314 |
+
_C.RESNET.DEPTH = 50
|
315 |
+
|
316 |
+
# If the current block has more than NUM_BLOCK_TEMP_KERNEL blocks, use temporal
|
317 |
+
# kernel of 1 for the rest of the blocks.
|
318 |
+
_C.RESNET.NUM_BLOCK_TEMP_KERNEL = [[3], [4], [6], [3]]
|
319 |
+
|
320 |
+
# Size of stride on different res stages.
|
321 |
+
_C.RESNET.SPATIAL_STRIDES = [[1], [2], [2], [2]]
|
322 |
+
|
323 |
+
# Size of dilation on different res stages.
|
324 |
+
_C.RESNET.SPATIAL_DILATIONS = [[1], [1], [1], [1]]
|
325 |
+
|
326 |
+
# ---------------------------------------------------------------------------- #
|
327 |
+
# X3D options
|
328 |
+
# See https://arxiv.org/abs/2004.04730 for details about X3D Networks.
|
329 |
+
# ---------------------------------------------------------------------------- #
|
330 |
+
_C.X3D = CfgNode()
|
331 |
+
|
332 |
+
# Width expansion factor.
|
333 |
+
_C.X3D.WIDTH_FACTOR = 1.0
|
334 |
+
|
335 |
+
# Depth expansion factor.
|
336 |
+
_C.X3D.DEPTH_FACTOR = 1.0
|
337 |
+
|
338 |
+
# Bottleneck expansion factor for the 3x3x3 conv.
|
339 |
+
_C.X3D.BOTTLENECK_FACTOR = 1.0 #
|
340 |
+
|
341 |
+
# Dimensions of the last linear layer before classificaiton.
|
342 |
+
_C.X3D.DIM_C5 = 2048
|
343 |
+
|
344 |
+
# Dimensions of the first 3x3 conv layer.
|
345 |
+
_C.X3D.DIM_C1 = 12
|
346 |
+
|
347 |
+
# Whether to scale the width of Res2, default is false.
|
348 |
+
_C.X3D.SCALE_RES2 = False
|
349 |
+
|
350 |
+
# Whether to use a BatchNorm (BN) layer before the classifier, default is false.
|
351 |
+
_C.X3D.BN_LIN5 = False
|
352 |
+
|
353 |
+
# Whether to use channelwise (=depthwise) convolution in the center (3x3x3)
|
354 |
+
# convolution operation of the residual blocks.
|
355 |
+
_C.X3D.CHANNELWISE_3x3x3 = True
|
356 |
+
|
357 |
+
# -----------------------------------------------------------------------------
|
358 |
+
# Nonlocal options
|
359 |
+
# -----------------------------------------------------------------------------
|
360 |
+
_C.NONLOCAL = CfgNode()
|
361 |
+
|
362 |
+
# Index of each stage and block to add nonlocal layers.
|
363 |
+
_C.NONLOCAL.LOCATION = [[[]], [[]], [[]], [[]]]
|
364 |
+
|
365 |
+
# Number of group for nonlocal for each stage.
|
366 |
+
_C.NONLOCAL.GROUP = [[1], [1], [1], [1]]
|
367 |
+
|
368 |
+
# Instatiation to use for non-local layer.
|
369 |
+
_C.NONLOCAL.INSTANTIATION = "dot_product"
|
370 |
+
|
371 |
+
|
372 |
+
# Size of pooling layers used in Non-Local.
|
373 |
+
_C.NONLOCAL.POOL = [
|
374 |
+
# Res2
|
375 |
+
[[1, 2, 2], [1, 2, 2]],
|
376 |
+
# Res3
|
377 |
+
[[1, 2, 2], [1, 2, 2]],
|
378 |
+
# Res4
|
379 |
+
[[1, 2, 2], [1, 2, 2]],
|
380 |
+
# Res5
|
381 |
+
[[1, 2, 2], [1, 2, 2]],
|
382 |
+
]
|
383 |
+
|
384 |
+
# -----------------------------------------------------------------------------
|
385 |
+
# Model options
|
386 |
+
# -----------------------------------------------------------------------------
|
387 |
+
_C.MODEL = CfgNode()
|
388 |
+
|
389 |
+
# Model architecture.
|
390 |
+
_C.MODEL.ARCH = "slowfast"
|
391 |
+
|
392 |
+
# Model name
|
393 |
+
_C.MODEL.MODEL_NAME = "SlowFast"
|
394 |
+
|
395 |
+
# The number of classes to predict for the model.
|
396 |
+
_C.MODEL.NUM_CLASSES = 400
|
397 |
+
|
398 |
+
# Loss function.
|
399 |
+
_C.MODEL.LOSS_FUNC = "cross_entropy"
|
400 |
+
|
401 |
+
# Model architectures that has one single pathway.
|
402 |
+
_C.MODEL.SINGLE_PATHWAY_ARCH = [
|
403 |
+
"2d",
|
404 |
+
"c2d",
|
405 |
+
"i3d",
|
406 |
+
"slow",
|
407 |
+
"x3d",
|
408 |
+
"mvit",
|
409 |
+
"maskmvit",
|
410 |
+
]
|
411 |
+
|
412 |
+
# Model architectures that has multiple pathways.
|
413 |
+
_C.MODEL.MULTI_PATHWAY_ARCH = ["slowfast"]
|
414 |
+
|
415 |
+
# Dropout rate before final projection in the backbone.
|
416 |
+
_C.MODEL.DROPOUT_RATE = 0.5
|
417 |
+
|
418 |
+
# Randomly drop rate for Res-blocks, linearly increase from res2 to res5
|
419 |
+
_C.MODEL.DROPCONNECT_RATE = 0.0
|
420 |
+
|
421 |
+
# The std to initialize the fc layer(s).
|
422 |
+
_C.MODEL.FC_INIT_STD = 0.01
|
423 |
+
|
424 |
+
# Activation layer for the output head.
|
425 |
+
_C.MODEL.HEAD_ACT = "softmax"
|
426 |
+
|
427 |
+
# Activation checkpointing enabled or not to save GPU memory.
|
428 |
+
_C.MODEL.ACT_CHECKPOINT = False
|
429 |
+
|
430 |
+
# If True, detach the final fc layer from the network, by doing so, only the
|
431 |
+
# final fc layer will be trained.
|
432 |
+
_C.MODEL.DETACH_FINAL_FC = False
|
433 |
+
|
434 |
+
# If True, frozen batch norm stats during training.
|
435 |
+
_C.MODEL.FROZEN_BN = False
|
436 |
+
|
437 |
+
# If True, AllReduce gradients are compressed to fp16
|
438 |
+
_C.MODEL.FP16_ALLREDUCE = False
|
439 |
+
|
440 |
+
|
441 |
+
# -----------------------------------------------------------------------------
|
442 |
+
# MViT options
|
443 |
+
# -----------------------------------------------------------------------------
|
444 |
+
_C.MVIT = CfgNode()
|
445 |
+
|
446 |
+
# Options include `conv`, `max`.
|
447 |
+
_C.MVIT.MODE = "conv"
|
448 |
+
|
449 |
+
# If True, perform pool before projection in attention.
|
450 |
+
_C.MVIT.POOL_FIRST = False
|
451 |
+
|
452 |
+
# If True, use cls embed in the network, otherwise don't use cls_embed in transformer.
|
453 |
+
_C.MVIT.CLS_EMBED_ON = True
|
454 |
+
|
455 |
+
# Kernel size for patchtification.
|
456 |
+
_C.MVIT.PATCH_KERNEL = [3, 7, 7]
|
457 |
+
|
458 |
+
# Stride size for patchtification.
|
459 |
+
_C.MVIT.PATCH_STRIDE = [2, 4, 4]
|
460 |
+
|
461 |
+
# Padding size for patchtification.
|
462 |
+
_C.MVIT.PATCH_PADDING = [2, 4, 4]
|
463 |
+
|
464 |
+
# If True, use 2d patch, otherwise use 3d patch.
|
465 |
+
_C.MVIT.PATCH_2D = False
|
466 |
+
|
467 |
+
# Base embedding dimension for the transformer.
|
468 |
+
_C.MVIT.EMBED_DIM = 96
|
469 |
+
|
470 |
+
# Base num of heads for the transformer.
|
471 |
+
_C.MVIT.NUM_HEADS = 1
|
472 |
+
|
473 |
+
# Dimension reduction ratio for the MLP layers.
|
474 |
+
_C.MVIT.MLP_RATIO = 4.0
|
475 |
+
|
476 |
+
# If use, use bias term in attention fc layers.
|
477 |
+
_C.MVIT.QKV_BIAS = True
|
478 |
+
|
479 |
+
# Drop path rate for the tranfomer.
|
480 |
+
_C.MVIT.DROPPATH_RATE = 0.1
|
481 |
+
|
482 |
+
# The initial value of layer scale gamma. Set 0.0 to disable layer scale.
|
483 |
+
_C.MVIT.LAYER_SCALE_INIT_VALUE = 0.0
|
484 |
+
|
485 |
+
# Depth of the transformer.
|
486 |
+
_C.MVIT.DEPTH = 16
|
487 |
+
|
488 |
+
# Normalization layer for the transformer. Only layernorm is supported now.
|
489 |
+
_C.MVIT.NORM = "layernorm"
|
490 |
+
|
491 |
+
# Dimension multiplication at layer i. If 2.0 is used, then the next block will increase
|
492 |
+
# the dimension by 2 times. Format: [depth_i: mul_dim_ratio]
|
493 |
+
_C.MVIT.DIM_MUL = []
|
494 |
+
|
495 |
+
# Head number multiplication at layer i. If 2.0 is used, then the next block will
|
496 |
+
# increase the number of heads by 2 times. Format: [depth_i: head_mul_ratio]
|
497 |
+
_C.MVIT.HEAD_MUL = []
|
498 |
+
|
499 |
+
# Stride size for the Pool KV at layer i.
|
500 |
+
# Format: [[i, stride_t_i, stride_h_i, stride_w_i], ...,]
|
501 |
+
_C.MVIT.POOL_KV_STRIDE = []
|
502 |
+
|
503 |
+
# Initial stride size for KV at layer 1. The stride size will be further reduced with
|
504 |
+
# the raio of MVIT.DIM_MUL. If will overwrite MVIT.POOL_KV_STRIDE if not None.
|
505 |
+
_C.MVIT.POOL_KV_STRIDE_ADAPTIVE = None
|
506 |
+
|
507 |
+
# Stride size for the Pool Q at layer i.
|
508 |
+
# Format: [[i, stride_t_i, stride_h_i, stride_w_i], ...,]
|
509 |
+
_C.MVIT.POOL_Q_STRIDE = []
|
510 |
+
|
511 |
+
# If not None, overwrite the KV_KERNEL and Q_KERNEL size with POOL_KVQ_CONV_SIZ.
|
512 |
+
# Otherwise the kernel_size is [s + 1 if s > 1 else s for s in stride_size].
|
513 |
+
_C.MVIT.POOL_KVQ_KERNEL = None
|
514 |
+
|
515 |
+
# If True, perform no decay on positional embedding and cls embedding.
|
516 |
+
_C.MVIT.ZERO_DECAY_POS_CLS = True
|
517 |
+
|
518 |
+
# If True, use norm after stem.
|
519 |
+
_C.MVIT.NORM_STEM = False
|
520 |
+
|
521 |
+
# If True, perform separate positional embedding.
|
522 |
+
_C.MVIT.SEP_POS_EMBED = False
|
523 |
+
|
524 |
+
# Dropout rate for the MViT backbone.
|
525 |
+
_C.MVIT.DROPOUT_RATE = 0.0
|
526 |
+
|
527 |
+
# If True, use absolute positional embedding.
|
528 |
+
_C.MVIT.USE_ABS_POS = True
|
529 |
+
|
530 |
+
# If True, use relative positional embedding for spatial dimentions
|
531 |
+
_C.MVIT.REL_POS_SPATIAL = False
|
532 |
+
|
533 |
+
# If True, use relative positional embedding for temporal dimentions
|
534 |
+
_C.MVIT.REL_POS_TEMPORAL = False
|
535 |
+
|
536 |
+
# If True, init rel with zero
|
537 |
+
_C.MVIT.REL_POS_ZERO_INIT = False
|
538 |
+
|
539 |
+
# If True, using Residual Pooling connection
|
540 |
+
_C.MVIT.RESIDUAL_POOLING = False
|
541 |
+
|
542 |
+
# Dim mul in qkv linear layers of attention block instead of MLP
|
543 |
+
_C.MVIT.DIM_MUL_IN_ATT = False
|
544 |
+
|
545 |
+
# If True, using separate linear layers for Q, K, V in attention blocks.
|
546 |
+
_C.MVIT.SEPARATE_QKV = False
|
547 |
+
|
548 |
+
# The initialization scale factor for the head parameters.
|
549 |
+
_C.MVIT.HEAD_INIT_SCALE = 1.0
|
550 |
+
|
551 |
+
# Whether to use the mean pooling of all patch tokens as the output.
|
552 |
+
_C.MVIT.USE_MEAN_POOLING = False
|
553 |
+
|
554 |
+
# If True, use frozen sin cos positional embedding.
|
555 |
+
_C.MVIT.USE_FIXED_SINCOS_POS = False
|
556 |
+
|
557 |
+
# -----------------------------------------------------------------------------
|
558 |
+
# Masked pretraining options
|
559 |
+
# -----------------------------------------------------------------------------
|
560 |
+
_C.MASK = CfgNode()
|
561 |
+
|
562 |
+
# Whether to enable Masked style pretraining.
|
563 |
+
_C.MASK.ENABLE = False
|
564 |
+
|
565 |
+
# Whether to enable MAE (discard encoder tokens).
|
566 |
+
_C.MASK.MAE_ON = False
|
567 |
+
|
568 |
+
# Whether to enable random masking in mae
|
569 |
+
_C.MASK.MAE_RND_MASK = False
|
570 |
+
|
571 |
+
# Whether to do random masking per-frame in mae
|
572 |
+
_C.MASK.PER_FRAME_MASKING = False
|
573 |
+
|
574 |
+
# only predict loss on temporal strided patches, or predict full time extent
|
575 |
+
_C.MASK.TIME_STRIDE_LOSS = True
|
576 |
+
|
577 |
+
# Whether to normalize the pred pixel loss
|
578 |
+
_C.MASK.NORM_PRED_PIXEL = True
|
579 |
+
|
580 |
+
# Whether to fix initialization with inverse depth of layer for pretraining.
|
581 |
+
_C.MASK.SCALE_INIT_BY_DEPTH = False
|
582 |
+
|
583 |
+
# Base embedding dimension for the decoder transformer.
|
584 |
+
_C.MASK.DECODER_EMBED_DIM = 512
|
585 |
+
|
586 |
+
# Base embedding dimension for the decoder transformer.
|
587 |
+
_C.MASK.DECODER_SEP_POS_EMBED = False
|
588 |
+
|
589 |
+
# Use a KV kernel in decoder?
|
590 |
+
_C.MASK.DEC_KV_KERNEL = []
|
591 |
+
|
592 |
+
# Use a KV stride in decoder?
|
593 |
+
_C.MASK.DEC_KV_STRIDE = []
|
594 |
+
|
595 |
+
# The depths of features which are inputs of the prediction head.
|
596 |
+
_C.MASK.PRETRAIN_DEPTH = [15]
|
597 |
+
|
598 |
+
# The type of Masked pretraining prediction head.
|
599 |
+
# Can be "separate", "separate_xformer".
|
600 |
+
_C.MASK.HEAD_TYPE = "separate"
|
601 |
+
|
602 |
+
# The depth of MAE's decoder
|
603 |
+
_C.MASK.DECODER_DEPTH = 0
|
604 |
+
|
605 |
+
# The weight of HOG target loss.
|
606 |
+
_C.MASK.PRED_HOG = False
|
607 |
+
# Reversible Configs
|
608 |
+
_C.MVIT.REV = CfgNode()
|
609 |
+
|
610 |
+
# Enable Reversible Model
|
611 |
+
_C.MVIT.REV.ENABLE = False
|
612 |
+
|
613 |
+
# Method to fuse the reversible paths
|
614 |
+
# see :class: `TwoStreamFusion` for all the options
|
615 |
+
_C.MVIT.REV.RESPATH_FUSE = "concat"
|
616 |
+
|
617 |
+
# Layers to buffer activations at
|
618 |
+
# (at least Q-pooling layers needed)
|
619 |
+
_C.MVIT.REV.BUFFER_LAYERS = []
|
620 |
+
|
621 |
+
# 'conv' or 'max' operator for the respath in Qpooling
|
622 |
+
_C.MVIT.REV.RES_PATH = "conv"
|
623 |
+
|
624 |
+
# Method to merge hidden states before Qpoolinglayers
|
625 |
+
_C.MVIT.REV.PRE_Q_FUSION = "avg"
|
626 |
+
|
627 |
+
# -----------------------------------------------------------------------------
|
628 |
+
# SlowFast options
|
629 |
+
# -----------------------------------------------------------------------------
|
630 |
+
_C.SLOWFAST = CfgNode()
|
631 |
+
|
632 |
+
# Corresponds to the inverse of the channel reduction ratio, $\beta$ between
|
633 |
+
# the Slow and Fast pathways.
|
634 |
+
_C.SLOWFAST.BETA_INV = 8
|
635 |
+
|
636 |
+
# Corresponds to the frame rate reduction ratio, $\alpha$ between the Slow and
|
637 |
+
# Fast pathways.
|
638 |
+
_C.SLOWFAST.ALPHA = 8
|
639 |
+
|
640 |
+
# Ratio of channel dimensions between the Slow and Fast pathways.
|
641 |
+
_C.SLOWFAST.FUSION_CONV_CHANNEL_RATIO = 2
|
642 |
+
|
643 |
+
# Kernel dimension used for fusing information from Fast pathway to Slow
|
644 |
+
# pathway.
|
645 |
+
_C.SLOWFAST.FUSION_KERNEL_SZ = 5
|
646 |
+
|
647 |
+
|
648 |
+
# -----------------------------------------------------------------------------
|
649 |
+
# Data options
|
650 |
+
# -----------------------------------------------------------------------------
|
651 |
+
_C.DATA = CfgNode()
|
652 |
+
|
653 |
+
# The path to the data directory.
|
654 |
+
_C.DATA.PATH_TO_DATA_DIR = ""
|
655 |
+
|
656 |
+
# The separator used between path and label.
|
657 |
+
_C.DATA.PATH_LABEL_SEPARATOR = " "
|
658 |
+
|
659 |
+
# Video path prefix if any.
|
660 |
+
_C.DATA.PATH_PREFIX = ""
|
661 |
+
|
662 |
+
# The number of frames of the input clip.
|
663 |
+
_C.DATA.NUM_FRAMES = 8
|
664 |
+
|
665 |
+
# The video sampling rate of the input clip.
|
666 |
+
_C.DATA.SAMPLING_RATE = 8
|
667 |
+
|
668 |
+
# Eigenvalues for PCA jittering. Note PCA is RGB based.
|
669 |
+
_C.DATA.TRAIN_PCA_EIGVAL = [0.225, 0.224, 0.229]
|
670 |
+
|
671 |
+
# Eigenvectors for PCA jittering.
|
672 |
+
_C.DATA.TRAIN_PCA_EIGVEC = [
|
673 |
+
[-0.5675, 0.7192, 0.4009],
|
674 |
+
[-0.5808, -0.0045, -0.8140],
|
675 |
+
[-0.5836, -0.6948, 0.4203],
|
676 |
+
]
|
677 |
+
|
678 |
+
# If a imdb have been dumpped to a local file with the following format:
|
679 |
+
# `{"im_path": im_path, "class": cont_id}`
|
680 |
+
# then we can skip the construction of imdb and load it from the local file.
|
681 |
+
_C.DATA.PATH_TO_PRELOAD_IMDB = ""
|
682 |
+
|
683 |
+
# The mean value of the video raw pixels across the R G B channels.
|
684 |
+
_C.DATA.MEAN = [0.45, 0.45, 0.45]
|
685 |
+
# List of input frame channel dimensions.
|
686 |
+
|
687 |
+
_C.DATA.INPUT_CHANNEL_NUM = [3, 3]
|
688 |
+
|
689 |
+
# The std value of the video raw pixels across the R G B channels.
|
690 |
+
_C.DATA.STD = [0.225, 0.225, 0.225]
|
691 |
+
|
692 |
+
# The spatial augmentation jitter scales for training.
|
693 |
+
_C.DATA.TRAIN_JITTER_SCALES = [256, 320]
|
694 |
+
|
695 |
+
# The relative scale range of Inception-style area based random resizing augmentation.
|
696 |
+
# If this is provided, DATA.TRAIN_JITTER_SCALES above is ignored.
|
697 |
+
_C.DATA.TRAIN_JITTER_SCALES_RELATIVE = []
|
698 |
+
|
699 |
+
# The relative aspect ratio range of Inception-style area based random resizing
|
700 |
+
# augmentation.
|
701 |
+
_C.DATA.TRAIN_JITTER_ASPECT_RELATIVE = []
|
702 |
+
|
703 |
+
# If True, perform stride length uniform temporal sampling.
|
704 |
+
_C.DATA.USE_OFFSET_SAMPLING = False
|
705 |
+
|
706 |
+
# Whether to apply motion shift for augmentation.
|
707 |
+
_C.DATA.TRAIN_JITTER_MOTION_SHIFT = False
|
708 |
+
|
709 |
+
# The spatial crop size for training.
|
710 |
+
_C.DATA.TRAIN_CROP_SIZE = 224
|
711 |
+
|
712 |
+
# The spatial crop size for testing.
|
713 |
+
_C.DATA.TEST_CROP_SIZE = 256
|
714 |
+
|
715 |
+
# Input videos may has different fps, convert it to the target video fps before
|
716 |
+
# frame sampling.
|
717 |
+
_C.DATA.TARGET_FPS = 30
|
718 |
+
|
719 |
+
# JITTER TARGET_FPS by +- this number randomly
|
720 |
+
_C.DATA.TRAIN_JITTER_FPS = 0.0
|
721 |
+
|
722 |
+
# Decoding backend, options include `pyav` or `torchvision`
|
723 |
+
_C.DATA.DECODING_BACKEND = "torchvision"
|
724 |
+
|
725 |
+
# Decoding resize to short size (set to native size for best speed)
|
726 |
+
_C.DATA.DECODING_SHORT_SIZE = 256
|
727 |
+
|
728 |
+
# if True, sample uniformly in [1 / max_scale, 1 / min_scale] and take a
|
729 |
+
# reciprocal to get the scale. If False, take a uniform sample from
|
730 |
+
# [min_scale, max_scale].
|
731 |
+
_C.DATA.INV_UNIFORM_SAMPLE = False
|
732 |
+
|
733 |
+
# If True, perform random horizontal flip on the video frames during training.
|
734 |
+
_C.DATA.RANDOM_FLIP = True
|
735 |
+
|
736 |
+
# If True, calculdate the map as metric.
|
737 |
+
_C.DATA.MULTI_LABEL = False
|
738 |
+
|
739 |
+
# Method to perform the ensemble, options include "sum" and "max".
|
740 |
+
_C.DATA.ENSEMBLE_METHOD = "sum"
|
741 |
+
|
742 |
+
# If True, revert the default input channel (RBG <-> BGR).
|
743 |
+
_C.DATA.REVERSE_INPUT_CHANNEL = False
|
744 |
+
|
745 |
+
# how many samples (=clips) to decode from a single video
|
746 |
+
_C.DATA.TRAIN_CROP_NUM_TEMPORAL = 1
|
747 |
+
|
748 |
+
# how many spatial samples to crop from a single clip
|
749 |
+
_C.DATA.TRAIN_CROP_NUM_SPATIAL = 1
|
750 |
+
|
751 |
+
# color random percentage for grayscale conversion
|
752 |
+
_C.DATA.COLOR_RND_GRAYSCALE = 0.0
|
753 |
+
|
754 |
+
# loader can read .csv file in chunks of this chunk size
|
755 |
+
_C.DATA.LOADER_CHUNK_SIZE = 0
|
756 |
+
|
757 |
+
# if LOADER_CHUNK_SIZE > 0, define overall length of .csv file
|
758 |
+
_C.DATA.LOADER_CHUNK_OVERALL_SIZE = 0
|
759 |
+
|
760 |
+
# for chunked reading, dataloader can skip rows in (large)
|
761 |
+
# training csv file
|
762 |
+
_C.DATA.SKIP_ROWS = 0
|
763 |
+
|
764 |
+
# The separator used between path and label.
|
765 |
+
_C.DATA.PATH_LABEL_SEPARATOR = " "
|
766 |
+
|
767 |
+
# augmentation probability to convert raw decoded video to
|
768 |
+
# grayscale temporal difference
|
769 |
+
_C.DATA.TIME_DIFF_PROB = 0.0
|
770 |
+
|
771 |
+
# Apply SSL-based SimCLR / MoCo v1/v2 color augmentations,
|
772 |
+
# with params below
|
773 |
+
_C.DATA.SSL_COLOR_JITTER = False
|
774 |
+
|
775 |
+
# color jitter percentage for brightness, contrast, saturation
|
776 |
+
_C.DATA.SSL_COLOR_BRI_CON_SAT = [0.4, 0.4, 0.4]
|
777 |
+
|
778 |
+
# color jitter percentage for hue
|
779 |
+
_C.DATA.SSL_COLOR_HUE = 0.1
|
780 |
+
|
781 |
+
# SimCLR / MoCo v2 augmentations on/off
|
782 |
+
_C.DATA.SSL_MOCOV2_AUG = False
|
783 |
+
|
784 |
+
# SimCLR / MoCo v2 blur augmentation minimum gaussian sigma
|
785 |
+
_C.DATA.SSL_BLUR_SIGMA_MIN = [0.0, 0.1]
|
786 |
+
|
787 |
+
# SimCLR / MoCo v2 blur augmentation maximum gaussian sigma
|
788 |
+
_C.DATA.SSL_BLUR_SIGMA_MAX = [0.0, 2.0]
|
789 |
+
|
790 |
+
|
791 |
+
# If combine train/val split as training for in21k
|
792 |
+
_C.DATA.IN22K_TRAINVAL = False
|
793 |
+
|
794 |
+
# If not None, use IN1k as val split when training in21k
|
795 |
+
_C.DATA.IN22k_VAL_IN1K = ""
|
796 |
+
|
797 |
+
# Large resolution models may use different crop ratios
|
798 |
+
_C.DATA.IN_VAL_CROP_RATIO = 0.875 # 224/256 = 0.875
|
799 |
+
|
800 |
+
# don't use real video for kinetics.py
|
801 |
+
_C.DATA.DUMMY_LOAD = False
|
802 |
+
|
803 |
+
# ---------------------------------------------------------------------------- #
|
804 |
+
# Optimizer options
|
805 |
+
# ---------------------------------------------------------------------------- #
|
806 |
+
_C.SOLVER = CfgNode()
|
807 |
+
|
808 |
+
# Base learning rate.
|
809 |
+
_C.SOLVER.BASE_LR = 0.1
|
810 |
+
|
811 |
+
# Learning rate policy (see utils/lr_policy.py for options and examples).
|
812 |
+
_C.SOLVER.LR_POLICY = "cosine"
|
813 |
+
|
814 |
+
# Final learning rates for 'cosine' policy.
|
815 |
+
_C.SOLVER.COSINE_END_LR = 0.0
|
816 |
+
|
817 |
+
# Exponential decay factor.
|
818 |
+
_C.SOLVER.GAMMA = 0.1
|
819 |
+
|
820 |
+
# Step size for 'exp' and 'cos' policies (in epochs).
|
821 |
+
_C.SOLVER.STEP_SIZE = 1
|
822 |
+
|
823 |
+
# Steps for 'steps_' policies (in epochs).
|
824 |
+
_C.SOLVER.STEPS = []
|
825 |
+
|
826 |
+
# Learning rates for 'steps_' policies.
|
827 |
+
_C.SOLVER.LRS = []
|
828 |
+
|
829 |
+
# Maximal number of epochs.
|
830 |
+
_C.SOLVER.MAX_EPOCH = 300
|
831 |
+
|
832 |
+
# Momentum.
|
833 |
+
_C.SOLVER.MOMENTUM = 0.9
|
834 |
+
|
835 |
+
# Momentum dampening.
|
836 |
+
_C.SOLVER.DAMPENING = 0.0
|
837 |
+
|
838 |
+
# Nesterov momentum.
|
839 |
+
_C.SOLVER.NESTEROV = True
|
840 |
+
|
841 |
+
# L2 regularization.
|
842 |
+
_C.SOLVER.WEIGHT_DECAY = 1e-4
|
843 |
+
|
844 |
+
# Start the warm up from SOLVER.BASE_LR * SOLVER.WARMUP_FACTOR.
|
845 |
+
_C.SOLVER.WARMUP_FACTOR = 0.1
|
846 |
+
|
847 |
+
# Gradually warm up the SOLVER.BASE_LR over this number of epochs.
|
848 |
+
_C.SOLVER.WARMUP_EPOCHS = 0.0
|
849 |
+
|
850 |
+
# The start learning rate of the warm up.
|
851 |
+
_C.SOLVER.WARMUP_START_LR = 0.01
|
852 |
+
|
853 |
+
# Optimization method.
|
854 |
+
_C.SOLVER.OPTIMIZING_METHOD = "sgd"
|
855 |
+
|
856 |
+
# Base learning rate is linearly scaled with NUM_SHARDS.
|
857 |
+
_C.SOLVER.BASE_LR_SCALE_NUM_SHARDS = False
|
858 |
+
|
859 |
+
# If True, start from the peak cosine learning rate after warm up.
|
860 |
+
_C.SOLVER.COSINE_AFTER_WARMUP = False
|
861 |
+
|
862 |
+
# If True, perform no weight decay on parameter with one dimension (bias term, etc).
|
863 |
+
_C.SOLVER.ZERO_WD_1D_PARAM = False
|
864 |
+
|
865 |
+
# Clip gradient at this value before optimizer update
|
866 |
+
_C.SOLVER.CLIP_GRAD_VAL = None
|
867 |
+
|
868 |
+
# Clip gradient at this norm before optimizer update
|
869 |
+
_C.SOLVER.CLIP_GRAD_L2NORM = None
|
870 |
+
|
871 |
+
# LARS optimizer
|
872 |
+
_C.SOLVER.LARS_ON = False
|
873 |
+
|
874 |
+
# The layer-wise decay of learning rate. Set to 1. to disable.
|
875 |
+
_C.SOLVER.LAYER_DECAY = 1.0
|
876 |
+
|
877 |
+
# Adam's beta
|
878 |
+
_C.SOLVER.BETAS = (0.9, 0.999)
|
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #

# The name of the current task; e.g. "ssl"/"sl" for (self)supervised learning.
_C.TASK = ""

# Number of GPUs to use (applies to both training and testing).
_C.NUM_GPUS = 1

# Number of machines to use for the job.
_C.NUM_SHARDS = 1

# The index of the current machine.
_C.SHARD_ID = 0

# Output basedir.
_C.OUTPUT_DIR = "."

# Note that non-determinism may still be present due to non-deterministic
# operator implementations in GPU operator libraries.
_C.RNG_SEED = 1

# Log period in iters.
_C.LOG_PERIOD = 10

# If True, log the model info.
_C.LOG_MODEL_INFO = True

# Distributed backend.
_C.DIST_BACKEND = "nccl"

# ---------------------------------------------------------------------------- #
# Benchmark options
# ---------------------------------------------------------------------------- #
_C.BENCHMARK = CfgNode()

# Number of epochs for the data loading benchmark.
_C.BENCHMARK.NUM_EPOCHS = 5

# Log period in iters for the data loading benchmark.
_C.BENCHMARK.LOG_PERIOD = 100

# If True, shuffle the dataloader for each epoch during the benchmark.
_C.BENCHMARK.SHUFFLE = True


# ---------------------------------------------------------------------------- #
# Common train/test data loader options
# ---------------------------------------------------------------------------- #
_C.DATA_LOADER = CfgNode()

# Number of data loader workers per training process.
_C.DATA_LOADER.NUM_WORKERS = 8

# Load data to pinned host memory.
_C.DATA_LOADER.PIN_MEMORY = True

# Enable multi-threaded decoding.
_C.DATA_LOADER.ENABLE_MULTI_THREAD_DECODE = False


# ---------------------------------------------------------------------------- #
# Detection options.
# ---------------------------------------------------------------------------- #
_C.DETECTION = CfgNode()

# Whether to enable video detection.
_C.DETECTION.ENABLE = False

# Aligned version of RoI. More details can be found at slowfast/models/head_helper.py
_C.DETECTION.ALIGNED = True

# Spatial scale factor.
_C.DETECTION.SPATIAL_SCALE_FACTOR = 16

# RoI transformation resolution.
_C.DETECTION.ROI_XFORM_RESOLUTION = 7


# -----------------------------------------------------------------------------
# AVA Dataset options
# -----------------------------------------------------------------------------
_C.AVA = CfgNode()

# Directory path of frames.
_C.AVA.FRAME_DIR = "/mnt/fair-flash3-east/ava_trainval_frames.img/"

# Directory path for files of frame lists.
_C.AVA.FRAME_LIST_DIR = (
    "/mnt/vol/gfsai-flash3-east/ai-group/users/haoqifan/ava/frame_list/"
)

# Directory path for annotation files.
_C.AVA.ANNOTATION_DIR = (
    "/mnt/vol/gfsai-flash3-east/ai-group/users/haoqifan/ava/frame_list/"
)

# Filenames of training sample list files.
_C.AVA.TRAIN_LISTS = ["train.csv"]

# Filenames of test sample list files.
_C.AVA.TEST_LISTS = ["val.csv"]

# Filenames of box list files for training. Note that we assume files which
# contain predicted boxes will have the suffix "predicted_boxes" in the
# filename.
_C.AVA.TRAIN_GT_BOX_LISTS = ["ava_train_v2.2.csv"]
_C.AVA.TRAIN_PREDICT_BOX_LISTS = []

# Filenames of box list files for test.
_C.AVA.TEST_PREDICT_BOX_LISTS = ["ava_val_predicted_boxes.csv"]

# This option controls the score threshold for the predicted boxes to use.
_C.AVA.DETECTION_SCORE_THRESH = 0.9

# If True, use BGR as the format of input frames.
_C.AVA.BGR = False

# Training augmentation parameters.
# Whether to use the color augmentation method.
_C.AVA.TRAIN_USE_COLOR_AUGMENTATION = False

# Whether to only use PCA jitter augmentation when using color augmentation
# (otherwise combine with the color jitter method).
_C.AVA.TRAIN_PCA_JITTER_ONLY = True

# Whether to do horizontal flipping during test.
_C.AVA.TEST_FORCE_FLIP = False

# Whether to use the full test set for the validation split.
_C.AVA.FULL_TEST_ON_VAL = False

# The name of the file with the AVA label map.
_C.AVA.LABEL_MAP_FILE = "ava_action_list_v2.2_for_activitynet_2019.pbtxt"

# The name of the file with the AVA exclusions.
_C.AVA.EXCLUSION_FILE = "ava_val_excluded_timestamps_v2.2.csv"

# The name of the file with the AVA ground truth.
_C.AVA.GROUNDTRUTH_FILE = "ava_val_v2.2.csv"

# Backend to process images, one of `pytorch` or `cv2`.
_C.AVA.IMG_PROC_BACKEND = "cv2"

# ---------------------------------------------------------------------------- #
# Multigrid training options
# See https://arxiv.org/abs/1912.00998 for details about multigrid training.
# ---------------------------------------------------------------------------- #
_C.MULTIGRID = CfgNode()

# Multigrid training allows us to train for more epochs with fewer iterations.
# This hyperparameter specifies how many times more epochs to train.
# The default setting in the paper trains for 1.5x more epochs than the baseline.
_C.MULTIGRID.EPOCH_FACTOR = 1.5

# Enable short cycles.
_C.MULTIGRID.SHORT_CYCLE = False
# Short cycle additional spatial dimensions relative to the default crop size.
_C.MULTIGRID.SHORT_CYCLE_FACTORS = [0.5, 0.5**0.5]

_C.MULTIGRID.LONG_CYCLE = False
# (Temporal, Spatial) dimensions relative to the default shape.
_C.MULTIGRID.LONG_CYCLE_FACTORS = [
    (0.25, 0.5**0.5),
    (0.5, 0.5**0.5),
    (0.5, 1),
    (1, 1),
]

# While a standard BN computes stats across all examples in a GPU,
# for multigrid training we fix the number of clips to compute BN stats on.
# See https://arxiv.org/abs/1912.00998 for details.
_C.MULTIGRID.BN_BASE_SIZE = 8

# Multigrid training epochs are not proportional to actual training time or
# computation, so _C.TRAIN.EVAL_PERIOD leads to too frequent or too rare
# evaluation. We use a multigrid-specific rule to determine when to evaluate:
# this hyperparameter defines how many times to evaluate a model per long
# cycle shape.
_C.MULTIGRID.EVAL_FREQ = 3

# No need to specify; set automatically and used as global variables.
_C.MULTIGRID.LONG_CYCLE_SAMPLING_RATE = 0
_C.MULTIGRID.DEFAULT_B = 0
_C.MULTIGRID.DEFAULT_T = 0
_C.MULTIGRID.DEFAULT_S = 0

# -----------------------------------------------------------------------------
# Tensorboard Visualization Options
# -----------------------------------------------------------------------------
_C.TENSORBOARD = CfgNode()

# Log to the summary writer; this will automatically
# log loss, lr and metrics during train/eval.
_C.TENSORBOARD.ENABLE = False
# Provide a path to prediction results for visualization.
# This is a pickle file of [prediction_tensor, label_tensor].
_C.TENSORBOARD.PREDICTIONS_PATH = ""
# Path to the directory for tensorboard logs.
# Defaults to cfg.OUTPUT_DIR/runs-{cfg.TRAIN.DATASET}.
_C.TENSORBOARD.LOG_DIR = ""
# Path to a json file providing a class_name - id mapping
# in the format {"class_name1": id1, "class_name2": id2, ...}.
# This file must be provided to enable plotting the confusion matrix
# by a subset or parent categories.
_C.TENSORBOARD.CLASS_NAMES_PATH = ""

# Path to a json file for the categories -> classes mapping
# in the format {"parent_class": ["child_class1", "child_class2", ...], ...}.
_C.TENSORBOARD.CATEGORIES_PATH = ""

# Config for confusion matrix visualization.
_C.TENSORBOARD.CONFUSION_MATRIX = CfgNode()
# Visualize the confusion matrix.
_C.TENSORBOARD.CONFUSION_MATRIX.ENABLE = False
# Figure size of the confusion matrices plotted.
_C.TENSORBOARD.CONFUSION_MATRIX.FIGSIZE = [8, 8]
# Path to a subset of categories to visualize.
# The file contains class names separated by newline characters.
_C.TENSORBOARD.CONFUSION_MATRIX.SUBSET_PATH = ""

# Config for histogram visualization.
_C.TENSORBOARD.HISTOGRAM = CfgNode()
# Visualize histograms.
_C.TENSORBOARD.HISTOGRAM.ENABLE = False
# Path to a subset of classes to plot histograms for.
# Class names must be separated by newline characters.
_C.TENSORBOARD.HISTOGRAM.SUBSET_PATH = ""
# Visualize the top-k most predicted classes on histograms for each
# chosen true label.
_C.TENSORBOARD.HISTOGRAM.TOPK = 10
# Figure size of the histograms plotted.
_C.TENSORBOARD.HISTOGRAM.FIGSIZE = [8, 8]

# Config for visualizing layers' weights and activations.
# _C.TENSORBOARD.ENABLE must be True.
_C.TENSORBOARD.MODEL_VIS = CfgNode()

# If False, skip model visualization.
_C.TENSORBOARD.MODEL_VIS.ENABLE = False

# If False, skip visualizing model weights.
_C.TENSORBOARD.MODEL_VIS.MODEL_WEIGHTS = False

# If False, skip visualizing model activations.
_C.TENSORBOARD.MODEL_VIS.ACTIVATIONS = False

# If False, skip visualizing input videos.
_C.TENSORBOARD.MODEL_VIS.INPUT_VIDEO = False


# List of strings containing data about layer names and their indexing to
# visualize weights and activations for. The indexing is meant for
# choosing a subset of activations output by a layer for visualization.
# If indexing is not specified, visualize all activations output by the layer.
# For each string, the layer name and indexing are separated by whitespace.
# e.g.: [layer1 1,2;1,2, layer2, layer3 150,151;3,4]; this means for each array `arr`
# along the batch dimension in `layer1`, we take arr[[1, 2], [1, 2]]
_C.TENSORBOARD.MODEL_VIS.LAYER_LIST = []
# Top-k predictions to plot on videos.
_C.TENSORBOARD.MODEL_VIS.TOPK_PREDS = 1
# Colormap for text boxes and bounding boxes colors.
_C.TENSORBOARD.MODEL_VIS.COLORMAP = "Pastel2"
# Config for visualizing video inputs with Grad-CAM.
# _C.TENSORBOARD.ENABLE must be True.
_C.TENSORBOARD.MODEL_VIS.GRAD_CAM = CfgNode()
# Whether to run visualization using the Grad-CAM technique.
_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.ENABLE = True
# CNN layers to use for Grad-CAM. The number of layers must be equal to the
# number of pathway(s).
_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.LAYER_LIST = []
# If True, visualize Grad-CAM using the true label for each instance.
# If False, use the highest predicted class.
_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.USE_TRUE_LABEL = False
# Colormap for text boxes and bounding boxes colors.
_C.TENSORBOARD.MODEL_VIS.GRAD_CAM.COLORMAP = "viridis"

# Config for wrong prediction visualization.
# _C.TENSORBOARD.ENABLE must be True.
_C.TENSORBOARD.WRONG_PRED_VIS = CfgNode()
_C.TENSORBOARD.WRONG_PRED_VIS.ENABLE = False
# Folder tag to organize model eval videos under.
_C.TENSORBOARD.WRONG_PRED_VIS.TAG = "Incorrectly classified videos."
# Subset of labels to visualize. Only wrong predictions with true labels
# within this subset are visualized.
_C.TENSORBOARD.WRONG_PRED_VIS.SUBSET_PATH = ""


# ---------------------------------------------------------------------------- #
# Demo options
# ---------------------------------------------------------------------------- #
_C.DEMO = CfgNode()

# Run the model in DEMO mode.
_C.DEMO.ENABLE = False

# Path to a json file providing a class_name - id mapping
# in the format {"class_name1": id1, "class_name2": id2, ...}.
_C.DEMO.LABEL_FILE_PATH = ""

# Specify a camera device as input. This will be prioritized
# over input video if set.
# If -1, use the input video instead.
_C.DEMO.WEBCAM = -1

# Path to the input video for the demo.
_C.DEMO.INPUT_VIDEO = ""
# Custom width for reading input video data.
_C.DEMO.DISPLAY_WIDTH = 0
# Custom height for reading input video data.
_C.DEMO.DISPLAY_HEIGHT = 0
# Path to the Detectron2 object detection model configuration,
# only used for detection tasks.
_C.DEMO.DETECTRON2_CFG = "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"
# Path to the Detectron2 object detection model pre-trained weights.
_C.DEMO.DETECTRON2_WEIGHTS = "detectron2://COCO-Detection/faster_rcnn_R_50_FPN_3x/137849458/model_final_280758.pkl"
# Threshold for choosing predicted bounding boxes by Detectron2.
_C.DEMO.DETECTRON2_THRESH = 0.9
# Number of overlapping frames between 2 consecutive clips.
# Increase this number for more frequent action predictions.
# The number of overlapping frames cannot be larger than
# half of the sequence length `cfg.DATA.NUM_FRAMES * cfg.DATA.SAMPLING_RATE`.
_C.DEMO.BUFFER_SIZE = 0
# If specified, the visualized outputs will be written to a video file at
# this path. Otherwise, the visualized outputs will be displayed in a window.
_C.DEMO.OUTPUT_FILE = ""
# Frames-per-second rate for writing to the output video file.
# If not set (-1), use the fps rate from the input file.
_C.DEMO.OUTPUT_FPS = -1
# Input format from the demo video reader ("RGB" or "BGR").
_C.DEMO.INPUT_FORMAT = "BGR"
# Draw visualization frames in [keyframe_idx - CLIP_VIS_SIZE, keyframe_idx + CLIP_VIS_SIZE], inclusive.
_C.DEMO.CLIP_VIS_SIZE = 10
# Number of processes to run the video visualizer.
_C.DEMO.NUM_VIS_INSTANCES = 2

# Path to pre-computed predicted boxes.
_C.DEMO.PREDS_BOXES = ""
# Whether to run with a multi-threaded video reader.
_C.DEMO.THREAD_ENABLE = False
# Take one clip for every `DEMO.NUM_CLIPS_SKIP` + 1 for prediction and visualization.
# This is used to speed up the demo by reducing the prediction/visualization frequency.
# If -1, take the most recently read clip for visualization. This mode is only supported
# if `DEMO.THREAD_ENABLE` is set to True.
_C.DEMO.NUM_CLIPS_SKIP = 0
# Path to ground-truth boxes and labels (optional).
_C.DEMO.GT_BOXES = ""
# The starting second of the video w.r.t. the bounding boxes file.
_C.DEMO.STARTING_SECOND = 900
# Frames per second of the input video/folder of images.
_C.DEMO.FPS = 30
# Visualize with top-k predictions or predictions above certain threshold(s).
# Options: {"thres", "top-k"}
_C.DEMO.VIS_MODE = "thres"
# Threshold for common class names.
_C.DEMO.COMMON_CLASS_THRES = 0.7
# Threshold for uncommon class names. This will not be
# used if `_C.DEMO.COMMON_CLASS_NAMES` is empty.
_C.DEMO.UNCOMMON_CLASS_THRES = 0.3
# This is chosen based on the distribution of examples in
# each class in the AVA dataset.
_C.DEMO.COMMON_CLASS_NAMES = [
    "watch (a person)",
    "talk to (e.g., self, a person, a group)",
    "listen to (a person)",
    "touch (an object)",
    "carry/hold (an object)",
    "walk",
    "sit",
    "lie/sleep",
    "bend/bow (at the waist)",
]
# Slow-motion rate for the visualization. The visualized portions of the
# video will be played `_C.DEMO.SLOWMO` times slower than usual speed.
_C.DEMO.SLOWMO = 1


def assert_and_infer_cfg(cfg):
    # BN assertions.
    if cfg.BN.USE_PRECISE_STATS:
        assert cfg.BN.NUM_BATCHES_PRECISE >= 0
    # TRAIN assertions.
    assert cfg.TRAIN.CHECKPOINT_TYPE in ["pytorch", "caffe2"]
    assert cfg.NUM_GPUS == 0 or cfg.TRAIN.BATCH_SIZE % cfg.NUM_GPUS == 0

    # TEST assertions.
    assert cfg.TEST.CHECKPOINT_TYPE in ["pytorch", "caffe2"]
    assert cfg.NUM_GPUS == 0 or cfg.TEST.BATCH_SIZE % cfg.NUM_GPUS == 0

    # RESNET assertions.
    assert cfg.RESNET.NUM_GROUPS > 0
    assert cfg.RESNET.WIDTH_PER_GROUP > 0
    assert cfg.RESNET.WIDTH_PER_GROUP % cfg.RESNET.NUM_GROUPS == 0

    # Execute LR scaling by num_shards.
    if cfg.SOLVER.BASE_LR_SCALE_NUM_SHARDS:
        cfg.SOLVER.BASE_LR *= cfg.NUM_SHARDS
        cfg.SOLVER.WARMUP_START_LR *= cfg.NUM_SHARDS
        cfg.SOLVER.COSINE_END_LR *= cfg.NUM_SHARDS

    # General assertions.
    assert cfg.SHARD_ID < cfg.NUM_SHARDS
    return cfg


def get_cfg():
    """Get a copy of the default config."""
    return _C.clone()
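
A typical consumer of this module clones the defaults, overlays an experiment file, and validates the result. A minimal sketch, assuming the repository is importable as x3d_model:

from x3d_model.helpers.cfg import assert_and_infer_cfg, get_cfg

cfg = get_cfg()                    # fresh clone of the defaults above
cfg.merge_from_file("config.yml")  # overlay experiment settings (yacs API)
cfg.NUM_GPUS = 1
cfg = assert_and_infer_cfg(cfg)    # run the assertions and LR scaling above
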
helpers/head.py
ADDED
@@ -0,0 +1,146 @@
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

"""ResNe(X)t Head helper."""

import torch.nn as nn


class X3DHead(nn.Module):
    """
    X3D head.
    This layer performs a fully-connected projection during training, when the
    input size is 1x1x1. It performs a convolutional projection during testing
    when the input size is larger than 1x1x1. If the inputs are from multiple
    different pathways, the inputs will be concatenated after pooling.
    """

    def __init__(
        self,
        dim_in,
        dim_inner,
        dim_out,
        num_classes,
        pool_size,
        dropout_rate=0.0,
        act_func="softmax",
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        norm_module=nn.BatchNorm3d,
        bn_lin5_on=False,
    ):
        """
        The `__init__` method of any subclass should also contain these
        arguments.
        X3DHead takes a 5-dim feature tensor (BxCxTxHxW) as input.

        Args:
            dim_in (int): the channel dimension C of the input.
            dim_inner (int): the inner channel dimension of the 1x1x1
                convolution applied before pooling.
            dim_out (int): the channel dimension of the features fed to the
                classifier.
            num_classes (int): the channel dimension of the output.
            pool_size (list): a single entry list of kernel size for
                spatiotemporal pooling for the TxHxW dimensions.
            dropout_rate (float): dropout rate. If equal to 0.0, perform no
                dropout.
            act_func (string): activation function to use. 'softmax': applies
                softmax on the output. 'sigmoid': applies sigmoid on the output.
            inplace_relu (bool): if True, calculate the relu on the original
                input without allocating new memory.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Note that BN momentum in
                PyTorch = 1 - BN momentum in Caffe2.
            norm_module (nn.Module): nn.Module for the normalization layer. The
                default is nn.BatchNorm3d.
            bn_lin5_on (bool): if True, perform normalization on the features
                before the classifier.
        """
        super(X3DHead, self).__init__()
        self.pool_size = pool_size
        self.dropout_rate = dropout_rate
        self.num_classes = num_classes
        self.act_func = act_func
        self.eps = eps
        self.bn_mmt = bn_mmt
        self.inplace_relu = inplace_relu
        self.bn_lin5_on = bn_lin5_on
        self._construct_head(dim_in, dim_inner, dim_out, norm_module)

    def _construct_head(self, dim_in, dim_inner, dim_out, norm_module):

        self.conv_5 = nn.Conv3d(
            dim_in,
            dim_inner,
            kernel_size=(1, 1, 1),
            stride=(1, 1, 1),
            padding=(0, 0, 0),
            bias=False,
        )
        self.conv_5_bn = norm_module(
            num_features=dim_inner, eps=self.eps, momentum=self.bn_mmt
        )
        self.conv_5_relu = nn.ReLU(inplace=self.inplace_relu)

        if self.pool_size is None:
            self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
        else:
            self.avg_pool = nn.AvgPool3d(self.pool_size, stride=1)

        self.lin_5 = nn.Conv3d(
            dim_inner,
            dim_out,
            kernel_size=(1, 1, 1),
            stride=(1, 1, 1),
            padding=(0, 0, 0),
            bias=False,
        )
        if self.bn_lin5_on:
            self.lin_5_bn = norm_module(
                num_features=dim_out, eps=self.eps, momentum=self.bn_mmt
            )
        self.lin_5_relu = nn.ReLU(inplace=self.inplace_relu)

        if self.dropout_rate > 0.0:
            self.dropout = nn.Dropout(self.dropout_rate)
        # Perform FC in a fully convolutional manner. The FC layer will be
        # initialized with a different std compared to convolutional layers.
        self.projection = nn.Linear(dim_out, self.num_classes, bias=True)

        # Softmax for evaluation and testing.
        if self.act_func == "softmax":
            self.act = nn.Softmax(dim=4)
        elif self.act_func == "sigmoid":
            self.act = nn.Sigmoid()
        else:
            raise NotImplementedError(
                "{} is not supported as an activation function.".format(
                    self.act_func
                )
            )

    def forward(self, inputs):
        # In its current design the X3D head is only usable for a single
        # pathway input.
        assert len(inputs) == 1, "Input tensor does not contain 1 pathway"
        x = self.conv_5(inputs[0])
        x = self.conv_5_bn(x)
        x = self.conv_5_relu(x)
        x = self.avg_pool(x)

        x = self.lin_5(x)
        if self.bn_lin5_on:
            x = self.lin_5_bn(x)
        x = self.lin_5_relu(x)

        # (N, C, T, H, W) -> (N, T, H, W, C).
        x = x.permute((0, 2, 3, 4, 1))
        # Perform dropout.
        if hasattr(self, "dropout"):
            x = self.dropout(x)
        x = self.projection(x)

        # Performs fully convolutional inference.
        if not self.training:
            x = self.act(x)
            x = x.mean([1, 2, 3])

        x = x.view(x.shape[0], -1)
        return x
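
To make the head's shape flow concrete, a small usage sketch follows; the sizes are illustrative (roughly X3D-S like), not values prescribed by this file.

import torch

head = X3DHead(
    dim_in=192, dim_inner=432, dim_out=2048, num_classes=400, pool_size=None
)
head.eval()  # eval mode applies the softmax and spatiotemporal averaging
with torch.no_grad():
    feats = torch.randn(2, 192, 13, 7, 7)  # (B, C, T, H, W) backbone features
    probs = head([feats])                  # single-pathway input list
print(probs.shape)  # torch.Size([2, 400]); rows sum to 1 under softmax
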
helpers/norm.py
ADDED
@@ -0,0 +1,110 @@
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

"""BatchNorm (BN) utility functions and custom batch-size BN implementations."""

from functools import partial

import torch
import torch.nn as nn

from pytorchvideo.layers.batch_norm import NaiveSyncBatchNorm3d


def get_norm(cfg):
    """
    Args:
        cfg (CfgNode): model building configs, details are in the comments of
            the config file.
    Returns:
        nn.Module: the normalization layer.
    """
    if cfg.BN.NORM_TYPE in {"batchnorm", "sync_batchnorm_apex"}:
        return nn.BatchNorm3d
    elif cfg.BN.NORM_TYPE == "sub_batchnorm":
        return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS)
    elif cfg.BN.NORM_TYPE == "sync_batchnorm":
        return partial(
            NaiveSyncBatchNorm3d,
            num_sync_devices=cfg.BN.NUM_SYNC_DEVICES,
            global_sync=cfg.BN.GLOBAL_SYNC,
        )
    else:
        raise NotImplementedError(
            "Norm type {} is not supported".format(cfg.BN.NORM_TYPE)
        )


class SubBatchNorm3d(nn.Module):
    """
    The standard BN layer computes stats across all examples in a GPU. In some
    cases it is desirable to compute stats across only a subset of examples
    (e.g., in multigrid training https://arxiv.org/abs/1912.00998).
    SubBatchNorm3d splits the batch dimension into N splits, and runs BN on
    each of them separately, so that the stats are computed on each subset of
    examples (1/N of the batch) independently. During evaluation, it aggregates
    the stats from all splits into one BN.
    """

    def __init__(self, num_splits, **args):
        """
        Args:
            num_splits (int): number of splits.
            args (dict): other arguments.
        """
        super(SubBatchNorm3d, self).__init__()
        self.num_splits = num_splits
        num_features = args["num_features"]
        # Keep only one set of weight and bias.
        if args.get("affine", True):
            self.affine = True
            args["affine"] = False
            self.weight = torch.nn.Parameter(torch.ones(num_features))
            self.bias = torch.nn.Parameter(torch.zeros(num_features))
        else:
            self.affine = False
        self.bn = nn.BatchNorm3d(**args)
        args["num_features"] = num_features * num_splits
        self.split_bn = nn.BatchNorm3d(**args)

    def _get_aggregated_mean_std(self, means, stds, n):
        """
        Calculate the aggregated mean and stds.
        Args:
            means (tensor): mean values.
            stds (tensor): standard deviations.
            n (int): number of sets of means and stds.
        """
        mean = means.view(n, -1).sum(0) / n
        # Despite the parameter name, `stds` holds running variances; the
        # aggregate variance is the mean of the split variances plus the
        # variance of the split means.
        std = (
            stds.view(n, -1).sum(0) / n
            + ((means.view(n, -1) - mean) ** 2).view(n, -1).sum(0) / n
        )
        return mean.detach(), std.detach()

    def aggregate_stats(self):
        """
        Synchronize running_mean, and running_var. Call this before eval.
        """
        if self.split_bn.track_running_stats:
            (
                self.bn.running_mean.data,
                self.bn.running_var.data,
            ) = self._get_aggregated_mean_std(
                self.split_bn.running_mean,
                self.split_bn.running_var,
                self.num_splits,
            )

    def forward(self, x):
        if self.training:
            n, c, t, h, w = x.shape
            x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)
            x = self.split_bn(x)
            x = x.view(n, c, t, h, w)
        else:
            x = self.bn(x)
        if self.affine:
            x = x * self.weight.view((-1, 1, 1, 1))
            x = x + self.bias.view((-1, 1, 1, 1))
        return x
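
A short usage sketch with hypothetical sizes: with num_splits=2 and a batch of 8, training statistics are computed over sub-batches of 4, and aggregate_stats() folds the split statistics back into the single BN used at eval time.

import torch

sbn = SubBatchNorm3d(num_splits=2, num_features=16)
x = torch.randn(8, 16, 4, 8, 8)  # (N, C, T, H, W); N divisible by num_splits
y_train = sbn(x)                 # training mode: per-split batch statistics
sbn.aggregate_stats()            # average split stats into sbn.bn before eval
sbn.eval()
y_eval = sbn(x)                  # eval mode: aggregated running statistics
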
helpers/resnet.py
ADDED
@@ -0,0 +1,927 @@
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

"""Video models."""

import torch
import torch.nn as nn
from pytorchvideo.layers.swish import Swish


def drop_path(x, drop_prob: float = 0.0, training: bool = False):
    """
    Stochastic Depth per sample.
    """
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (
        x.ndim - 1
    )  # work with diff dim tensors, not just 2D ConvNets
    mask = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    mask.floor_()  # binarize
    output = x.div(keep_prob) * mask
    return output

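A quick numeric check of the scaling logic above (illustrative only): zeroing a sample with probability drop_prob and dividing survivors by keep_prob cancel in expectation, so the mean activation is preserved.

import torch

x = torch.ones(1000, 8, 2, 4, 4)
y = drop_path(x, drop_prob=0.2, training=True)
# ~20% of the 1000 samples are zeroed; survivors are scaled to 1 / 0.8 = 1.25,
# so the overall mean stays close to 1.0.
print(round(y.mean().item(), 2))
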
class Nonlocal(nn.Module):
    """
    Builds Non-local Neural Networks as a generic family of building
    blocks for capturing long-range dependencies. A Non-local Network
    computes the response at a position as a weighted sum of the
    features at all positions. This building block can be plugged into
    many computer vision architectures.
    More details in the paper: https://arxiv.org/pdf/1711.07971.pdf
    """

    def __init__(
        self,
        dim,
        dim_inner,
        pool_size=None,
        instantiation="softmax",
        zero_init_final_conv=False,
        zero_init_final_norm=True,
        norm_eps=1e-5,
        norm_momentum=0.1,
        norm_module=nn.BatchNorm3d,
    ):
        """
        Args:
            dim (int): number of dimensions for the input.
            dim_inner (int): number of dimensions inside of the Non-local block.
            pool_size (list): the kernel size of spatial temporal pooling:
                temporal pool kernel size, spatial pool kernel size, spatial
                pool kernel size, in order. By default pool_size is None,
                in which case no pooling is used.
            instantiation (string): supports two different instantiation methods:
                "dot_product": normalizing correlation matrix with L2.
                "softmax": normalizing correlation matrix with Softmax.
            zero_init_final_conv (bool): If true, zero-initialize the final
                convolution of the Non-local block.
            zero_init_final_norm (bool):
                If true, zero-initialize the final batch norm of the Non-local
                block.
            norm_module (nn.Module): nn.Module for the normalization layer. The
                default is nn.BatchNorm3d.
        """
        super(Nonlocal, self).__init__()
        self.dim = dim
        self.dim_inner = dim_inner
        self.pool_size = pool_size
        self.instantiation = instantiation
        self.use_pool = (
            False if pool_size is None else any((size > 1 for size in pool_size))
        )
        self.norm_eps = norm_eps
        self.norm_momentum = norm_momentum
        self._construct_nonlocal(
            zero_init_final_conv, zero_init_final_norm, norm_module
        )

    def _construct_nonlocal(
        self, zero_init_final_conv, zero_init_final_norm, norm_module
    ):
        # Three convolution heads: theta, phi, and g.
        self.conv_theta = nn.Conv3d(
            self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0
        )
        self.conv_phi = nn.Conv3d(
            self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0
        )
        self.conv_g = nn.Conv3d(
            self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0
        )

        # Final convolution output.
        self.conv_out = nn.Conv3d(
            self.dim_inner, self.dim, kernel_size=1, stride=1, padding=0
        )
        # Zero initializing the final convolution output.
        self.conv_out.zero_init = zero_init_final_conv

        # TODO: change the name to `norm`
        self.bn = norm_module(
            num_features=self.dim,
            eps=self.norm_eps,
            momentum=self.norm_momentum,
        )
        # Zero initializing the final bn.
        self.bn.transform_final_bn = zero_init_final_norm

        # Optionally add the spatial-temporal pooling.
        if self.use_pool:
            self.pool = nn.MaxPool3d(
                kernel_size=self.pool_size,
                stride=self.pool_size,
                padding=[0, 0, 0],
            )

    def forward(self, x):
        x_identity = x
        N, C, T, H, W = x.size()

        theta = self.conv_theta(x)

        # Perform temporal-spatial pooling to reduce the computation.
        if self.use_pool:
            x = self.pool(x)

        phi = self.conv_phi(x)
        g = self.conv_g(x)

        theta = theta.view(N, self.dim_inner, -1)
        phi = phi.view(N, self.dim_inner, -1)
        g = g.view(N, self.dim_inner, -1)

        # (N, C, TxHxW) * (N, C, TxHxW) => (N, TxHxW, TxHxW).
        theta_phi = torch.einsum("nct,ncp->ntp", (theta, phi))
        # In the original Non-local paper, there are two main ways to normalize
        # the affinity tensor:
        # 1) Softmax normalization (norm on exp).
        # 2) dot_product normalization.
        if self.instantiation == "softmax":
            # Normalizing the affinity tensor theta_phi before softmax.
            theta_phi = theta_phi * (self.dim_inner**-0.5)
            theta_phi = nn.functional.softmax(theta_phi, dim=2)
        elif self.instantiation == "dot_product":
            spatial_temporal_dim = theta_phi.shape[2]
            theta_phi = theta_phi / spatial_temporal_dim
        else:
            raise NotImplementedError(
                "Unknown norm type {}".format(self.instantiation)
            )

        # (N, TxHxW, TxHxW) * (N, C, TxHxW) => (N, C, TxHxW).
        theta_phi_g = torch.einsum("ntg,ncg->nct", (theta_phi, g))

        # (N, C, TxHxW) => (N, C, T, H, W).
        theta_phi_g = theta_phi_g.view(N, self.dim_inner, T, H, W)

        p = self.conv_out(theta_phi_g)
        p = self.bn(p)
        return x_identity + p

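A usage sketch with made-up sizes: the block is shape-preserving, so it can be inserted into a residual stage without changing downstream dimensions; the optional pooling only shrinks the phi/g paths that form the affinity matrix.

import torch

nl = Nonlocal(dim=64, dim_inner=32, pool_size=[1, 2, 2])
out = nl(torch.randn(2, 64, 4, 16, 16))
print(out.shape)  # torch.Size([2, 64, 4, 16, 16]), same as the input
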
class SE(nn.Module):
    """Squeeze-and-Excitation (SE) block w/ Swish: AvgPool, FC, Swish, FC, Sigmoid."""

    def _round_width(self, width, multiplier, min_width=8, divisor=8):
        """
        Round the width of filters based on the width multiplier.
        Args:
            width (int): the channel dimensions of the input.
            multiplier (float): the multiplication factor.
            min_width (int): the minimum width after multiplication.
            divisor (int): the new width should be divisible by divisor.
        """
        if not multiplier:
            return width

        width *= multiplier
        min_width = min_width or divisor
        width_out = max(min_width, int(width + divisor / 2) // divisor * divisor)
        if width_out < 0.9 * width:
            width_out += divisor
        return int(width_out)

    def __init__(self, dim_in, ratio, relu_act=True):
        """
        Args:
            dim_in (int): the channel dimensions of the input.
            ratio (float): the channel reduction ratio for the squeeze.
            relu_act (bool): whether to use ReLU activation instead
                of Swish (default).
        """
        super(SE, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
        dim_fc = self._round_width(dim_in, ratio)
        self.fc1 = nn.Conv3d(dim_in, dim_fc, 1, bias=True)
        self.fc1_act = nn.ReLU() if relu_act else Swish()
        self.fc2 = nn.Conv3d(dim_fc, dim_in, 1, bias=True)

        self.fc2_sig = nn.Sigmoid()

    def forward(self, x):
        x_in = x
        # Relies on registration order: avg_pool, fc1, fc1_act, fc2, fc2_sig.
        for module in self.children():
            x = module(x)
        return x_in * x

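A usage sketch with a hypothetical channel count: the squeeze width comes from _round_width, so a 1/16 ratio on 48 channels is floored to the 8-channel minimum, and the gating leaves the input shape unchanged.

import torch

se = SE(dim_in=48, ratio=0.0625)
print(se.fc1.out_channels)  # 8: _round_width(48, 0.0625) hits the min_width floor
y = se(torch.randn(2, 48, 4, 8, 8))
print(y.shape)  # torch.Size([2, 48, 4, 8, 8])
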
def get_trans_func(name):
    """
    Retrieves the transformation module by name.
    """
    trans_funcs = {
        "bottleneck_transform": BottleneckTransform,
        "basic_transform": BasicTransform,
        "x3d_transform": X3DTransform,
    }
    assert (
        name in trans_funcs.keys()
    ), "Transformation function '{}' not supported".format(name)
    return trans_funcs[name]

class BasicTransform(nn.Module):
    """
    Basic transformation: Tx3x3, 1x3x3, where T is the size of the temporal kernel.
    """

    def __init__(
        self,
        dim_in,
        dim_out,
        temp_kernel_size,
        stride,
        dim_inner=None,
        num_groups=1,
        stride_1x1=None,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        dilation=1,
        norm_module=nn.BatchNorm3d,
        block_idx=0,
    ):
        """
        Args:
            dim_in (int): the channel dimensions of the input.
            dim_out (int): the channel dimension of the output.
            temp_kernel_size (int): the temporal kernel size of the first
                convolution in the basic block.
            stride (int): the stride of the bottleneck.
            dim_inner (None): the inner dimension is not used in
                BasicTransform.
            num_groups (int): number of groups for the convolution. The number
                of groups is always 1 for BasicTransform.
            stride_1x1 (None): stride_1x1 is not used in BasicTransform.
            inplace_relu (bool): if True, calculate the relu on the original
                input without allocating new memory.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Note that BN momentum in
                PyTorch = 1 - BN momentum in Caffe2.
            norm_module (nn.Module): nn.Module for the normalization layer. The
                default is nn.BatchNorm3d.
        """
        super(BasicTransform, self).__init__()
        self.temp_kernel_size = temp_kernel_size
        self._inplace_relu = inplace_relu
        self._eps = eps
        self._bn_mmt = bn_mmt
        self._construct(dim_in, dim_out, stride, dilation, norm_module)

    def _construct(self, dim_in, dim_out, stride, dilation, norm_module):
        # Tx3x3, BN, ReLU.
        self.a = nn.Conv3d(
            dim_in,
            dim_out,
            kernel_size=[self.temp_kernel_size, 3, 3],
            stride=[1, stride, stride],
            padding=[int(self.temp_kernel_size // 2), 1, 1],
            bias=False,
        )
        self.a_bn = norm_module(
            num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
        )
        self.a_relu = nn.ReLU(inplace=self._inplace_relu)
        # 1x3x3, BN.
        self.b = nn.Conv3d(
            dim_out,
            dim_out,
            kernel_size=[1, 3, 3],
            stride=[1, 1, 1],
            padding=[0, dilation, dilation],
            dilation=[1, dilation, dilation],
            bias=False,
        )

        self.b.final_conv = True

        self.b_bn = norm_module(
            num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
        )

        self.b_bn.transform_final_bn = True

    def forward(self, x):
        x = self.a(x)
        x = self.a_bn(x)
        x = self.a_relu(x)

        x = self.b(x)
        x = self.b_bn(x)
        return x

class X3DTransform(nn.Module):
    """
    X3D transformation: 1x1x1, Tx3x3 (channelwise, num_groups=dim_in), 1x1x1,
    augmented with (optional) SE (squeeze-excitation) on the Tx3x3 output.
    T is the temporal kernel size (defaulting to 3).
    """

    def __init__(
        self,
        dim_in,
        dim_out,
        temp_kernel_size,
        stride,
        dim_inner,
        num_groups,
        stride_1x1=False,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        dilation=1,
        norm_module=nn.BatchNorm3d,
        se_ratio=0.0625,
        swish_inner=True,
        block_idx=0,
    ):
        """
        Args:
            dim_in (int): the channel dimensions of the input.
            dim_out (int): the channel dimension of the output.
            temp_kernel_size (int): the temporal kernel size of the middle
                convolution in the bottleneck.
            stride (int): the stride of the bottleneck.
            dim_inner (int): the inner dimension of the block.
            num_groups (int): number of groups for the convolution. num_groups=1
                is for standard ResNet like networks, and num_groups>1 is for
                ResNeXt like networks.
            stride_1x1 (bool): if True, apply stride to the 1x1 conv, otherwise
                apply stride to the 3x3 conv.
            inplace_relu (bool): if True, calculate the relu on the original
                input without allocating new memory.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Note that BN momentum in
                PyTorch = 1 - BN momentum in Caffe2.
            dilation (int): size of dilation.
            norm_module (nn.Module): nn.Module for the normalization layer. The
                default is nn.BatchNorm3d.
            se_ratio (float): if > 0, apply SE to the Tx3x3 conv, with the SE
                channel dimensionality being se_ratio times the Tx3x3 conv dim.
            swish_inner (bool): if True, apply swish to the Tx3x3 conv, otherwise
                apply ReLU to the Tx3x3 conv.
        """
        super(X3DTransform, self).__init__()
        self.temp_kernel_size = temp_kernel_size
        self._inplace_relu = inplace_relu
        self._eps = eps
        self._bn_mmt = bn_mmt
        self._se_ratio = se_ratio
        self._swish_inner = swish_inner
        self._stride_1x1 = stride_1x1
        self._block_idx = block_idx
        self._construct(
            dim_in,
            dim_out,
            stride,
            dim_inner,
            num_groups,
            dilation,
            norm_module,
        )

    def _construct(
        self,
        dim_in,
        dim_out,
        stride,
        dim_inner,
        num_groups,
        dilation,
        norm_module,
    ):
        (str1x1, str3x3) = (stride, 1) if self._stride_1x1 else (1, stride)

        # 1x1x1, BN, ReLU.
        self.a = nn.Conv3d(
            dim_in,
            dim_inner,
            kernel_size=[1, 1, 1],
            stride=[1, str1x1, str1x1],
            padding=[0, 0, 0],
            bias=False,
        )
        self.a_bn = norm_module(
            num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt
        )
        self.a_relu = nn.ReLU(inplace=self._inplace_relu)

        # Tx3x3, BN, ReLU.
        self.b = nn.Conv3d(
            dim_inner,
            dim_inner,
            [self.temp_kernel_size, 3, 3],
            stride=[1, str3x3, str3x3],
            padding=[int(self.temp_kernel_size // 2), dilation, dilation],
            groups=num_groups,
            bias=False,
            dilation=[1, dilation, dilation],
        )
        self.b_bn = norm_module(
            num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt
        )

        # Apply SE attention or not: SE is used on every other block
        # (even block indices).
        use_se = True if (self._block_idx + 1) % 2 else False
        if self._se_ratio > 0.0 and use_se:
            self.se = SE(dim_inner, self._se_ratio)

        if self._swish_inner:
            self.b_relu = Swish()
        else:
            self.b_relu = nn.ReLU(inplace=self._inplace_relu)

        # 1x1x1, BN.
        self.c = nn.Conv3d(
            dim_inner,
            dim_out,
            kernel_size=[1, 1, 1],
            stride=[1, 1, 1],
            padding=[0, 0, 0],
            bias=False,
        )
        self.c_bn = norm_module(
            num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
        )
        self.c_bn.transform_final_bn = True

    def forward(self, x):
        for block in self.children():
            x = block(x)
        return x

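A usage sketch with made-up dimensions: setting num_groups equal to dim_inner makes the Tx3x3 convolution channelwise (depthwise), which is the X3D design point, and with block_idx=0 the SE branch is active.

import torch

t = X3DTransform(
    dim_in=24, dim_out=24, temp_kernel_size=3, stride=1,
    dim_inner=54, num_groups=54, block_idx=0,
)
y = t(torch.randn(2, 24, 4, 8, 8))
print(y.shape)  # torch.Size([2, 24, 4, 8, 8])
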
class BottleneckTransform(nn.Module):
    """
    Bottleneck transformation: Tx1x1, 1x3x3, 1x1x1, where T is the size of
    the temporal kernel.
    """

    def __init__(
        self,
        dim_in,
        dim_out,
        temp_kernel_size,
        stride,
        dim_inner,
        num_groups,
        stride_1x1=False,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        dilation=1,
        norm_module=nn.BatchNorm3d,
        block_idx=0,
    ):
        """
        Args:
            dim_in (int): the channel dimensions of the input.
            dim_out (int): the channel dimension of the output.
            temp_kernel_size (int): the temporal kernel size of the first
                convolution in the bottleneck.
            stride (int): the stride of the bottleneck.
            dim_inner (int): the inner dimension of the block.
            num_groups (int): number of groups for the convolution. num_groups=1
                is for standard ResNet like networks, and num_groups>1 is for
                ResNeXt like networks.
            stride_1x1 (bool): if True, apply stride to the 1x1 conv, otherwise
                apply stride to the 3x3 conv.
            inplace_relu (bool): if True, calculate the relu on the original
                input without allocating new memory.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Note that BN momentum in
                PyTorch = 1 - BN momentum in Caffe2.
            dilation (int): size of dilation.
            norm_module (nn.Module): nn.Module for the normalization layer. The
                default is nn.BatchNorm3d.
        """
        super(BottleneckTransform, self).__init__()
        self.temp_kernel_size = temp_kernel_size
        self._inplace_relu = inplace_relu
        self._eps = eps
        self._bn_mmt = bn_mmt
        self._stride_1x1 = stride_1x1
        self._construct(
            dim_in,
            dim_out,
            stride,
            dim_inner,
            num_groups,
            dilation,
            norm_module,
        )

    def _construct(
        self,
        dim_in,
        dim_out,
        stride,
        dim_inner,
        num_groups,
        dilation,
        norm_module,
    ):
        (str1x1, str3x3) = (stride, 1) if self._stride_1x1 else (1, stride)

        # Tx1x1, BN, ReLU.
        self.a = nn.Conv3d(
            dim_in,
            dim_inner,
            kernel_size=[self.temp_kernel_size, 1, 1],
            stride=[1, str1x1, str1x1],
            padding=[int(self.temp_kernel_size // 2), 0, 0],
            bias=False,
        )
        self.a_bn = norm_module(
            num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt
        )
        self.a_relu = nn.ReLU(inplace=self._inplace_relu)

        # 1x3x3, BN, ReLU.
        self.b = nn.Conv3d(
            dim_inner,
            dim_inner,
            [1, 3, 3],
            stride=[1, str3x3, str3x3],
            padding=[0, dilation, dilation],
            groups=num_groups,
            bias=False,
            dilation=[1, dilation, dilation],
        )
        self.b_bn = norm_module(
            num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt
        )
        self.b_relu = nn.ReLU(inplace=self._inplace_relu)

        # 1x1x1, BN.
        self.c = nn.Conv3d(
            dim_inner,
            dim_out,
            kernel_size=[1, 1, 1],
            stride=[1, 1, 1],
            padding=[0, 0, 0],
            bias=False,
        )
        self.c.final_conv = True

        self.c_bn = norm_module(
            num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
        )
        self.c_bn.transform_final_bn = True

    def forward(self, x):
        # Explicitly forward every layer.
        # Branch2a.
        x = self.a(x)
        x = self.a_bn(x)
        x = self.a_relu(x)

        # Branch2b.
        x = self.b(x)
        x = self.b_bn(x)
        x = self.b_relu(x)

        # Branch2c.
        x = self.c(x)
        x = self.c_bn(x)
        return x

class ResBlock(nn.Module):
    """
    Residual block.
    """

    def __init__(
        self,
        dim_in,
        dim_out,
        temp_kernel_size,
        stride,
        trans_func,
        dim_inner,
        num_groups=1,
        stride_1x1=False,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        dilation=1,
        norm_module=nn.BatchNorm3d,
        block_idx=0,
        drop_connect_rate=0.0,
    ):
        """
        The ResBlock class constructs residual blocks. More details can be found in:
        Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun.
        "Deep residual learning for image recognition."
        https://arxiv.org/abs/1512.03385
        Args:
            dim_in (int): the channel dimensions of the input.
            dim_out (int): the channel dimension of the output.
            temp_kernel_size (int): the temporal kernel size of the middle
                convolution in the bottleneck.
            stride (int): the stride of the bottleneck.
            trans_func (string): transform function to be used to construct the
                bottleneck.
            dim_inner (int): the inner dimension of the block.
            num_groups (int): number of groups for the convolution. num_groups=1
                is for standard ResNet like networks, and num_groups>1 is for
                ResNeXt like networks.
            stride_1x1 (bool): if True, apply stride to the 1x1 conv, otherwise
                apply stride to the 3x3 conv.
            inplace_relu (bool): calculate the relu on the original input
                without allocating new memory.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Note that BN momentum in
                PyTorch = 1 - BN momentum in Caffe2.
            dilation (int): size of dilation.
            norm_module (nn.Module): nn.Module for the normalization layer. The
                default is nn.BatchNorm3d.
            drop_connect_rate (float): basic rate at which blocks are dropped,
                linearly increases from input to output blocks.
        """
        super(ResBlock, self).__init__()
        self._inplace_relu = inplace_relu
        self._eps = eps
        self._bn_mmt = bn_mmt
        self._drop_connect_rate = drop_connect_rate
        self._construct(
            dim_in,
            dim_out,
            temp_kernel_size,
            stride,
            trans_func,
            dim_inner,
            num_groups,
            stride_1x1,
            inplace_relu,
            dilation,
            norm_module,
            block_idx,
        )

    def _construct(
        self,
        dim_in,
        dim_out,
        temp_kernel_size,
        stride,
        trans_func,
        dim_inner,
        num_groups,
        stride_1x1,
        inplace_relu,
        dilation,
        norm_module,
        block_idx,
    ):
        # Use a skip connection with projection if dim or resolution changes.
        if (dim_in != dim_out) or (stride != 1):
            self.branch1 = nn.Conv3d(
                dim_in,
                dim_out,
                kernel_size=1,
                stride=[1, stride, stride],
                padding=0,
                bias=False,
                dilation=1,
            )
            self.branch1_bn = norm_module(
                num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
            )
        self.branch2 = trans_func(
            dim_in,
            dim_out,
            temp_kernel_size,
            stride,
            dim_inner,
            num_groups,
            stride_1x1=stride_1x1,
            inplace_relu=inplace_relu,
            dilation=dilation,
            norm_module=norm_module,
            block_idx=block_idx,
        )
        self.relu = nn.ReLU(inplace=self._inplace_relu)

    def forward(self, x):
        f_x = self.branch2(x)
        if self.training and self._drop_connect_rate > 0.0:
            f_x = drop_path(f_x, self._drop_connect_rate)
        if hasattr(self, "branch1"):
            x = self.branch1_bn(self.branch1(x)) + f_x
        else:
            x = x + f_x
        x = self.relu(x)
        return x

722 |
+
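For orientation: `drop_path`, called in `ResBlock.forward` above, is defined earlier in helpers/resnet.py. A minimal sketch of the standard stochastic-depth formulation it follows (illustrative only, not the file's actual helper):

import torch

def drop_path_sketch(x, drop_prob):
    # Keep each sample with probability (1 - drop_prob); one Bernoulli draw
    # per sample, broadcast over the C, T, H, W dimensions.
    keep_prob = 1.0 - drop_prob
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = torch.empty(mask_shape, dtype=x.dtype, device=x.device).bernoulli_(keep_prob)
    # Rescale survivors so the expected activation magnitude is unchanged.
    return x * mask / keep_prob
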
class ResStage(nn.Module):
    """
    Stage of 3D ResNet. It expects to have one or more tensors as input for
    single pathway (C2D, I3D, Slow), and multi-pathway (SlowFast) cases.
    More details can be found here:

    Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
    "SlowFast networks for video recognition."
    https://arxiv.org/pdf/1812.03982.pdf
    """

    def __init__(
        self,
        dim_in,
        dim_out,
        stride,
        temp_kernel_sizes,
        num_blocks,
        dim_inner,
        num_groups,
        num_block_temp_kernel,
        nonlocal_inds,
        nonlocal_group,
        nonlocal_pool,
        dilation,
        instantiation="softmax",
        trans_func_name="bottleneck_transform",
        stride_1x1=False,
        inplace_relu=True,
        norm_module=nn.BatchNorm3d,
        drop_connect_rate=0.0,
    ):
        """
        The `__init__` method of any subclass should also contain these arguments.
        ResStage builds p streams, where p can be greater than or equal to one.
        Args:
            dim_in (list): list of the p channel dimensions of the input.
                Different channel dimensions control the input dimension of
                different pathways.
            dim_out (list): list of the p channel dimensions of the output.
                Different channel dimensions control the output dimension of
                different pathways.
            temp_kernel_sizes (list): list of the p temporal kernel sizes of the
                convolution in the bottleneck. Different temp_kernel_sizes
                control different pathways.
            stride (list): list of the p strides of the bottleneck. Different
                strides control different pathways.
            num_blocks (list): list of the p numbers of blocks for each of the
                pathways.
            dim_inner (list): list of the p inner channel dimensions of the
                input. Different channel dimensions control the input dimension
                of different pathways.
            num_groups (list): list of the p numbers of groups for the
                convolution. num_groups=1 is for standard ResNet like networks,
                and num_groups>1 is for ResNeXt like networks.
            num_block_temp_kernel (list): extend the temp_kernel_sizes to
                num_block_temp_kernel blocks, then fill a temporal kernel size
                of 1 for the rest of the layers.
            nonlocal_inds (list): If the tuple is empty, no nonlocal layer will
                be added. If the tuple is not empty, add nonlocal layers after
                the index-th block.
            dilation (list): size of dilation for each pathway.
            nonlocal_group (list): list of the p numbers of nonlocal groups.
                Each number controls how to fold the temporal dimension into
                the batch dimension before applying the nonlocal transformation.
                https://github.com/facebookresearch/video-nonlocal-net.
            instantiation (string): different instantiation for the nonlocal
                layer. Supports two different instantiation methods:
                    "dot_product": normalizing correlation matrix with L2.
                    "softmax": normalizing correlation matrix with Softmax.
            trans_func_name (string): name of the transformation function to
                apply on the network.
            norm_module (nn.Module): nn.Module for the normalization layer. The
                default is nn.BatchNorm3d.
            drop_connect_rate (float): basic rate at which blocks are dropped,
                linearly increasing from input to output blocks.
        """
        super(ResStage, self).__init__()
        assert all(
            (
                num_block_temp_kernel[i] <= num_blocks[i]
                for i in range(len(temp_kernel_sizes))
            )
        )
        self.num_blocks = num_blocks
        self.nonlocal_group = nonlocal_group
        self._drop_connect_rate = drop_connect_rate
        self.temp_kernel_sizes = [
            (temp_kernel_sizes[i] * num_blocks[i])[: num_block_temp_kernel[i]]
            + [1] * (num_blocks[i] - num_block_temp_kernel[i])
            for i in range(len(temp_kernel_sizes))
        ]
        assert (
            len(
                {
                    len(dim_in),
                    len(dim_out),
                    len(temp_kernel_sizes),
                    len(stride),
                    len(num_blocks),
                    len(dim_inner),
                    len(num_groups),
                    len(num_block_temp_kernel),
                    len(nonlocal_inds),
                    len(nonlocal_group),
                }
            )
            == 1
        )
        self.num_pathways = len(self.num_blocks)
        self._construct(
            dim_in,
            dim_out,
            stride,
            dim_inner,
            num_groups,
            trans_func_name,
            stride_1x1,
            inplace_relu,
            nonlocal_inds,
            nonlocal_pool,
            instantiation,
            dilation,
            norm_module,
        )

    def _construct(
        self,
        dim_in,
        dim_out,
        stride,
        dim_inner,
        num_groups,
        trans_func_name,
        stride_1x1,
        inplace_relu,
        nonlocal_inds,
        nonlocal_pool,
        instantiation,
        dilation,
        norm_module,
    ):
        for pathway in range(self.num_pathways):
            for i in range(self.num_blocks[pathway]):
                # Retrieve the transformation function.
                trans_func = get_trans_func(trans_func_name)
                # Construct the block.
                res_block = ResBlock(
                    dim_in[pathway] if i == 0 else dim_out[pathway],
                    dim_out[pathway],
                    self.temp_kernel_sizes[pathway][i],
                    stride[pathway] if i == 0 else 1,
                    trans_func,
                    dim_inner[pathway],
                    num_groups[pathway],
                    stride_1x1=stride_1x1,
                    inplace_relu=inplace_relu,
                    dilation=dilation[pathway],
                    norm_module=norm_module,
                    block_idx=i,
                    drop_connect_rate=self._drop_connect_rate,
                )
                self.add_module("pathway{}_res{}".format(pathway, i), res_block)
                if i in nonlocal_inds[pathway]:
                    nln = Nonlocal(
                        dim_out[pathway],
                        dim_out[pathway] // 2,
                        nonlocal_pool[pathway],
                        instantiation=instantiation,
                        norm_module=norm_module,
                    )
                    self.add_module(
                        "pathway{}_nonlocal{}".format(pathway, i), nln
                    )

    def forward(self, inputs):
        output = []
        for pathway in range(self.num_pathways):
            x = inputs[pathway]
            for i in range(self.num_blocks[pathway]):
                m = getattr(self, "pathway{}_res{}".format(pathway, i))
                x = m(x)
                if hasattr(self, "pathway{}_nonlocal{}".format(pathway, i)):
                    nln = getattr(
                        self, "pathway{}_nonlocal{}".format(pathway, i)
                    )
                    b, c, t, h, w = x.shape
                    if self.nonlocal_group[pathway] > 1:
                        # Fold temporal dimension into batch dimension.
                        x = x.permute(0, 2, 1, 3, 4)
                        x = x.reshape(
                            b * self.nonlocal_group[pathway],
                            t // self.nonlocal_group[pathway],
                            c,
                            h,
                            w,
                        )
                        x = x.permute(0, 2, 1, 3, 4)
                    x = nln(x)
                    if self.nonlocal_group[pathway] > 1:
                        # Fold back to temporal dimension.
                        x = x.permute(0, 2, 1, 3, 4)
                        x = x.reshape(b, t, c, h, w)
                        x = x.permute(0, 2, 1, 3, 4)
            output.append(x)

        return output
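A minimal single-pathway `ResStage` instantiation, illustrating the list-of-one calling convention and the generated submodule names (`pathway0_res0`, `pathway0_res1`, ...). The values are illustrative, not taken from the shipped config, and assume the `x3d_transform` registered by `get_trans_func` in this file:

stage = ResStage(
    dim_in=[24],
    dim_out=[48],
    stride=[2],
    temp_kernel_sizes=[[3]],
    num_blocks=[2],
    dim_inner=[108],
    num_groups=[108],                  # channelwise 3x3x3 convs, as X3D uses
    num_block_temp_kernel=[2],
    nonlocal_inds=[[]],                # no nonlocal layers
    nonlocal_group=[1],
    nonlocal_pool=[[1, 2, 2]],
    dilation=[1],
    trans_func_name="x3d_transform",
)
# clips: (B, 24, T, H, W) -> out[0]: (B, 48, T, H/2, W/2)
out = stage([clips])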
helpers/stem.py
ADDED
@@ -0,0 +1,320 @@
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

"""ResNe(X)t 3D stem helper."""

import torch.nn as nn


def get_stem_func(name):
    """
    Retrieves the stem module by name.
    """
    trans_funcs = {"x3d_stem": X3DStem, "basic_stem": ResNetBasicStem}
    assert (
        name in trans_funcs.keys()
    ), "Transformation function '{}' not supported".format(name)
    return trans_funcs[name]


class VideoModelStem(nn.Module):
    """
    Video 3D stem module. Provides stem operations of Conv, BN, ReLU, MaxPool
    on input data tensor for one or multiple pathways.
    """

    def __init__(
        self,
        dim_in,
        dim_out,
        kernel,
        stride,
        padding,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        norm_module=nn.BatchNorm3d,
        stem_func_name="basic_stem",
    ):
        """
        The `__init__` method of any subclass should also contain these
        arguments. List size of 1 for single pathway models (C2D, I3D, Slow,
        etc.), list size of 2 for two pathway models (SlowFast).

        Args:
            dim_in (list): the list of channel dimensions of the inputs.
            dim_out (list): the output dimensions of the convolutions in the
                stem layer.
            kernel (list): the kernel sizes of the convolutions in the stem
                layers. Temporal kernel size, height kernel size, width kernel
                size in order.
            stride (list): the stride sizes of the convolutions in the stem
                layer. Temporal stride, height stride, width stride in order.
            padding (list): the padding sizes of the convolutions in the stem
                layer. Temporal padding size, height padding size, width
                padding size in order.
            inplace_relu (bool): calculate the relu on the original input
                without allocating new memory.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Note that BN momentum in
                PyTorch = 1 - BN momentum in Caffe2.
            norm_module (nn.Module): nn.Module for the normalization layer. The
                default is nn.BatchNorm3d.
            stem_func_name (string): name of the stem function applied on
                input to the network.
        """
        super(VideoModelStem, self).__init__()

        assert (
            len(
                {
                    len(dim_in),
                    len(dim_out),
                    len(kernel),
                    len(stride),
                    len(padding),
                }
            )
            == 1
        ), "Input pathway dimensions are not consistent. {} {} {} {} {}".format(
            len(dim_in),
            len(dim_out),
            len(kernel),
            len(stride),
            len(padding),
        )

        self.num_pathways = len(dim_in)
        self.kernel = kernel
        self.stride = stride
        self.padding = padding
        self.inplace_relu = inplace_relu
        self.eps = eps
        self.bn_mmt = bn_mmt
        # Construct the stem layer.
        self._construct_stem(dim_in, dim_out, norm_module, stem_func_name)

    def _construct_stem(self, dim_in, dim_out, norm_module, stem_func_name):
        trans_func = get_stem_func(stem_func_name)

        for pathway in range(len(dim_in)):
            stem = trans_func(
                dim_in[pathway],
                dim_out[pathway],
                self.kernel[pathway],
                self.stride[pathway],
                self.padding[pathway],
                self.inplace_relu,
                self.eps,
                self.bn_mmt,
                norm_module,
            )
            self.add_module("pathway{}_stem".format(pathway), stem)

    def forward(self, x):
        assert (
            len(x) == self.num_pathways
        ), "Input tensor does not contain {} pathways".format(self.num_pathways)
        # Use a new list; don't modify the x list in place, which is bad for
        # activation checkpointing.
        y = []
        for pathway in range(len(x)):
            m = getattr(self, "pathway{}_stem".format(pathway))
            y.append(m(x[pathway]))
        return y

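A single-pathway construction sketch, assuming RGB input; every argument is a list with one entry per pathway:

import torch

stem = VideoModelStem(
    dim_in=[3],                  # RGB
    dim_out=[24],
    kernel=[[5, 3, 3]],          # (T, H, W)
    stride=[[1, 2, 2]],
    padding=[[2, 1, 1]],
    stem_func_name="x3d_stem",
)
# Input is a list of per-pathway (B, C, T, H, W) tensors.
y = stem([torch.randn(1, 3, 13, 160, 160)])   # y[0]: (1, 24, 13, 80, 80)
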
class ResNetBasicStem(nn.Module):
    """
    ResNe(X)t 3D stem module.
    Performs spatiotemporal Convolution, BN, and ReLU, followed by a
    spatiotemporal pooling.
    """

    def __init__(
        self,
        dim_in,
        dim_out,
        kernel,
        stride,
        padding,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        norm_module=nn.BatchNorm3d,
    ):
        """
        The `__init__` method of any subclass should also contain these arguments.

        Args:
            dim_in (int): the channel dimension of the input. Normally 3 is used
                for rgb input, and 2 or 3 is used for optical flow input.
            dim_out (int): the output dimension of the convolution in the stem
                layer.
            kernel (list): the kernel size of the convolution in the stem layer.
                Temporal kernel size, height kernel size, width kernel size in
                order.
            stride (list): the stride size of the convolution in the stem layer.
                Temporal stride, height stride, width stride in order.
            padding (list): the padding size of the convolution in the stem
                layer. Temporal padding size, height padding size, width
                padding size in order.
            inplace_relu (bool): calculate the relu on the original input
                without allocating new memory.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Note that BN momentum in
                PyTorch = 1 - BN momentum in Caffe2.
            norm_module (nn.Module): nn.Module for the normalization layer. The
                default is nn.BatchNorm3d.
        """
        super(ResNetBasicStem, self).__init__()
        self.kernel = kernel
        self.stride = stride
        self.padding = padding
        self.inplace_relu = inplace_relu
        self.eps = eps
        self.bn_mmt = bn_mmt
        # Construct the stem layer.
        self._construct_stem(dim_in, dim_out, norm_module)

    def _construct_stem(self, dim_in, dim_out, norm_module):
        self.conv = nn.Conv3d(
            dim_in,
            dim_out,
            self.kernel,
            stride=self.stride,
            padding=self.padding,
            bias=False,
        )
        self.bn = norm_module(num_features=dim_out, eps=self.eps, momentum=self.bn_mmt)
        self.relu = nn.ReLU(self.inplace_relu)
        self.pool_layer = nn.MaxPool3d(
            kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1]
        )

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        x = self.pool_layer(x)
        return x


class X3DStem(nn.Module):
    """
    X3D's 3D stem module.
    Performs a spatial convolution followed by a depthwise temporal
    convolution, then BN and ReLU (no pooling, unlike ResNetBasicStem).
    """

    def __init__(
        self,
        dim_in,
        dim_out,
        kernel,
        stride,
        padding,
        inplace_relu=True,
        eps=1e-5,
        bn_mmt=0.1,
        norm_module=nn.BatchNorm3d,
    ):
        """
        The `__init__` method of any subclass should also contain these arguments.

        Args:
            dim_in (int): the channel dimension of the input. Normally 3 is used
                for rgb input, and 2 or 3 is used for optical flow input.
            dim_out (int): the output dimension of the convolution in the stem
                layer.
            kernel (list): the kernel size of the convolution in the stem layer.
                Temporal kernel size, height kernel size, width kernel size in
                order.
            stride (list): the stride size of the convolution in the stem layer.
                Temporal stride, height stride, width stride in order.
            padding (list): the padding size of the convolution in the stem
                layer. Temporal padding size, height padding size, width
                padding size in order.
            inplace_relu (bool): calculate the relu on the original input
                without allocating new memory.
            eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Note that BN momentum in
                PyTorch = 1 - BN momentum in Caffe2.
            norm_module (nn.Module): nn.Module for the normalization layer. The
                default is nn.BatchNorm3d.
        """
        super(X3DStem, self).__init__()
        self.kernel = kernel
        self.stride = stride
        self.padding = padding
        self.inplace_relu = inplace_relu
        self.eps = eps
        self.bn_mmt = bn_mmt
        # Construct the stem layer.
        self._construct_stem(dim_in, dim_out, norm_module)

    def _construct_stem(self, dim_in, dim_out, norm_module):
        self.conv_xy = nn.Conv3d(
            dim_in,
            dim_out,
            kernel_size=(1, self.kernel[1], self.kernel[2]),
            stride=(1, self.stride[1], self.stride[2]),
            padding=(0, self.padding[1], self.padding[2]),
            bias=False,
        )
        self.conv = nn.Conv3d(
            dim_out,
            dim_out,
            kernel_size=(self.kernel[0], 1, 1),
            stride=(self.stride[0], 1, 1),
            padding=(self.padding[0], 0, 0),
            bias=False,
            groups=dim_out,
        )

        self.bn = norm_module(num_features=dim_out, eps=self.eps, momentum=self.bn_mmt)
        self.relu = nn.ReLU(self.inplace_relu)

    def forward(self, x):
        x = self.conv_xy(x)
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x

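The stem above factorizes a full spatiotemporal convolution into a 1x3x3 spatial convolution (`conv_xy`) plus a depthwise temporal convolution (`conv`), which is far cheaper. An illustrative weight count, assuming dim_in=3, dim_out=24, kernel=(5, 3, 3), and no biases:

dim_in, dim_out = 3, 24
kt, kh, kw = 5, 3, 3
full_3d = dim_in * dim_out * kt * kh * kw   # 3240 weights for one fused 5x3x3 conv
spatial = dim_in * dim_out * kh * kw        # 648 (conv_xy)
temporal = dim_out * kt                     # 120 (depthwise conv, groups=dim_out)
print(full_3d, spatial + temporal)          # 3240 768
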
class PatchEmbed(nn.Module):
    """
    PatchEmbed.
    """

    def __init__(
        self,
        dim_in=3,
        dim_out=768,
        kernel=(1, 16, 16),
        stride=(1, 4, 4),
        padding=(1, 7, 7),
        conv_2d=False,
    ):
        super().__init__()
        if conv_2d:
            conv = nn.Conv2d
        else:
            conv = nn.Conv3d
        self.proj = conv(
            dim_in,
            dim_out,
            kernel_size=kernel,
            stride=stride,
            padding=padding,
        )

    def forward(self, x, keep_spatial=False):
        x = self.proj(x)
        if keep_spatial:
            return x, x.shape
        # B C (T) H W -> B (T)HW C
        return x.flatten(2).transpose(1, 2), x.shape
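`PatchEmbed` appears unused by the X3D backbone in this repo and is kept for transformer-style models; it projects a clip into a token sequence. A shape sketch with the defaults above:

import torch

embed = PatchEmbed()                    # Conv3d projection, 3 -> 768 channels
clip = torch.randn(1, 3, 8, 56, 56)     # (B, C, T, H, W)
tokens, shape = embed(clip)
# tokens: (B, T'*H'*W', 768); shape is the pre-flatten (B, 768, T', H', W')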
modeling_x3d.py
ADDED
@@ -0,0 +1,15 @@
from transformers import PreTrainedModel
from x3d_model.configuration_x3d import X3DConfig
from x3d_model.x3d import build_model


class X3DModel(PreTrainedModel):
    config_class = X3DConfig

    def __init__(self, config):
        super().__init__(config)
        self.model = build_model(config.cfg)

    def forward(self, input_video):
        outputs = self.model(input_video)
        return outputs
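A hedged usage sketch of the wrapper; it assumes the repo is importable as the `x3d_model` package and that `X3DConfig` (configuration_x3d.py) can be instantiated with the shipped cfg. Note the backbone's stem expects a list with one tensor per pathway:

import torch
from x3d_model.configuration_x3d import X3DConfig
from x3d_model.modeling_x3d import X3DModel

config = X3DConfig()                     # assumed to carry the repo's CfgNode
model = X3DModel(config).eval()
clip = torch.randn(1, 3, 13, 160, 160)   # sizes illustrative
with torch.no_grad():
    logits = model([clip])               # single pathway -> one-element list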
x3d.py
ADDED
@@ -0,0 +1,350 @@
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

import math
import torch
from torch import nn
from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks_default
from fvcore.nn.weight_init import c2_msra_fill, c2_xavier_fill

from .helpers.norm import get_norm
from .helpers.stem import VideoModelStem
from .helpers.resnet import ResStage
from .helpers.head import X3DHead

# round width


def round_width(width, multiplier, min_width=1, divisor=1):
    if not multiplier:
        return width
    width *= multiplier
    min_width = min_width or divisor
    width_out = max(min_width, int(width + divisor / 2) // divisor * divisor)
    if width_out < 0.9 * width:
        width_out += divisor
    return int(width_out)

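`round_width` snaps a scaled channel count to a multiple of `divisor`, rounding to the nearest multiple but bumping up one step if rounding would lose more than 10% of the target. A few worked evaluations:

round_width(24, 2.0, divisor=8)   # 48  (24 * 2.0 is already a multiple of 8)
round_width(24, 2.2, divisor=8)   # 56  (52.8 rounds to the nearest multiple, 56)
round_width(24, 1.1, divisor=8)   # 24  (26.4 -> 24; 24 >= 0.9 * 26.4, so no bump)
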
# init weights


def init_weights(
    model, fc_init_std=0.01, zero_init_final_bn=True, zero_init_final_conv=False
):
    """
    Performs ResNet style weight initialization.
    Args:
        fc_init_std (float): the expected standard deviation for fc layer.
        zero_init_final_bn (bool): if True, zero initialize the final bn for
            every bottleneck.
    """
    for m in model.modules():
        if isinstance(m, nn.Conv3d):
            # Note that there is no bias due to BN.
            if hasattr(m, "final_conv") and zero_init_final_conv:
                m.weight.data.zero_()
            else:
                # Follow the initialization method proposed in:
                # He, Kaiming, et al. "Delving deep into rectifiers:
                # Surpassing human-level performance on imagenet
                # classification." arXiv preprint arXiv:1502.01852 (2015).
                c2_msra_fill(m)

        elif isinstance(m, (nn.BatchNorm3d, nn.BatchNorm2d, nn.BatchNorm1d)):
            if (
                hasattr(m, "transform_final_bn")
                and m.transform_final_bn
                and zero_init_final_bn
            ):
                batchnorm_weight = 0.0
            else:
                batchnorm_weight = 1.0
            if m.weight is not None:
                m.weight.data.fill_(batchnorm_weight)
            if m.bias is not None:
                m.bias.data.zero_()
        if isinstance(m, nn.Linear):
            if hasattr(m, "xavier_init") and m.xavier_init:
                c2_xavier_fill(m)
            else:
                m.weight.data.normal_(mean=0.0, std=fc_init_std)
            if m.bias is not None:
                m.bias.data.zero_()


# pool1

_POOL1 = {
    "2d": [[1, 1, 1]],
    "c2d": [[2, 1, 1]],
    "slow_c2d": [[1, 1, 1]],
    "i3d": [[2, 1, 1]],
    "slow_i3d": [[1, 1, 1]],
    "slow": [[1, 1, 1]],
    "slowfast": [[1, 1, 1], [1, 1, 1]],
    "x3d": [[1, 1, 1]],
}

# temporal kernel basis

_TEMPORAL_KERNEL_BASIS = {
    "2d": [
        [[1]],  # conv1 temporal kernel.
        [[1]],  # res2 temporal kernel.
        [[1]],  # res3 temporal kernel.
        [[1]],  # res4 temporal kernel.
        [[1]],  # res5 temporal kernel.
    ],
    "c2d": [
        [[1]],  # conv1 temporal kernel.
        [[1]],  # res2 temporal kernel.
        [[1]],  # res3 temporal kernel.
        [[1]],  # res4 temporal kernel.
        [[1]],  # res5 temporal kernel.
    ],
    "slow_c2d": [
        [[1]],  # conv1 temporal kernel.
        [[1]],  # res2 temporal kernel.
        [[1]],  # res3 temporal kernel.
        [[1]],  # res4 temporal kernel.
        [[1]],  # res5 temporal kernel.
    ],
    "i3d": [
        [[5]],  # conv1 temporal kernel.
        [[3]],  # res2 temporal kernel.
        [[3, 1]],  # res3 temporal kernel.
        [[3, 1]],  # res4 temporal kernel.
        [[1, 3]],  # res5 temporal kernel.
    ],
    "slow_i3d": [
        [[5]],  # conv1 temporal kernel.
        [[3]],  # res2 temporal kernel.
        [[3, 1]],  # res3 temporal kernel.
        [[3, 1]],  # res4 temporal kernel.
        [[1, 3]],  # res5 temporal kernel.
    ],
    "slow": [
        [[1]],  # conv1 temporal kernel.
        [[1]],  # res2 temporal kernel.
        [[1]],  # res3 temporal kernel.
        [[3]],  # res4 temporal kernel.
        [[3]],  # res5 temporal kernel.
    ],
    "slowfast": [
        [[1], [5]],  # conv1 temporal kernel for slow and fast pathway.
        [[1], [3]],  # res2 temporal kernel for slow and fast pathway.
        [[1], [3]],  # res3 temporal kernel for slow and fast pathway.
        [[3], [3]],  # res4 temporal kernel for slow and fast pathway.
        [[3], [3]],  # res5 temporal kernel for slow and fast pathway.
    ],
    "x3d": [
        [[5]],  # conv1 temporal kernels.
        [[3]],  # res2 temporal kernels.
        [[3]],  # res3 temporal kernels.
        [[3]],  # res4 temporal kernels.
        [[3]],  # res5 temporal kernels.
    ],
}

# model stage depth

_MODEL_STAGE_DEPTH = {18: (2, 2, 2, 2), 50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}

# X3D model

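For the "x3d" entry these tables give one pathway with a temporal kernel of 5 in the stem and 3 in every residual stage; `_construct_network` below assembles the stem kernel from it:

temp_kernel = _TEMPORAL_KERNEL_BASIS["x3d"]
stem_kernel = temp_kernel[0][0] + [3, 3]   # [5, 3, 3]
stem_tpad = temp_kernel[0][0][0] // 2      # 2 (temporal padding)
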
class X3D(nn.Module):
    """
    X3D model builder. It builds an X3D network backbone, which is a ResNet.

    Christoph Feichtenhofer.
    "X3D: Expanding Architectures for Efficient Video Recognition."
    https://arxiv.org/abs/2004.04730
    """

    def __init__(self, cfg):
        """
        The `__init__` method of any subclass should also contain these
        arguments.

        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        super(X3D, self).__init__()
        self.norm_module = get_norm(cfg)
        self.enable_detection = cfg.DETECTION.ENABLE
        self.num_pathways = 1

        exp_stage = 2.0
        self.dim_c1 = cfg.X3D.DIM_C1

        self.dim_res2 = (
            round_width(self.dim_c1, exp_stage, divisor=8)
            if cfg.X3D.SCALE_RES2
            else self.dim_c1
        )
        self.dim_res3 = round_width(self.dim_res2, exp_stage, divisor=8)
        self.dim_res4 = round_width(self.dim_res3, exp_stage, divisor=8)
        self.dim_res5 = round_width(self.dim_res4, exp_stage, divisor=8)

        self.block_basis = [
            # blocks, c, stride
            [1, self.dim_res2, 2],
            [2, self.dim_res3, 2],
            [5, self.dim_res4, 2],
            [3, self.dim_res5, 2],
        ]
        self._construct_network(cfg)
        init_weights(self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN)

    def _round_repeats(self, repeats, multiplier):
        """Round number of layers based on depth multiplier."""
        if not multiplier:
            return repeats
        return int(math.ceil(multiplier * repeats))

    def _construct_network(self, cfg):
        """
        Builds a single pathway X3D model.

        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        assert cfg.MODEL.ARCH in _POOL1.keys()
        assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()

        (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]

        num_groups = cfg.RESNET.NUM_GROUPS
        width_per_group = cfg.RESNET.WIDTH_PER_GROUP
        dim_inner = num_groups * width_per_group

        w_mul = cfg.X3D.WIDTH_FACTOR
        d_mul = cfg.X3D.DEPTH_FACTOR
        dim_res1 = round_width(self.dim_c1, w_mul)

        temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]

        self.s1 = VideoModelStem(
            dim_in=cfg.DATA.INPUT_CHANNEL_NUM,
            dim_out=[dim_res1],
            kernel=[temp_kernel[0][0] + [3, 3]],
            stride=[[1, 2, 2]],
            padding=[[temp_kernel[0][0][0] // 2, 1, 1]],
            norm_module=self.norm_module,
            stem_func_name="x3d_stem",
        )

        # blob_in = s1
        dim_in = dim_res1
        for stage, block in enumerate(self.block_basis):
            dim_out = round_width(block[1], w_mul)
            dim_inner = int(cfg.X3D.BOTTLENECK_FACTOR * dim_out)

            n_rep = self._round_repeats(block[0], d_mul)
            # Start at res2 to follow the ResNet stage naming convention.
            prefix = "s{}".format(stage + 2)

            s = ResStage(
                dim_in=[dim_in],
                dim_out=[dim_out],
                dim_inner=[dim_inner],
                temp_kernel_sizes=temp_kernel[1],
                stride=[block[2]],
                num_blocks=[n_rep],
                num_groups=[dim_inner]
                if cfg.X3D.CHANNELWISE_3x3x3
                else [num_groups],
                num_block_temp_kernel=[n_rep],
                nonlocal_inds=cfg.NONLOCAL.LOCATION[0],
                nonlocal_group=cfg.NONLOCAL.GROUP[0],
                nonlocal_pool=cfg.NONLOCAL.POOL[0],
                instantiation=cfg.NONLOCAL.INSTANTIATION,
                trans_func_name=cfg.RESNET.TRANS_FUNC,
                stride_1x1=cfg.RESNET.STRIDE_1X1,
                norm_module=self.norm_module,
                dilation=cfg.RESNET.SPATIAL_DILATIONS[stage],
                drop_connect_rate=cfg.MODEL.DROPCONNECT_RATE
                * (stage + 2)
                / (len(self.block_basis) + 1),
            )
            dim_in = dim_out
            self.add_module(prefix, s)

        if self.enable_detection:
            raise NotImplementedError
        else:
            spat_sz = int(math.ceil(cfg.DATA.TRAIN_CROP_SIZE / 32.0))
            self.head = X3DHead(
                dim_in=dim_out,
                dim_inner=dim_inner,
                dim_out=cfg.X3D.DIM_C5,
                num_classes=cfg.MODEL.NUM_CLASSES,
                pool_size=[cfg.DATA.NUM_FRAMES, spat_sz, spat_sz],
                dropout_rate=cfg.MODEL.DROPOUT_RATE,
                act_func=cfg.MODEL.HEAD_ACT,
                bn_lin5_on=cfg.X3D.BN_LIN5,
            )

    def forward(self, x, bboxes=None):
        for module in self.children():
            x = module(x)
        return x

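To make the width/depth expansion concrete, here is a hedged trace assuming X3D-M style factors (WIDTH_FACTOR 2.0, DEPTH_FACTOR 2.2, DIM_C1 12 — verify these against the shipped config.yml):

import math

w_mul, d_mul = 2.0, 2.2
# block_basis channels with DIM_C1 = 12: dim_res2..5 = 24, 48, 96, 192
widths = [round_width(c, w_mul) for c in (24, 48, 96, 192)]   # [48, 96, 192, 384]
depths = [int(math.ceil(d_mul * b)) for b in (1, 2, 5, 3)]    # [3, 5, 11, 7]
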
def build_model(cfg, gpu_id=None):
    if torch.cuda.is_available():
        assert (
            cfg.NUM_GPUS <= torch.cuda.device_count()
        ), "Cannot use more GPU devices than available"
    else:
        assert (
            cfg.NUM_GPUS == 0
        ), "Cuda is not available. Please set `NUM_GPUS: 0` for running on CPUs."

    # Construct the model.
    model = X3D(cfg)

    if cfg.BN.NORM_TYPE == "sync_batchnorm_apex":
        try:
            import apex
        except ImportError:
            raise ImportError("APEX is required for this model, please install")

        process_group = apex.parallel.create_syncbn_process_group(
            group_size=cfg.BN.NUM_SYNC_DEVICES
        )
        model = apex.parallel.convert_syncbn_model(model, process_group=process_group)

    if cfg.NUM_GPUS:
        if gpu_id is None:
            # Determine the GPU used by the current process.
            cur_device = torch.cuda.current_device()
        else:
            cur_device = gpu_id
        # Transfer the model to the current GPU device.
        model = model.cuda(device=cur_device)
        # Use multi-process data parallel model in the multi-gpu setting.
        if cfg.NUM_GPUS > 1:
            # Make model replica operate on the current device.
            model = torch.nn.parallel.DistributedDataParallel(
                module=model,
                device_ids=[cur_device],
                output_device=cur_device,
                find_unused_parameters=(
                    True
                    if cfg.MODEL.DETACH_FINAL_FC
                    or cfg.MODEL.MODEL_NAME == "ContrastiveModel"
                    else False
                ),
            )
            if cfg.MODEL.FP16_ALLREDUCE:
                model.register_comm_hook(
                    state=None, hook=comm_hooks_default.fp16_compress_hook
                )
    return model
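End to end, the pieces compose as below; a sketch assuming the shipped config.yml and the `load_config` helper in cfg.py, running on CPU:

import torch
from x3d_model.cfg import load_config
from x3d_model.x3d import build_model

cfg = load_config("config.yml")
cfg.NUM_GPUS = 0                         # take the CPU path in build_model
model = build_model(cfg).eval()
clip = torch.randn(1, 3, cfg.DATA.NUM_FRAMES, 160, 160)   # spatial size illustrative
with torch.no_grad():
    preds = model([clip])                # single-pathway input is a one-element list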