diff --git a/.gitattributes b/.gitattributes
index 0eaa66ae863dc5509db0f2cfbdd9cc97f4d98b92..b3da20d7422bcc5b2e852dd226d76195813e3f52 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -34,3 +34,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 pytorch-image-models/timm/models/__pycache__/vision_transformer.cpython-39.pyc filter=lfs diff=lfs merge=lfs -text
+pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/run-3pv3zoe0.wandb filter=lfs diff=lfs merge=lfs -text
diff --git a/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/args.yaml b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/args.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1cdc3d565656ca7b43534aa4b23e59a4f159d4ef
--- /dev/null
+++ b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/args.yaml
@@ -0,0 +1,153 @@
+aa: null
+amp: true
+amp_dtype: float16
+amp_impl: native
+aug_repeats: 0
+aug_splits: 0
+batch_size: 256
+bce_loss: false
+bce_pos_weight: null
+bce_sum: false
+bce_target_thresh: null
+bn_eps: null
+bn_momentum: null
+channels_last: false
+checkpoint_hist: 10
+class_map: ''
+clip_grad: null
+clip_mode: norm
+color_jitter: 0.4
+color_jitter_prob: null
+cooldown_epochs: 0
+crop_pct: null
+cutmix: 0.0
+cutmix_minmax: null
+data: null
+data_dir: null
+dataset: hfds/datacomp/imagenet-1k-random-60.0-frac-1over4
+dataset_download: false
+dataset_trust_remote_code: false
+decay_epochs: 90
+decay_milestones:
+- 90
+- 180
+- 270
+decay_rate: 0.1
+device: cuda
+device_modules: null
+dist_bn: reduce
+drop: 0.0
+drop_block: null
+drop_connect: null
+drop_path: null
+epoch_repeats: 0.0
+epochs: 150
+eval_metric: top1
+experiment: ImageNetTraining60.0-frac-1over4
+fast_norm: false
+fuser: ''
+gaussian_blur_prob: null
+gp: null
+grad_accum_steps: 1
+grad_checkpointing: false
+grayscale_prob: null
+head_init_bias: null
+head_init_scale: null
+hflip: 0.5
+img_size: null
+in_chans: null
+initial_checkpoint: ''
+input_img_mode: null
+input_key: null
+input_size: null
+interpolation: ''
+jsd_loss: false
+layer_decay: null
+local_rank: 0
+log_interval: 50
+log_wandb: true
+lr: 0.4
+lr_base: 0.1
+lr_base_scale: ''
+lr_base_size: 256
+lr_cycle_decay: 0.5
+lr_cycle_limit: 1
+lr_cycle_mul: 1.0
+lr_k_decay: 1.0
+lr_noise: null
+lr_noise_pct: 0.67
+lr_noise_std: 1.0
+mean: null
+min_lr: 0
+mixup: 0.0
+mixup_mode: batch
+mixup_off_epoch: 0
+mixup_prob: 1.0
+mixup_switch_prob: 0.5
+model: seresnet34
+model_dtype: null
+model_ema: false
+model_ema_decay: 0.9998
+model_ema_force_cpu: false
+model_ema_warmup: false
+model_kwargs: {}
+momentum: 0.9
+no_aug: false
+no_ddp_bb: false
+no_prefetcher: false
+no_resume_opt: false
+num_classes: null
+opt: sgd
+opt_betas: null
+opt_eps: null
+opt_kwargs: {}
+output: ''
+patience_epochs: 10
+pin_mem: false
+pretrained: false
+pretrained_path: null
+ratio:
+- 0.75
+- 1.3333333333333333
+recount: 1
+recovery_interval: 0
+remode: pixel
+reprob: 0.5
+resplit: false
+resume: ''
+save_images: false
+scale:
+- 0.08
+- 1.0
+sched: cosine
+sched_on_updates: false
+seed: 42
+smoothing: 0.1
+split_bn: false
+start_epoch: null
+std: null
+sync_bn: false
+synchronize_step: false
+target_key: null
+torchcompile: null
+torchcompile_mode: null
+torchscript: false
+train_crop_mode: null
+train_interpolation: random
+train_num_samples: null
+train_split: train
+tta: 0
+use_multi_epochs_loader: false
+val_num_samples: null
+val_split: validation
+validation_batch_size: null
+vflip: 0.0
+wandb_project: ImageNetTraining60.0-frac-1over4
+wandb_resume_id: ''
+wandb_tags: []
+warmup_epochs: 5
+warmup_lr: 1.0e-05
+warmup_prefix: false
+weight_decay: 2.0e-05
+worker_seeding: all
+workers: 4
diff --git a/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar
new file mode 100644
index 0000000000000000000000000000000000000000..b0527ce88899d49c33545a26f1ca85ea86f12671
--- /dev/null
+++ b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bdbd13d19b869a7213e2c1905cc0a22f26fe24a51f80bdb639ad75d80a6eea10
+size 175905122
diff --git a/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-45.pth.tar b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-45.pth.tar
new file mode 100644
index 0000000000000000000000000000000000000000..819424aa04ee3ddeb47967df12954ba140aa1fc1
--- /dev/null
+++ b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-45.pth.tar
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e56e4947d8125a00e1af2da99266f75f3d269ab7fd33962dc47d20f4b792ac6
+size 175905122
diff --git a/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-46.pth.tar b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-46.pth.tar
new file mode 100644
index 0000000000000000000000000000000000000000..83fe0b52be19a12b6de3c4b3be999324f55400d9
--- /dev/null
+++ b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-46.pth.tar
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af7b6ae800c085dfe22056a2773fb98b0321dd184b047a17b02b6e103cc85471
+size 175905122
diff --git a/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-47.pth.tar b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-47.pth.tar
new file mode 100644
index 0000000000000000000000000000000000000000..6552221172cc9cc9869abab5f118fe35f50735dd
--- /dev/null
+++ b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-47.pth.tar
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d946ae688449756d1e9742433ff6c04e03c314645a5a5cecb72cabbeb659f319
+size 175905122
diff --git a/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-50.pth.tar b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-50.pth.tar
new file mode 100644
index 0000000000000000000000000000000000000000..9e9cb7c80c4f98a8910f8447c451aab5c407627d
--- /dev/null
+++ b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-50.pth.tar
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9137614894cc30a911966ca50da96ad5cfac4b1dcd197b0412fbc1235143d12
+size 175905122
diff --git a/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-54.pth.tar b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-54.pth.tar
new file mode 100644
index 0000000000000000000000000000000000000000..c81d70b5e358aca383c1eaf0af719f7d78e523ba
--- /dev/null
+++ b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-54.pth.tar
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07d26455039e21c71b876e5ddcf8710a62fa38bd7be35dc283a7aaa1eb9522b6
+size 175905122
diff --git a/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-56.pth.tar b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-56.pth.tar
new file mode 100644
index 0000000000000000000000000000000000000000..f49784b5a80c6bef1c2459eff729df1cd0029d9d
--- /dev/null
+++ b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-56.pth.tar
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19ef1689c6dcc25bfbaef3227bf8d2034136aa3e741fa7ab7de080e267d81139
+size 175905122
diff --git a/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-57.pth.tar b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-57.pth.tar
new file mode 100644
index 0000000000000000000000000000000000000000..1820a7782e60223d4d3405440247806520385365
--- /dev/null
+++ b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-57.pth.tar
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:982e998ba16c1ee760d117baebc24ecb8e3209c72b670db7ab1b45c6cb828c56
+size 175905122
diff --git a/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-59.pth.tar b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-59.pth.tar
new file mode 100644
index 0000000000000000000000000000000000000000..e46ce929f1083acc0ed85c232a7e41c0c1c032f2
--- /dev/null
+++ b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-59.pth.tar
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d746db1aa6e78282b7e1f819ebc22f408c76bd8cba2018f23184db53f6c08e8e
+size 175905122
diff --git a/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-61.pth.tar b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-61.pth.tar
new file mode 100644
index 0000000000000000000000000000000000000000..a89729b8c3182ec6df1f71a1cc3fa5e29d508ccd
--- /dev/null
+++ b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/checkpoint-61.pth.tar
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d3b16cc52e55d89e2d6eaa633bb1e09e02fe17286e1ae106e3f7993213ca78e
+size 175905122
diff --git a/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/last.pth.tar b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/last.pth.tar
new file mode 100644
index 0000000000000000000000000000000000000000..7b231a18949d3c00e4db9bba0d55b3a1382124ee
--- /dev/null
+++ b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/last.pth.tar
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cdbd57ab8a23262683b415a253458285ce03a69d9c76c435e041bd1a92fb6749
+size 175905122
diff --git a/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/model_best.pth.tar b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/model_best.pth.tar
new file mode 100644
index 0000000000000000000000000000000000000000..e46ce929f1083acc0ed85c232a7e41c0c1c032f2
--- /dev/null
+++ b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/model_best.pth.tar
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d746db1aa6e78282b7e1f819ebc22f408c76bd8cba2018f23184db53f6c08e8e
+size 175905122
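The checkpoints above are Git LFS pointers to ~176 MB seresnet34 training snapshots; model_best.pth.tar points at the same LFS object as checkpoint-59.pth.tar, so epoch 59 is the best epoch of this run. A minimal sketch (not part of this commit) of how these artifacts could be reloaded, assuming timm, torch and PyYAML are available and that the checkpoint keeps its weights under a "state_dict" key, as timm's train script normally writes them:

```python
# Sketch (not from the diff): rebuild the model described by args.yaml and load
# the best checkpoint. Paths mirror the files added in this commit.
import yaml
import torch
import timm

run_dir = "pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4"

with open(f"{run_dir}/args.yaml") as f:
    args = yaml.safe_load(f)

# num_classes is null in args.yaml, so the model default (1000 for ImageNet) applies.
model = timm.create_model(args["model"], num_classes=args["num_classes"] or 1000)

checkpoint = torch.load(f"{run_dir}/model_best.pth.tar", map_location="cpu")
# Assumption: weights stored under "state_dict"; fall back to a raw state dict otherwise.
state_dict = checkpoint.get("state_dict", checkpoint)
model.load_state_dict(state_dict)
model.eval()
```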
diff --git a/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/summary.csv b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/summary.csv
new file mode 100644
index 0000000000000000000000000000000000000000..8dbe239e76e1de0fdfb4f7b6bfd2ab743f7f2af0
--- /dev/null
+++ b/pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4/summary.csv
@@ -0,0 +1,151 @@
+epoch,train_loss,eval_loss,eval_top1,eval_top5,lr
+0,6.9431939125061035,6.938741255187988,0.0779999999666214,0.5160000001525878,1e-05
+1,6.9015727043151855,6.7361672097778325,0.7839999990081787,3.3579999989318847,0.080008
+2,6.86771297454834,6.597556951599121,1.2600000004577636,4.526000001983642,0.160006
+3,6.843715667724609,6.518927840118408,1.8100000009155273,6.318000005187988,0.240004
+4,6.820131301879883,6.499558243103027,1.903999999885559,5.818000002441407,0.320002
+5,6.795845985412598,6.353902525634766,2.794000001220703,9.218000007019043,0.39890437907365467
+6,6.761190414428711,6.1470500317382815,3.7980000045776365,12.226000017089843,0.3984229402628956
+7,6.728061676025391,5.993895729370117,4.726000002593994,14.166000036621094,0.3978544665925977
+8,6.695578575134277,5.9294788291931155,5.476000002593994,15.958000021972657,0.39719920741410103
+9,6.665139198303223,5.8433496380615235,6.49200001159668,18.124000016479492,0.3964574501457378
+10,6.634745121002197,5.918696633300781,5.554000005950928,15.396000007324218,0.39562952014676117
+11,6.601431846618652,5.6566438526916505,8.090000014953613,21.510000021972655,0.3947157805746321
+12,6.568431854248047,5.548304178161621,9.55400000793457,23.796000008544922,0.3937166322257262
+13,6.5364532470703125,5.538003281402588,10.246000008544922,25.42400003051758,0.39263251335953164
+14,6.501322269439697,5.287481235809326,12.366000008850097,29.67200003234863,0.39146389950641347
+15,6.470037460327148,5.1611686392211915,13.138000017700195,31.27600000366211,0.39021130325903075
+16,6.436946868896484,5.2300570068359375,12.626000008850097,29.3320000402832,0.3888752740474963
+17,6.403077125549316,5.08999427154541,15.188,34.12399998901367,0.3874563978983783
+18,6.373705863952637,5.045312646179199,15.478000010375977,34.544,0.3859552971776503
+19,6.345022201538086,4.827629887542725,16.478000032348632,36.77799999267578,0.38437263031770014
+20,6.31539249420166,4.967710489959717,16.898000005493163,36.89000003540039,0.3827090915285202
+21,6.286393165588379,4.9297763357543944,17.06800000793457,37.756000034179685,0.3809654104932039
+22,6.260019302368164,4.716403913726807,19.27799999572754,41.00200004882812,0.3791423520478826
+23,6.233293533325195,4.595842574615479,20.880000010375976,43.32799998779297,0.37724071584624297
+24,6.206594944000244,4.621469277496338,21.137999993286133,43.3940000390625,0.3752613360087727
+25,6.181107997894287,4.68270667755127,21.076000007324218,43.779999982910155,0.37320508075688774
+26,6.155818939208984,4.480731481781006,23.350000003051758,46.747999993896485,0.3710728520321014
+27,6.132245063781738,4.58205734741211,21.125999998779296,43.17400005981445,0.368865585100403
+28,6.106157302856445,4.430375918273926,23.511999995117186,46.86000003173828,0.3665842481420199
+29,6.083177089691162,4.372953954162598,25.094000006713866,48.63599998168945,0.36422984182674084
+30,6.060105800628662,4.635352181854248,21.74200001037598,43.726000050048825,0.3618033988749895
+31,6.0379486083984375,4.421996555175781,24.37600004333496,47.73799997802735,0.3593059836048393
+32,6.016122341156006,4.4290398179626465,25.06200001953125,48.061999975585934,0.356738691465168
+33,5.992985725402832,4.377739123840332,24.748000018920898,48.03000000732422,0.3541026485551579
+34,5.971718788146973,4.329410245513916,25.422000014038087,49.2980000390625,0.3513990111303513
+35,5.950179100036621,4.250980354614258,26.839999993896484,50.13200002807617,0.3486289650954789
+36,5.925711154937744,4.305513177490234,26.862000026245116,50.41600001708984,0.34579372548428233
+37,5.907745361328125,4.332296510467529,25.720000025634764,48.91000004638672,0.3428945359265607
+38,5.886173248291016,4.302208611450196,25.623999998779297,48.79999997924805,0.33993266810267314
+39,5.863058567047119,4.187077933807373,28.2,51.88400002319336,0.33690942118573775
+40,5.8422112464904785,4.2177295486450195,27.964000013427736,51.70400002563476,0.3338261212717717
+41,5.821564674377441,4.1039053334045414,29.65000002319336,53.75599997314453,0.3306841207980211
+42,5.797905445098877,4.150510595703125,28.769999994506836,52.24800006103516,0.32748479794973795
+43,5.781131744384766,4.163139173583985,28.965999995117187,52.30800000732422,0.3242295560556621
+44,5.759064197540283,4.139593399505615,29.049999995117187,52.72400003173828,0.320919822972475
+45,5.736339569091797,4.124713208160401,29.761999970703126,52.88199995117188,0.31755705045849464
+46,5.714443206787109,4.118756468887329,29.864000028076173,53.24200000488281,0.3141427135368864
+47,5.691268444061279,4.099962968597412,29.39800007080078,52.71400003417969,0.31067830984866884
+48,5.669220924377441,4.176185885620117,28.84800002685547,51.88599996337891,0.30716535899579933
+49,5.650993347167969,4.226873478698731,28.6840000378418,51.31400004516602,0.3036054018746261
+50,5.626305103302002,4.127080610122681,29.295999973144532,51.73399998535156,0.30000000000000004
+51,5.604864597320557,4.156160714569092,29.07399998413086,51.81999998046875,0.2963507348203431
+52,5.58112907409668,4.160558588256836,28.74600003173828,51.189999958496095,0.29265920702397236
+53,5.562441825866699,4.241159338684082,28.058000018310548,50.40800001953125,0.2889270358369855
+54,5.538029670715332,4.130886701507569,29.330000024414062,51.35399999023438,0.28515585831301454
+55,5.520533084869385,4.187742267150879,29.152000034179686,51.24000003051758,0.28134732861516004
+56,5.494744300842285,4.162322541198731,29.330000010375976,50.95200004150391,0.2775031172904206
+57,5.473479747772217,4.173910147705078,29.294000010375978,51.52399995849609,0.27362491053693566
+58,5.4495744705200195,4.462543117523193,25.114000014038087,45.26600001220703,0.26971440946436304
+59,5.426791667938232,4.113894800415039,30.15600001953125,52.042000047607424,0.26577332934771664
+60,5.404280185699463,4.23888961807251,28.214000012817383,49.11800001098633,0.2618033988749895
+61,5.384984970092773,4.142473132171631,29.25200003051758,50.66400000366211,0.25780635938889435
+62,5.35830020904541,4.195645713806153,28.885999978637695,50.28199999267578,0.25378396412305315
+63,5.332411289215088,4.259078891601563,28.104000010375977,48.86200004882812,0.24973797743297094
+64,5.316403865814209,4.19121046661377,29.00799998413086,50.08800004882813,0.24567017402213118
+65,5.290327548980713,4.229200247344971,28.388000015869142,49.00600009277344,0.2415823381635519
+66,5.268279552459717,4.241606517028808,28.468000005493163,49.18199999511719,0.23747626291714496
+67,5.246063232421875,4.338318730163574,27.24000001220703,47.20200005859375,0.23335374934322048
+68,5.216652870178223,4.3364053242492675,26.64800001220703,46.436000020751955,0.22921660571248237
+69,5.1979570388793945,4.386739821624756,26.31200001525879,46.072000061035155,0.22506664671286086
+70,5.170787811279297,4.292436854553222,27.540000014648438,47.748000029296875,0.22090569265353077
+71,5.148033618927002,4.261131006317139,27.456000025634765,47.97000006469727,0.21673556866646312
+72,5.122681140899658,4.29786947479248,27.742000006713866,47.55200002929688,0.21255810390586272
+73,5.102504730224609,4.2951887347412105,27.90800002319336,47.82400002197266,0.20837513074583996
+74,5.071815490722656,4.398047969512939,26.700000017700194,45.82999999023438,0.20418848397667141
+75,5.052424430847168,4.316555525817871,27.584000025634765,47.44000006103516,0.2
+76,5.024328231811523,4.425414015350341,26.04800001098633,44.82400004638672,0.1958115160233287
+77,5.005191802978516,4.354408746643067,27.03800003112793,46.26000009033203,0.19162486925416009
+78,4.978161811828613,4.374107049102784,26.780000023803712,45.85000005249024,0.1874418960941374
+79,4.958959102630615,4.520292028503418,25.146000010375978,43.57600001098633,0.1832644313335369
+80,4.929296016693115,4.510488933715821,25.61000002746582,43.778000017089845,0.17909430734646936
+81,4.904443264007568,4.440168070526123,26.198000014038087,44.84200006591797,0.1749333532871392
+82,4.877249717712402,4.465066349182129,26.00199999511719,44.64999999267578,0.17078339428751776
+83,4.854767799377441,4.4920959765625,25.551999997558593,44.138000068359375,0.16664625065677952
+84,4.827182769775391,4.530396635437012,25.448000021972657,43.43600004638672,0.16252373708285509
+85,4.809326171875,4.61370787689209,24.430000024414063,42.26600003417969,0.15841766183644812
+86,4.776586055755615,4.647791392211914,24.29000001098633,41.962000029296874,0.1543298259778689
+87,4.749677658081055,4.581302818603516,24.678000009155273,42.941999963378905,0.15026202256702909
+88,4.728340148925781,4.6014200486755374,24.30000003479004,42.37600005371094,0.14621603587694693
+89,4.699658393859863,4.579655709381104,24.797999995117188,42.71999999267578,0.14219364061110565
+90,4.677910327911377,4.708592195892334,23.104000042114258,40.8500000366211,0.13819660112501053
+91,4.646760940551758,4.764935299072266,22.988000017700195,40.17600001953125,0.13422667065228336
+92,4.622101783752441,4.729629945373535,23.346000029296874,40.774000078125,0.130285590535637
+93,4.6001200675964355,4.613048763427734,24.37600003540039,42.145999979248046,0.12637508946306447
+94,4.5732421875,4.722900708160401,23.43200000793457,40.80999998901367,0.12249688270957942
+95,4.54327917098999,4.760393147277832,23.07000000793457,40.33000006958008,0.11865267138483995
+96,4.515352249145508,4.600982042541504,24.59399998413086,42.51400003295898,0.11484414168698548
+97,4.488563537597656,4.7580890507507325,23.30200001220703,40.47600002807617,0.11107296416301456
+98,4.465057849884033,4.705651960449218,23.781999975585936,40.995999982910156,0.10734079297602771
+99,4.433521747589111,4.747479717712403,23.35800001831055,40.62000001831055,0.10364926517965692
+100,4.412936687469482,4.7600954281616215,23.154000026855467,40.42600005004883,0.09999999999999996
+101,4.387495040893555,4.975237618713379,21.509999990844726,37.89200000366211,0.09639459812537399
+102,4.357723236083984,4.819995206604004,22.978000026245116,39.84199998535156,0.09283464100420072
+103,4.334304332733154,4.792036187744141,22.73000001586914,39.95799998535156,0.0893216901513312
+104,4.2998528480529785,4.844485811309815,22.57600001281738,39.310000041503905,0.08585728646311369
+105,4.274172306060791,5.002657133331299,20.984000005493165,37.25399999511719,0.0824429495415054
+106,4.248683929443359,4.8214094943237304,22.416000007324218,39.36200003173828,0.07908017702752504
+107,4.222282886505127,4.8479905697631835,22.296000006713868,39.231999985351564,0.07577044394433795
+108,4.196178913116455,4.98984641494751,21.334000009765624,37.751999990234374,0.07251520205026206
+109,4.168429851531982,5.016520691680908,21.03800002075195,37.57799998779297,0.06931587920197897
+110,4.141824722290039,5.049998479766845,20.712000008544923,37.19600004394531,0.06617387872822836
+111,4.12200403213501,4.973765448760986,21.33600003601074,38.01800004638672,0.06309057881426226
+112,4.089399337768555,5.010670317535401,21.222000009155273,37.800000032958984,0.060067331897326895
+113,4.066384315490723,4.945170684509278,22.04600001586914,38.55399998779297,0.057105464073439374
+114,4.035727024078369,5.037110093536377,21.167999990844727,37.49800002197266,0.05420627451571774
+115,4.01347541809082,4.992048675537109,21.403999987792968,37.749999979248045,0.051371034904521135
+116,3.986830472946167,4.994148792266846,21.476000025634765,37.718000006103516,0.04860098886964875
+117,3.961311101913452,5.099753927307129,20.652000017089843,36.63400004638672,0.04589735144484217
+118,3.9379918575286865,5.104892230377197,20.375999982910155,36.44999994384766,0.04326130853483206
+119,3.9125819206237793,5.124689064788818,20.242000021972657,36.39200000244141,0.04069401639516075
+120,3.8880300521850586,5.097174103546142,20.818000030517577,36.939999954833986,0.03819660112501053
+121,3.86423659324646,5.035004398040772,20.993999998779298,37.15000001953125,0.035770158173259195
+122,3.8457257747650146,5.0661964001464845,21.14200003051758,37.25399999267578,0.03341575185798012
+123,3.822751760482788,5.184377702331543,19.766000025634767,35.86600002197266,0.031134414899596986
+124,3.799717903137207,5.045239634857178,21.172000009155273,37.38200004394531,0.02892714796789868
+125,3.774881362915039,5.152091053009033,20.188000011596678,36.28800000488281,0.026794919243112305
+126,3.75713849067688,5.215371259002685,19.90200000854492,35.71800002197266,0.024738663991227285
+127,3.7324070930480957,5.141581241149902,20.404000028076172,36.27000003540039,0.022759284153757053
+128,3.71870493888855,5.137493869781494,20.19800000366211,36.19000005981445,0.020857647952117465
+129,3.69716477394104,5.2152932823181155,20.02000001953125,35.74599997314453,0.01903458950679613
+130,3.6775317192077637,5.155171538848877,20.214000021972655,36.12400001342773,0.01729090847147985
+131,3.6619601249694824,5.185123553771973,20.13600001953125,35.99200001953125,0.015627369682299875
+132,3.6486687660217285,5.164827609558105,20.22400003051758,36.130000021972656,0.014044702822349731
+133,3.6263937950134277,5.1940673951721195,20.004000032958984,36.01599996826172,0.01254360210162171
+134,3.61950421333313,5.16515736541748,20.171999998168946,36.218000035400394,0.0111247259525038
+135,3.6119253635406494,5.149724328308105,20.431999998168944,36.224000048828124,0.009788696740969294
+136,3.594918727874756,5.183282818603516,20.04000001953125,36.06799996826172,0.008536100493586551
+137,3.5832762718200684,5.191941630706787,19.936000008544923,35.86799997558594,0.007367486640468379
+138,3.5745418071746826,5.16351744506836,20.136000014038085,36.190000048828125,0.0062833677742737855
+139,3.5626986026763916,5.199679564971924,19.880000016479492,35.760000029296876,0.005284219425367942
+140,3.5521106719970703,5.160710575256347,20.281999987182616,36.246000024414066,0.004370479853238885
+141,3.5405547618865967,5.197849544677735,19.89400001647949,35.89200003540039,0.003542549854262278
+142,3.5347208976745605,5.150363023071289,20.26000000061035,36.396000021972654,0.0028007925858990037
+143,3.5302460193634033,5.188460216979981,19.919999979248047,36.0280000378418,0.0021455334074023335
+144,3.5257999897003174,5.187040548095703,19.931999976196288,35.94799997314453,0.0015770597371044472
+145,3.521711826324463,5.202165876159668,19.904000032958983,35.87599997314453,0.001095620926345342
+146,3.515748977661133,5.20309140914917,19.889999995117186,35.81800001098633,0.000701428150099126
+147,3.5179338455200195,5.18720495803833,20.090000014038086,36.0179999975586,0.00039465431434568824
+148,3.5117876529693604,5.1865044105529785,20.016000014038084,35.972000008544924,0.00017543398022832336
+149,3.51277756690979,5.190892289123535,19.97199996826172,36.09400004882813,4.3863305030900085e-05
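summary.csv records one row per epoch. The best eval_top1 is 30.156% at epoch 59, which agrees with model_best.pth.tar above pointing at the same LFS object as checkpoint-59.pth.tar. A short sketch (pandas assumed, not part of the commit) that pulls out those headline numbers:

```python
# Sketch: summarize the per-epoch metrics logged in summary.csv.
import pandas as pd

run_dir = "pytorch-image-models/output/train/ImageNetTraining60.0-frac-1over4"
df = pd.read_csv(f"{run_dir}/summary.csv")

best = df.loc[df["eval_top1"].idxmax()]
last = df.iloc[-1]
print(f"best  top-1: {best['eval_top1']:.3f}% at epoch {int(best['epoch'])}")
print(f"final top-1: {last['eval_top1']:.3f}% at epoch {int(last['epoch'])}")
```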
a/pytorch-image-models/timm/models/__pycache__/_features_fx.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/_features_fx.cpython-39.pyc index 99ac6e5653acf1d0ad7b1affd282132009702d4f..ec91fdbd531611e79ce3492aa8faa7581aa4466e 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/_features_fx.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/_features_fx.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/_helpers.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/_helpers.cpython-39.pyc index cebf78f4b6311b0dc36fd242eeb4d169594305f4..09045f00329ca151ce93549a2afb4dfa2a490ef4 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/_helpers.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/_helpers.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/_hub.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/_hub.cpython-39.pyc index 392b413235320bf50875b212ffb6e59aff254f76..6ca6f45d4cfb9c27beafbf5c3f299b62187d719e 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/_hub.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/_hub.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/_manipulate.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/_manipulate.cpython-39.pyc index 1064f8844dd42e3bef34074e9da40af0149358c2..4f639762cf379d3ccf80e0edc2566977e9419566 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/_manipulate.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/_manipulate.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/_pretrained.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/_pretrained.cpython-39.pyc index 7c8121b30b3e9dbaba8faf9b7b8ede8303b4f4f7..4c4e86118f26999a77f572290072ddeca8a2855a 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/_pretrained.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/_pretrained.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/_prune.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/_prune.cpython-39.pyc index d66901fcf22416be704dba99783fcb410d527319..d3d667c761c19539bbdaa247f7cb0e0447a95654 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/_prune.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/_prune.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/_registry.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/_registry.cpython-39.pyc index 68e4a31b0e4414d42359e8bf754c76d3073ebc10..286a7885550f5e2d7370c92124d9e026066b761e 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/_registry.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/_registry.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/beit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/beit.cpython-39.pyc index 9021a96ef006504030c349bf27400d6f5dcbce0a..fcf1af79c4e416604117a3e1a736289334c5de9b 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/beit.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/beit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/byoanet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/byoanet.cpython-39.pyc index b5b6e9d1800871d8f25b82ac499e3583018aa446..4303ac2a99520e7b38902be4cb9f93522062f0d1 
100644 Binary files a/pytorch-image-models/timm/models/__pycache__/byoanet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/byoanet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/byobnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/byobnet.cpython-39.pyc index 2db285219ae40026d247377b15724f236aafe9e3..f22b557ad308cf07c340a51892d9a4dc591fe16d 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/byobnet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/byobnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/cait.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/cait.cpython-39.pyc index 063dae53fa4764e612f391d537421525a3f84851..91c4481ef941d670523dc8fa51c2f0d7310d95c7 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/cait.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/cait.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/coat.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/coat.cpython-39.pyc index 56c63f10b11fe33fae949f6313a1fa5f4ef1a14c..5f85c46e28c01f6d72916fd11ef6b8271b64f0d2 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/coat.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/coat.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/convit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/convit.cpython-39.pyc index 6a727753a1fb022490233993de8c5b5f0abbb1aa..0e73f3b79c5fd7c64940cb5d67d635af0df90f1f 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/convit.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/convit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/convmixer.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/convmixer.cpython-39.pyc index 5c101b588cbd225f00c5a3e70f49eded7587a6c6..faddaf5c784ca13d2a7a2d27fd5eec4950b630ae 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/convmixer.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/convmixer.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/convnext.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/convnext.cpython-39.pyc index 601213f1e9ee776ae8f6d78e4c67ec07811ae0e3..93e13ce84f6553460961ac7b46abb565ad31b3b0 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/convnext.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/convnext.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/crossvit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/crossvit.cpython-39.pyc index 3239c9e4ba525b541c55568c39f571c43dbe3ed4..be3bededd41ee1771e88bbe9531348f47b2df86b 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/crossvit.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/crossvit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/cspnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/cspnet.cpython-39.pyc index 68a57caf2e96ada45fe749ce0ad4bfeccea65c8a..db67eae5a6a2360135193719595c467047e55271 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/cspnet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/cspnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/davit.cpython-39.pyc 
b/pytorch-image-models/timm/models/__pycache__/davit.cpython-39.pyc index e68aec9f566e386da11316dd6ae71cb841b92f26..40a8c857461af305b7b6611de2497efd45c12034 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/davit.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/davit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/deit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/deit.cpython-39.pyc index 208c5f382c322fc35ef884cbd53ad3a52afcfd26..7770f3db4d39f2a63396e851bff9181b03d55a70 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/deit.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/deit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/densenet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/densenet.cpython-39.pyc index 8af111a12f2f01426346369b7b784c45eceab3f7..e108beb113e27d01d22126da7f924fdcade2dd19 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/densenet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/densenet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/dla.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/dla.cpython-39.pyc index d6d5dd123c25ef8e24158b5261ff4d5a07916d38..f6933f52b1b9bcb0692080a6568e8c51737f617c 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/dla.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/dla.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/dpn.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/dpn.cpython-39.pyc index 4fe1decdf3ee8920028858f0aa1ca2b1e7700d59..7e05b5d3ba8c343229bae1ee17e1a827f5b2f0cf 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/dpn.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/dpn.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/edgenext.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/edgenext.cpython-39.pyc index 81bdc32a9496ab73e2629486e3f19fb7536c8b1c..538b6f1d516acf162c2fc50774c5f515b9bb1d45 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/edgenext.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/edgenext.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/efficientformer.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/efficientformer.cpython-39.pyc index 0086d0b0279a1f38a2fe2e327bfd0db9dec4c4af..25e094c4d9b9c9e71ae43ee0d9513aa6e388969c 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/efficientformer.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/efficientformer.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/efficientformer_v2.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/efficientformer_v2.cpython-39.pyc index f279bde37bc194d48d9a6c1fa768bfa4f8022232..197a72e8dca923674009f447e9a46c37b7090682 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/efficientformer_v2.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/efficientformer_v2.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/efficientnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/efficientnet.cpython-39.pyc index b973e3998bd25b9481d532f77b4d8d85df636a08..9b50a92493f17ec6752b5eb062da371534ed5465 100644 Binary files 
a/pytorch-image-models/timm/models/__pycache__/efficientnet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/efficientnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/efficientvit_mit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/efficientvit_mit.cpython-39.pyc index a1a61b61a63264c4d2856066a3d0b47f1b2b6bff..5b3c85ced561df8c76dfc6d5a00a38cd629475bb 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/efficientvit_mit.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/efficientvit_mit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/efficientvit_msra.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/efficientvit_msra.cpython-39.pyc index a354388f49d475991ded991acb8caa204953f2d2..6370346b31f309e69ff7d353709571923ad40b98 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/efficientvit_msra.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/efficientvit_msra.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/eva.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/eva.cpython-39.pyc index ee60721e71b7dbc1316eec30ead6f87a7a3c8a62..754b5d514688ecdefebde41b253bd9d5873892d7 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/eva.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/eva.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/fastvit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/fastvit.cpython-39.pyc index 97073cc2c7b1f15e96aa0c35ee6a6e27f48fee32..07f528682d6e6cb72e8c38d4dd088f287d413cdd 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/fastvit.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/fastvit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/focalnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/focalnet.cpython-39.pyc index c91828ef631103c3991c13c8b2ba523d8726e4b8..7d8e58955b5af239654d609037d880735f7dc4ad 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/focalnet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/focalnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/gcvit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/gcvit.cpython-39.pyc index a5baed166bde474ce2cb20ec41be25d47216c4d2..29bd6e7d39f333b7d4975cdb5b2e6d89360ab8a4 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/gcvit.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/gcvit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/ghostnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/ghostnet.cpython-39.pyc index 3531cfb0696b65eb0c257ad8ee912a09851fa5ce..7590158d335f802637234ec90108e1c350151b9b 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/ghostnet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/ghostnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/hardcorenas.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/hardcorenas.cpython-39.pyc index ca731f3c4d15e8c89af02995d9cdc66575686559..55088c46ba3e70d59f023d1ee563ce9fee480c12 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/hardcorenas.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/hardcorenas.cpython-39.pyc differ diff --git 
a/pytorch-image-models/timm/models/__pycache__/hgnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/hgnet.cpython-39.pyc index 0b9a6dd5c4930148be14cd2888611118bee7c567..e5a5b1ef66bf97de33bb47ede801f416a53fd570 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/hgnet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/hgnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/hiera.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/hiera.cpython-39.pyc index fd7cb50bb54c12cb0bee4507837529b845244a1a..d981a116c9315a41b6600ceafa47919af0ea26e3 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/hiera.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/hiera.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/hieradet_sam2.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/hieradet_sam2.cpython-39.pyc index 7c2867fe8085293f928b36c2b2abdd89598d484b..2e5c285bd3b731ab07cbdad5ff25bf9e63b1e45c 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/hieradet_sam2.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/hieradet_sam2.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/hrnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/hrnet.cpython-39.pyc index f1c05c59deec90f50d337d42c2472cb5ff37e44f..be7f11ec85d3220f96c77b92a8696b46ceb8201c 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/hrnet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/hrnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/inception_next.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/inception_next.cpython-39.pyc index be159d97f56a48938d94e2ee8068fb56b9041faa..424171f6038dbd459ee5ed95e79ad9480fbbbc61 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/inception_next.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/inception_next.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/inception_resnet_v2.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/inception_resnet_v2.cpython-39.pyc index 5a09d300f73011b13521b770390eb14c604cc8e7..8f7a3d3c3f0445607792187700a19faf683a3d56 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/inception_resnet_v2.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/inception_resnet_v2.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/inception_v3.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/inception_v3.cpython-39.pyc index f173a84f92f4576dde9cc452dca46c904ab1ecad..ab552420b47fc83961360aec96948a3e3cb62679 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/inception_v3.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/inception_v3.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/inception_v4.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/inception_v4.cpython-39.pyc index 7513cca462f35acecc0d227b8afcfbb2cdc5d501..cfc1ebe74a337ad6fe1ba075f4da7faf2d5c9c48 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/inception_v4.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/inception_v4.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/levit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/levit.cpython-39.pyc index 
8a4bbc860aa1e83133f55da8f92e9b5de4d548fb..d83773c0fd6c135b09b73f71396567bb495d516e 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/levit.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/levit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/mambaout.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/mambaout.cpython-39.pyc index bcc7bd7fccfac1d2029a44c345d98c616459eeef..d143bb1de7a543bfcbacd7312a440a6eaea9d5d8 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/mambaout.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/mambaout.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/maxxvit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/maxxvit.cpython-39.pyc index 3301c9ad3b78de7cc2476bd0b11c41d8d217bab7..392f34fedd0e4aa20bd3da5a010d13a24d414543 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/maxxvit.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/maxxvit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/metaformer.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/metaformer.cpython-39.pyc index 4c8739e7bacd3e906354103a5c6fc871297ed8d1..9a5a1b30ae0d4cf764682b24cc3200d1fd22fa46 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/metaformer.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/metaformer.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/mlp_mixer.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/mlp_mixer.cpython-39.pyc index 7d6dc8888cafff66e03a6ed4f3ac2e842d6854ea..ea27f268397f72e8e2b2b6c2313eed11814a27a0 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/mlp_mixer.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/mlp_mixer.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/mobilenetv3.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/mobilenetv3.cpython-39.pyc index a4d690952f6b07ebba0df094c72feb0b4341807e..df2c766aab14b7dca1209c1e5732d04ccfb5bb37 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/mobilenetv3.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/mobilenetv3.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/mobilevit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/mobilevit.cpython-39.pyc index 99a0100fbeb98ca8e1267202096f14fd39bb9618..3325483f82ff59a64cf8ccef0924cd4728673230 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/mobilevit.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/mobilevit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/mvitv2.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/mvitv2.cpython-39.pyc index 4bdde4669f4515121db93ec6b22a1e7407b8dd97..82491d38287630c1d95c6e21f20926306b4589e7 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/mvitv2.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/mvitv2.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/nasnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/nasnet.cpython-39.pyc index b1af16964d004fcb19678276d476f6201dd27341..92c036c18670ffbb27eafc52406aa844b189d5ff 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/nasnet.cpython-39.pyc and 
b/pytorch-image-models/timm/models/__pycache__/nasnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/nest.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/nest.cpython-39.pyc index db450f9e644b18ed0ca4fd749c2e01d0376abc12..cb2ead555d9fdcc76b387297746fe8ddaed8294d 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/nest.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/nest.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/nextvit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/nextvit.cpython-39.pyc index ff2a50b477f897b91b3491b6083562da0396355c..d5666183d33cdddbe191c2628c421f82d0e9c78b 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/nextvit.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/nextvit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/nfnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/nfnet.cpython-39.pyc index dd9e2e61ac760e8eefa62c831c9d03068017e75e..298ba8363d35b0de2d76efeeceb2bed21b4f903d 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/nfnet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/nfnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/pit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/pit.cpython-39.pyc index 875281cb2a5f5f27e1161721ea9bca138f025c1b..703ccc9991c43ca269cea049743d8e8e197851c6 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/pit.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/pit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/pnasnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/pnasnet.cpython-39.pyc index 3c9277483f4a6f09ef594a7b68f903ea4c5fb4ff..138e654fae1a4977e1a947a1995bfaf37e815e3f 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/pnasnet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/pnasnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/pvt_v2.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/pvt_v2.cpython-39.pyc index 700484de581878bba81578649fd8fd28b159c04f..56f4edc60e6d8ddcd032a2c8e63b12d10793af98 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/pvt_v2.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/pvt_v2.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/rdnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/rdnet.cpython-39.pyc index 8dadd8649cb705c4b4585522a60372d9652cd57b..364b7fe0dfd96bede26566bf23891aeb1852391e 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/rdnet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/rdnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/regnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/regnet.cpython-39.pyc index 93e70fcd15cf8032fccb86e3408032f8e9b163fe..987847daae6ce89b5c7635fdf2359b6293466c8c 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/regnet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/regnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/repghost.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/repghost.cpython-39.pyc index d21c8e65637f137dd14b2c1f5fb40c853b0186f3..a37e9422d47a4cbb11909e15b699b18efae61362 
100644 Binary files a/pytorch-image-models/timm/models/__pycache__/repghost.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/repghost.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/repvit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/repvit.cpython-39.pyc index fc00b7a3fb46a38c5ec2511c54bc5c962623bef4..3b2bb33ce636bab5e7451645e2f523ff3df31afd 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/repvit.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/repvit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/res2net.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/res2net.cpython-39.pyc index d12345cf75f624cbc0859e3377c86f166b89ed5e..409098e3a1b42498a30fee882138d0809cb23d23 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/res2net.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/res2net.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/resnest.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/resnest.cpython-39.pyc index 1859eee2ac71bf6d2645cfa7a436d3c4e5ba991e..5b943df4f625ee2cd38af79435eb419aca6982a9 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/resnest.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/resnest.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/resnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/resnet.cpython-39.pyc index 4707acd88e56719393a5afbde2b13f217cff3993..f4d722f3b0ad90d4aa36b8a3fcf3418e638f4ec3 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/resnet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/resnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/resnetv2.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/resnetv2.cpython-39.pyc index bdd2e3d9144868693a2873d7468e6313b4a5d569..0b0c0972986ef1dfb541a3419af7fbe71eb3918f 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/resnetv2.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/resnetv2.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/rexnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/rexnet.cpython-39.pyc index 11802a9eee8de55d7379e4937120e7fc73e36fda..eb7afe4232bf6f9f8f27e4d2a98ed2092d4d986d 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/rexnet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/rexnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/selecsls.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/selecsls.cpython-39.pyc index cacb9f9c3086186de381d7b33431688ca201e16f..ef65cfcc65e54ea28564f2c07a0cb0bdf08d3b31 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/selecsls.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/selecsls.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/senet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/senet.cpython-39.pyc index 030cca3f61a407cd79b67904a9bffa93980b6a66..391127723baad352549d6327d0e784e5151ef945 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/senet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/senet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/sequencer.cpython-39.pyc 
b/pytorch-image-models/timm/models/__pycache__/sequencer.cpython-39.pyc index 3b2b679a0ee430623c6ac3c6732803705b1309ec..b02e059e86b2880b25fbf2dc0fde964bf016ac91 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/sequencer.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/sequencer.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/sknet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/sknet.cpython-39.pyc index c588b88a767d61615b1b9e1105c7902cc4c1018a..e4bc0bd0c1080d38a983a2ce4bb8b4e30bbb8582 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/sknet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/sknet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/swin_transformer.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/swin_transformer.cpython-39.pyc index bb3739db13bda0ae46b977922b863210612d57ce..22d713d8599aa4bf48f2dcc6a3ba8c022e0afe0e 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/swin_transformer.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/swin_transformer.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/swin_transformer_v2.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/swin_transformer_v2.cpython-39.pyc index 6c7e6fe6973f7eb6bd6fbca526575893fb090e2c..14c930502257640a0222d0d28c2426e45eb2c2ba 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/swin_transformer_v2.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/swin_transformer_v2.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/swin_transformer_v2_cr.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/swin_transformer_v2_cr.cpython-39.pyc index 2a0054badad4c0686ef1c44172a6322e7ab527a9..8e2734f3564979c36342fada0c000f51d2728ea6 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/swin_transformer_v2_cr.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/swin_transformer_v2_cr.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/tiny_vit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/tiny_vit.cpython-39.pyc index eed1436409beb1f3ef84b385c38b35446e29b04d..aaf45ca818deac916a32a76749026a3f6bb962e4 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/tiny_vit.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/tiny_vit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/tnt.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/tnt.cpython-39.pyc index fddf8315ddd887bc9a2974f43e3bb6b520a47361..eb4503a42468308038c65f9ca34fb9c1290c2900 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/tnt.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/tnt.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/tresnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/tresnet.cpython-39.pyc index 0f732bb3ffa6497b333652a8102be7e0f940abac..7b9ca65ae82010e2723fd4b0bce89ee94d5ea7cd 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/tresnet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/tresnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/twins.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/twins.cpython-39.pyc index 
9229153a840ac68404802d57a0485d792b36f91e..630a98b1193135973cb6d0c0adb3cba2edc7ecf5 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/twins.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/twins.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/vgg.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/vgg.cpython-39.pyc index 624e208371150a8f389a626a8b8ba434fda98449..946b2024f5dc251c823bc5953c92dd81826d509b 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/vgg.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/vgg.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/visformer.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/visformer.cpython-39.pyc index 8f1ebd648ae152be95312e72678c5fffc90882b8..853b5a0a03421227931767141de2786b171f55f1 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/visformer.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/visformer.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/vision_transformer.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/vision_transformer.cpython-39.pyc index 001c799b907d5bb1af689348330c4f2e610bc13b..61b81b627d8861cebdb8fda831fb70bdedd624f5 100644
--- a/pytorch-image-models/timm/models/__pycache__/vision_transformer.cpython-39.pyc
+++ b/pytorch-image-models/timm/models/__pycache__/vision_transformer.cpython-39.pyc
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:42aa953ac2ee04c5ad498a3591517aea6e3bc525cc4a00d4496d123a81ed14d1
+oid sha256:fe4febf77541930407dbe26f628ca0a22d3114c321ea68e9cb9f1aa95ba51719
 size 113395
diff --git a/pytorch-image-models/timm/models/__pycache__/vision_transformer_hybrid.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/vision_transformer_hybrid.cpython-39.pyc index 9d57baba5d5b248567fe09049fc92893eee11a06..9f90012fc5f514b2e55701927f7ca1ea05b21a0c 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/vision_transformer_hybrid.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/vision_transformer_hybrid.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/vision_transformer_relpos.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/vision_transformer_relpos.cpython-39.pyc index ae83ebaffbeb6835827518fd4a76d57e12edeb9f..de5ab77c00e898c16694abae6a393d3aeefd81b0 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/vision_transformer_relpos.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/vision_transformer_relpos.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/vision_transformer_sam.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/vision_transformer_sam.cpython-39.pyc index 85bbb399582d7dabdd75cb7d61a805da9b9494a4..8a82c563fd53df0ed4a4c5495d95933ece295f11 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/vision_transformer_sam.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/vision_transformer_sam.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/vitamin.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/vitamin.cpython-39.pyc index b63e749dbbd92ab328e4bc9b8e4d5b7b9891e530..c96c6cb64d57f933e318061e5f2c17c90893b807 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/vitamin.cpython-39.pyc and
b/pytorch-image-models/timm/models/__pycache__/vitamin.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/volo.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/volo.cpython-39.pyc index d55fb778003344d81782493b92dfd32edce2192e..8c8134ec4853a208ffa7740f319270e3bff8eadc 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/volo.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/volo.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/vovnet.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/vovnet.cpython-39.pyc index 41cf1f37f1f36ff0db99a098900c557f567d2fb9..b1c776091c8c2b9a49b58bb66f3b937a162e187c 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/vovnet.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/vovnet.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/xception.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/xception.cpython-39.pyc index 713ac3f55c464ce6f0e21c928e4a07e183d45c9e..6e160139f32e31e86daceca69e8d679bfb666e83 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/xception.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/xception.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/xception_aligned.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/xception_aligned.cpython-39.pyc index 9f91595e23ed06ae2f53c3f664904a302b7e3f9e..b5db0a680605fa3ca6f0e9852a0e26ceb182b70c 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/xception_aligned.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/xception_aligned.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/models/__pycache__/xcit.cpython-39.pyc b/pytorch-image-models/timm/models/__pycache__/xcit.cpython-39.pyc index 91ee8ae355986f09055c0680d801dde09f768f6b..7b9c371411eff0bbaadce3b3c4a3e396d085c70a 100644 Binary files a/pytorch-image-models/timm/models/__pycache__/xcit.cpython-39.pyc and b/pytorch-image-models/timm/models/__pycache__/xcit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/__init__.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/__init__.cpython-39.pyc index bf4cbec4388e86aeb5e491819f70d82217336cb5..036df469ebe2ced3e48d88367bae63e62c7dff98 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/__init__.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/__init__.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/_optim_factory.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/_optim_factory.cpython-39.pyc index c0d07362ab1718ec6fe63377be8b1ce817450a8d..3fd44b309030f20923c697c0b8dd84d5fcb5747c 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/_optim_factory.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/_optim_factory.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/_param_groups.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/_param_groups.cpython-39.pyc index 18ae8e6de0ae9ee37cd7dd655e18e0a155b5674f..3d97f511ffd56ff0edb31b5cdad0bf98bf514744 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/_param_groups.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/_param_groups.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/_types.cpython-39.pyc 
b/pytorch-image-models/timm/optim/__pycache__/_types.cpython-39.pyc index 85a5e5a759c18881ba9475e231e99726629c33bd..7177a853bb4dc4f8e50e1cfa1fc5b8506529da62 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/_types.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/_types.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/adabelief.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/adabelief.cpython-39.pyc index 518e3a79a45df767886c1c885bfa3eb93707ab5d..3d28b6402b0a907fde7fd14b300ab9c1e051caf1 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/adabelief.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/adabelief.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/adafactor.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/adafactor.cpython-39.pyc index 233d4bd940194b26eb620e5ba2a4c8c0d52947dd..a27977fec4c6672e1b78df12e472197dbba17a08 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/adafactor.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/adafactor.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/adafactor_bv.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/adafactor_bv.cpython-39.pyc index 033cda287f2e02134f6029bf27ac2b8f48a7329f..18fa5592c7efff71b9eaed79df4a59be16db0433 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/adafactor_bv.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/adafactor_bv.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/adahessian.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/adahessian.cpython-39.pyc index d83ab23983109ff5058789e8677c44d84fc2c822..befabcf0ef8226f864f6437d554f0dc0b0ace75b 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/adahessian.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/adahessian.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/adamp.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/adamp.cpython-39.pyc index b22688bd75f2bcb6f89b045199f1d1b25b6d0c98..acf9f6b461f4698fa787da74fff93ebf25e6d56e 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/adamp.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/adamp.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/adamw.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/adamw.cpython-39.pyc index 7ea68e2a6e8d39bfbedbb0ee11d3815074055158..14ef40da97d85f19ec32929940db8a602bcedf80 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/adamw.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/adamw.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/adan.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/adan.cpython-39.pyc index 4f2f91a813263705f250451916df420d5c759c04..df213ad8a516354974c5f2aacd47c9d304d2dc65 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/adan.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/adan.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/adopt.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/adopt.cpython-39.pyc index 1cf69dd14d40fba454b2e3f95e538f34042d37b5..d738451f21fa7bf834ef95637f230b7a5abd398c 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/adopt.cpython-39.pyc and 
b/pytorch-image-models/timm/optim/__pycache__/adopt.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/kron.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/kron.cpython-39.pyc index b0ea719d34343f481da1d143e61505cd8e2b63c3..f631a8b4734a04747ad5cc09d1e6f5864d859d84 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/kron.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/kron.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/lamb.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/lamb.cpython-39.pyc index 6e979c2ea643ef2de8b10556a909a786054356bf..a6ee4d6fc21e5323a32a8578c82f0507641daff2 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/lamb.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/lamb.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/laprop.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/laprop.cpython-39.pyc index bec98a1da09b92ffec7120aaa4704c6e80066cd1..1d73be289b2bf72e58923037825296375239094b 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/laprop.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/laprop.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/lars.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/lars.cpython-39.pyc index 57253c5c54e6b7c68520bb27d3e628bb4612f1c2..dcaae0f30b921d3b4262e5874b92ac4755af5870 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/lars.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/lars.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/lion.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/lion.cpython-39.pyc index 4d41f68f96c01234f101ad879d024f11d9059752..65e2f5f375c4d9d3cc5020fbe516c7dc38606ec9 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/lion.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/lion.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/lookahead.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/lookahead.cpython-39.pyc index 093a15ab4ef98e0213a549c5b2ecce4ae69bde85..caaac87dd284cfcea181e1decc8014eca2573256 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/lookahead.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/lookahead.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/madgrad.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/madgrad.cpython-39.pyc index 1a0866a9fdaabb08af84bf7530a2a7baad94dd15..f3bcb141ba0b72047e1687667a1fce9c5d72e9cd 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/madgrad.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/madgrad.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/mars.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/mars.cpython-39.pyc index 5fc040c2b4a44d79aac254afaba4b7e25349a6e2..08e145eee408c3487ba285089f10598b25da6057 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/mars.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/mars.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/nadam.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/nadam.cpython-39.pyc index c3545ccae984ce0553e201a432ce7fa56b917bf4..b8abce573102d28a695901e3107235fe872f9a0e 100644 Binary files 
a/pytorch-image-models/timm/optim/__pycache__/nadam.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/nadam.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/nadamw.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/nadamw.cpython-39.pyc index d361f0c6ca5ffe2870497889469d1db80f1184cd..5a503c9b76741052ddf63fcb38cebb4d5e9a5cb2 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/nadamw.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/nadamw.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/nvnovograd.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/nvnovograd.cpython-39.pyc index cd13b43e39ae57278b4cc05c581c5026ac7d96b2..230457db658ba254999ffdda75428ced074e7871 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/nvnovograd.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/nvnovograd.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/radam.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/radam.cpython-39.pyc index b931ba31f410eb709c647bc2460fa14fc2929a97..79adf18c206c257f710b5a863cf11fb6d71c5cfa 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/radam.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/radam.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/rmsprop_tf.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/rmsprop_tf.cpython-39.pyc index d95a5f242ede9b83b528acef0942d9be2619f8de..6d9582602bdc115a760d7242c3d26a230aab2454 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/rmsprop_tf.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/rmsprop_tf.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/sgdp.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/sgdp.cpython-39.pyc index 88365a6379e8e6ef21b791d8cdee49f15c18d55b..7b8ff6e9b7c3dd3999a8a2dedeb2044c1fbf1319 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/sgdp.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/sgdp.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/optim/__pycache__/sgdw.cpython-39.pyc b/pytorch-image-models/timm/optim/__pycache__/sgdw.cpython-39.pyc index 09368e2c392c4e64a6b6383683fd4acf96d0142d..6511975737a4800085c4dad0117c12d246b3d773 100644 Binary files a/pytorch-image-models/timm/optim/__pycache__/sgdw.cpython-39.pyc and b/pytorch-image-models/timm/optim/__pycache__/sgdw.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/scheduler/__pycache__/__init__.cpython-39.pyc b/pytorch-image-models/timm/scheduler/__pycache__/__init__.cpython-39.pyc index 6495c82bb8879adc00320174566988c8425a6c28..ea0c18fd6367435574c3bac7568df1b222aaa779 100644 Binary files a/pytorch-image-models/timm/scheduler/__pycache__/__init__.cpython-39.pyc and b/pytorch-image-models/timm/scheduler/__pycache__/__init__.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/scheduler/__pycache__/cosine_lr.cpython-39.pyc b/pytorch-image-models/timm/scheduler/__pycache__/cosine_lr.cpython-39.pyc index 22eb7c4e7c82f594e3f23426bdc153c6f5822bfd..f0d26a3092d278909b00e57ff58c3752117561dc 100644 Binary files a/pytorch-image-models/timm/scheduler/__pycache__/cosine_lr.cpython-39.pyc and b/pytorch-image-models/timm/scheduler/__pycache__/cosine_lr.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/scheduler/__pycache__/multistep_lr.cpython-39.pyc 
b/pytorch-image-models/timm/scheduler/__pycache__/multistep_lr.cpython-39.pyc index 2536ad510eecc34bd4e83c6c9f09a832a7d86cec..a576fa51ae77ede72592f46516676f38993e0148 100644 Binary files a/pytorch-image-models/timm/scheduler/__pycache__/multistep_lr.cpython-39.pyc and b/pytorch-image-models/timm/scheduler/__pycache__/multistep_lr.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/scheduler/__pycache__/plateau_lr.cpython-39.pyc b/pytorch-image-models/timm/scheduler/__pycache__/plateau_lr.cpython-39.pyc index c231c68f411f80803d0b77a853315d5319361045..53b626c63e8571c09f85742a8d2bd3dfd01a3d86 100644 Binary files a/pytorch-image-models/timm/scheduler/__pycache__/plateau_lr.cpython-39.pyc and b/pytorch-image-models/timm/scheduler/__pycache__/plateau_lr.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/scheduler/__pycache__/poly_lr.cpython-39.pyc b/pytorch-image-models/timm/scheduler/__pycache__/poly_lr.cpython-39.pyc index c51219427c5d3560b0f7bbc33c25adbeca03cb14..4a027b0e27c9a0c634909921d0074f7b15f923dc 100644 Binary files a/pytorch-image-models/timm/scheduler/__pycache__/poly_lr.cpython-39.pyc and b/pytorch-image-models/timm/scheduler/__pycache__/poly_lr.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/scheduler/__pycache__/scheduler.cpython-39.pyc b/pytorch-image-models/timm/scheduler/__pycache__/scheduler.cpython-39.pyc index 7949c5474f41bce0531eb4f857c768d1d16e0a92..de44b1c383265f067f5931cbcc4b57f1e0b97a24 100644 Binary files a/pytorch-image-models/timm/scheduler/__pycache__/scheduler.cpython-39.pyc and b/pytorch-image-models/timm/scheduler/__pycache__/scheduler.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/scheduler/__pycache__/scheduler_factory.cpython-39.pyc b/pytorch-image-models/timm/scheduler/__pycache__/scheduler_factory.cpython-39.pyc index 024a233c89e2b7e58a04271124155ea922f63a57..b5fb919836ece14510cc1bb1e84d2455f8d25da6 100644 Binary files a/pytorch-image-models/timm/scheduler/__pycache__/scheduler_factory.cpython-39.pyc and b/pytorch-image-models/timm/scheduler/__pycache__/scheduler_factory.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/scheduler/__pycache__/step_lr.cpython-39.pyc b/pytorch-image-models/timm/scheduler/__pycache__/step_lr.cpython-39.pyc index 0e0ba5fa953ed2a15e28d145628748a26332d50b..48759ed9ecee790d479b8f6a11fe65a74108f1be 100644 Binary files a/pytorch-image-models/timm/scheduler/__pycache__/step_lr.cpython-39.pyc and b/pytorch-image-models/timm/scheduler/__pycache__/step_lr.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/scheduler/__pycache__/tanh_lr.cpython-39.pyc b/pytorch-image-models/timm/scheduler/__pycache__/tanh_lr.cpython-39.pyc index 804399fa74b9c45025dea284da4c773eb4297387..0159a87e08f61f7d214e74f1343a7c18d6e88d39 100644 Binary files a/pytorch-image-models/timm/scheduler/__pycache__/tanh_lr.cpython-39.pyc and b/pytorch-image-models/timm/scheduler/__pycache__/tanh_lr.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/utils/__pycache__/__init__.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/__init__.cpython-39.pyc index 6164ebaa4b0fd47572e362af8a02e31a2cd286b6..2afcded53543a5e57fec55a5998cbd11beadddac 100644 Binary files a/pytorch-image-models/timm/utils/__pycache__/__init__.cpython-39.pyc and b/pytorch-image-models/timm/utils/__pycache__/__init__.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/utils/__pycache__/agc.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/agc.cpython-39.pyc index 
c6446c364aa16b020e895d1c6f0f02b74062687c..8edda40404f0a5c206908b1b18dd17bd23442762 100644 Binary files a/pytorch-image-models/timm/utils/__pycache__/agc.cpython-39.pyc and b/pytorch-image-models/timm/utils/__pycache__/agc.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/utils/__pycache__/attention_extract.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/attention_extract.cpython-39.pyc index 9149657e5f71e05552bf798610efea9d35a27e3c..2136b5d1c3997232298d3e6f6f4d90208c182468 100644 Binary files a/pytorch-image-models/timm/utils/__pycache__/attention_extract.cpython-39.pyc and b/pytorch-image-models/timm/utils/__pycache__/attention_extract.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/utils/__pycache__/checkpoint_saver.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/checkpoint_saver.cpython-39.pyc index a0a518457428011aa881436047367147a03f36e4..7172f11f277b130d006187c981e25b93602960dc 100644 Binary files a/pytorch-image-models/timm/utils/__pycache__/checkpoint_saver.cpython-39.pyc and b/pytorch-image-models/timm/utils/__pycache__/checkpoint_saver.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/utils/__pycache__/clip_grad.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/clip_grad.cpython-39.pyc index c086f46d7c98f6c4af01bce2b342a3afcf265aec..c8530abf1d4d749db1ac5f6b51928b49a88b4aad 100644 Binary files a/pytorch-image-models/timm/utils/__pycache__/clip_grad.cpython-39.pyc and b/pytorch-image-models/timm/utils/__pycache__/clip_grad.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/utils/__pycache__/cuda.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/cuda.cpython-39.pyc index 532a17ce72a2d065f487e3b0105c004b6a834cf9..e9d685c9b761db93c5a62c305cbcbfd145640436 100644 Binary files a/pytorch-image-models/timm/utils/__pycache__/cuda.cpython-39.pyc and b/pytorch-image-models/timm/utils/__pycache__/cuda.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/utils/__pycache__/decay_batch.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/decay_batch.cpython-39.pyc index 5fa2b21d7e170a5edf1387485f0dadcbd0d62087..0bd6321e31095c1f8762117a87076579dd0032ce 100644 Binary files a/pytorch-image-models/timm/utils/__pycache__/decay_batch.cpython-39.pyc and b/pytorch-image-models/timm/utils/__pycache__/decay_batch.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/utils/__pycache__/distributed.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/distributed.cpython-39.pyc index 0bc77467b791a75c4b36c74970589867cbc75ad6..95cc7d00902568261d8726316eed380b626ed256 100644 Binary files a/pytorch-image-models/timm/utils/__pycache__/distributed.cpython-39.pyc and b/pytorch-image-models/timm/utils/__pycache__/distributed.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/utils/__pycache__/jit.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/jit.cpython-39.pyc index 1cf604c72b98be9f1a3bfeb7a9892995016dd4a0..d5d616484965d241e6b3c5f4d18a80f6653be232 100644 Binary files a/pytorch-image-models/timm/utils/__pycache__/jit.cpython-39.pyc and b/pytorch-image-models/timm/utils/__pycache__/jit.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/utils/__pycache__/log.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/log.cpython-39.pyc index 156784f1ca29796e839cfb0d106780496aa30747..4f88ba02effa5c29ed7f7a937a2c54de86e1e81c 100644 Binary files a/pytorch-image-models/timm/utils/__pycache__/log.cpython-39.pyc and 
b/pytorch-image-models/timm/utils/__pycache__/log.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/utils/__pycache__/metrics.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/metrics.cpython-39.pyc index 46281c8917e9e21815b87b13d03062cd31bd8b1b..0e4a245a5b6d7bb456ef2d071abc6445e732e723 100644 Binary files a/pytorch-image-models/timm/utils/__pycache__/metrics.cpython-39.pyc and b/pytorch-image-models/timm/utils/__pycache__/metrics.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/utils/__pycache__/misc.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/misc.cpython-39.pyc index 9cd055a2bb0f0e1f6ee64fc5cdf7ddc6dd3da615..c04b8d7ab86fc97588e989d971847c65907b0b71 100644 Binary files a/pytorch-image-models/timm/utils/__pycache__/misc.cpython-39.pyc and b/pytorch-image-models/timm/utils/__pycache__/misc.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/utils/__pycache__/model.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/model.cpython-39.pyc index 6898792f5cfe63ab34ba3c62b811b8ce124f0ae0..9709bc62b7d4dabbb57b941bec0dc069d707fac3 100644 Binary files a/pytorch-image-models/timm/utils/__pycache__/model.cpython-39.pyc and b/pytorch-image-models/timm/utils/__pycache__/model.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/utils/__pycache__/model_ema.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/model_ema.cpython-39.pyc index 6818e0052d919553637b547d97b5e3c894a5f825..0dc83b54c4553f89cee1dbdb25c7acbf974426dc 100644 Binary files a/pytorch-image-models/timm/utils/__pycache__/model_ema.cpython-39.pyc and b/pytorch-image-models/timm/utils/__pycache__/model_ema.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/utils/__pycache__/random.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/random.cpython-39.pyc index b2cfc356b63687cf833c9253898e1b2d9ac94ea2..4103302fa7aa417eda625c27fa9bd800dddc99e0 100644 Binary files a/pytorch-image-models/timm/utils/__pycache__/random.cpython-39.pyc and b/pytorch-image-models/timm/utils/__pycache__/random.cpython-39.pyc differ diff --git a/pytorch-image-models/timm/utils/__pycache__/summary.cpython-39.pyc b/pytorch-image-models/timm/utils/__pycache__/summary.cpython-39.pyc index 5ab22d3fa2aadd42e7b5f9f39c839f9bdb988b6e..6060be608d2350c6d39f09fb1ea4439252968568 100644 Binary files a/pytorch-image-models/timm/utils/__pycache__/summary.cpython-39.pyc and b/pytorch-image-models/timm/utils/__pycache__/summary.cpython-39.pyc differ diff --git a/pytorch-image-models/wandb/debug-internal.log b/pytorch-image-models/wandb/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..22a7ee2db73423355269b65089213c10dd539b34 --- /dev/null +++ b/pytorch-image-models/wandb/debug-internal.log @@ -0,0 +1,9 @@ +{"time":"2025-02-22T01:50:06.787729747Z","level":"INFO","msg":"stream: starting","core version":"0.19.7","symlink path":"/app/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/logs/debug-core.log"} +{"time":"2025-02-22T01:50:06.892597095Z","level":"INFO","msg":"created new stream","id":"3pv3zoe0"} +{"time":"2025-02-22T01:50:06.892638216Z","level":"INFO","msg":"stream: started","id":"3pv3zoe0"} +{"time":"2025-02-22T01:50:06.892712377Z","level":"INFO","msg":"sender: started","stream_id":"3pv3zoe0"} +{"time":"2025-02-22T01:50:06.892704957Z","level":"INFO","msg":"writer: Do: started","stream_id":"3pv3zoe0"} +{"time":"2025-02-22T01:50:06.892806289Z","level":"INFO","msg":"handler: started","stream_id":"3pv3zoe0"} 
+{"time":"2025-02-22T01:50:07.093461314Z","level":"INFO","msg":"Starting system monitor"} +{"time":"2025-02-22T08:01:37.094049977Z","level":"ERROR","msg":"monitor: cpu: error sampling metrics: open /proc/156/stat: no such file or directory\nopen /proc/156/status: no such file or directory"} +{"time":"2025-02-22T08:01:37.094183951Z","level":"ERROR","msg":"monitor: memory: error sampling metrics: open /proc/156/statm: no such file or directory"} diff --git a/pytorch-image-models/wandb/debug.log b/pytorch-image-models/wandb/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..f0aa447db772ada739b722a5a5e5f83e42d4d30c --- /dev/null +++ b/pytorch-image-models/wandb/debug.log @@ -0,0 +1,22 @@ +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_setup.py:_flush():67] Current SDK version is 0.19.7 +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_setup.py:_flush():67] Configure stats pid to 156 +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_setup.py:_flush():67] Loading settings from /home/user/.config/wandb/settings +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_setup.py:_flush():67] Loading settings from /app/pytorch-image-models/wandb/settings +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_setup.py:_flush():67] Loading settings from environment variables +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_init.py:setup_run_log_directory():647] Logging user logs to /app/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/logs/debug.log +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_init.py:setup_run_log_directory():648] Logging internal logs to /app/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/logs/debug-internal.log +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_init.py:init():761] calling init triggers +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_init.py:init():766] wandb.init called with sweep_config: {} +config: {'data': None, 'data_dir': None, 'dataset': 'hfds/datacomp/imagenet-1k-random-60.0-frac-1over4', 'train_split': 'train', 'val_split': 'validation', 'train_num_samples': None, 'val_num_samples': None, 'dataset_download': False, 'class_map': '', 'input_img_mode': None, 'input_key': None, 'target_key': None, 'dataset_trust_remote_code': False, 'model': 'seresnet34', 'pretrained': False, 'pretrained_path': None, 'initial_checkpoint': '', 'resume': '', 'no_resume_opt': False, 'num_classes': 1000, 'gp': None, 'img_size': None, 'in_chans': None, 'input_size': None, 'crop_pct': None, 'mean': None, 'std': None, 'interpolation': '', 'batch_size': 256, 'validation_batch_size': None, 'channels_last': False, 'fuser': '', 'grad_accum_steps': 1, 'grad_checkpointing': False, 'fast_norm': False, 'model_kwargs': {}, 'head_init_scale': None, 'head_init_bias': None, 'torchcompile_mode': None, 'torchscript': False, 'torchcompile': None, 'device': 'cuda:0', 'amp': True, 'amp_dtype': 'float16', 'amp_impl': 'native', 'model_dtype': None, 'no_ddp_bb': False, 'synchronize_step': False, 'local_rank': 0, 'device_modules': None, 'opt': 'sgd', 'opt_eps': None, 'opt_betas': None, 'momentum': 0.9, 'weight_decay': 2e-05, 'clip_grad': None, 'clip_mode': 'norm', 'layer_decay': None, 'opt_kwargs': {}, 'sched': 'cosine', 'sched_on_updates': False, 'lr': 0.4, 'lr_base': 0.1, 'lr_base_size': 256, 'lr_base_scale': '', 'lr_noise': None, 'lr_noise_pct': 0.67, 'lr_noise_std': 1.0, 'lr_cycle_mul': 1.0, 'lr_cycle_decay': 0.5, 'lr_cycle_limit': 1, 'lr_k_decay': 1.0, 'warmup_lr': 1e-05, 'min_lr': 0, 'epochs': 150, 'epoch_repeats': 0.0, 'start_epoch': None, 
'decay_milestones': [90, 180, 270], 'decay_epochs': 90, 'warmup_epochs': 5, 'warmup_prefix': False, 'cooldown_epochs': 0, 'patience_epochs': 10, 'decay_rate': 0.1, 'no_aug': False, 'train_crop_mode': None, 'scale': [0.08, 1.0], 'ratio': [0.75, 1.3333333333333333], 'hflip': 0.5, 'vflip': 0.0, 'color_jitter': 0.4, 'color_jitter_prob': None, 'grayscale_prob': None, 'gaussian_blur_prob': None, 'aa': None, 'aug_repeats': 0, 'aug_splits': 0, 'jsd_loss': False, 'bce_loss': False, 'bce_sum': False, 'bce_target_thresh': None, 'bce_pos_weight': None, 'reprob': 0.5, 'remode': 'pixel', 'recount': 1, 'resplit': False, 'mixup': 0.0, 'cutmix': 0.0, 'cutmix_minmax': None, 'mixup_prob': 1.0, 'mixup_switch_prob': 0.5, 'mixup_mode': 'batch', 'mixup_off_epoch': 0, 'smoothing': 0.1, 'train_interpolation': 'random', 'drop': 0.0, 'drop_connect': None, 'drop_path': None, 'drop_block': None, 'bn_momentum': None, 'bn_eps': None, 'sync_bn': False, 'dist_bn': 'reduce', 'split_bn': False, 'model_ema': False, 'model_ema_force_cpu': False, 'model_ema_decay': 0.9998, 'model_ema_warmup': False, 'seed': 42, 'worker_seeding': 'all', 'log_interval': 50, 'recovery_interval': 0, 'checkpoint_hist': 10, 'workers': 4, 'save_images': False, 'pin_mem': False, 'no_prefetcher': False, 'output': '', 'experiment': 'ImageNetTraining60.0-frac-1over4', 'eval_metric': 'top1', 'tta': 0, 'use_multi_epochs_loader': False, 'log_wandb': True, 'wandb_project': 'ImageNetTraining60.0-frac-1over4', 'wandb_tags': [], 'wandb_resume_id': '', 'prefetcher': True, 'distributed': True, 'world_size': 4, 'rank': 0, '_wandb': {}} +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_init.py:init():784] starting backend +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_init.py:init():788] sending inform_init request +2025-02-22 01:50:06,785 INFO MainThread:156 [backend.py:_multiprocessing_setup():97] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2025-02-22 01:50:06,786 INFO MainThread:156 [wandb_init.py:init():803] backend started and connected +2025-02-22 01:50:06,790 INFO MainThread:156 [wandb_init.py:init():896] updated telemetry +2025-02-22 01:50:06,814 INFO MainThread:156 [wandb_init.py:init():920] communicating run to backend with 90.0 second timeout +2025-02-22 01:50:07,090 INFO MainThread:156 [wandb_init.py:init():995] starting run threads in backend +2025-02-22 01:50:07,170 INFO MainThread:156 [wandb_run.py:_console_start():2377] atexit reg +2025-02-22 01:50:07,171 INFO MainThread:156 [wandb_run.py:_redirect():2227] redirect: wrap_raw +2025-02-22 01:50:07,171 INFO MainThread:156 [wandb_run.py:_redirect():2292] Wrapping output streams. +2025-02-22 01:50:07,171 INFO MainThread:156 [wandb_run.py:_redirect():2317] Redirects installed. +2025-02-22 01:50:07,173 INFO MainThread:156 [wandb_init.py:init():1037] run started, returning control to user process diff --git a/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/files/output.log b/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..d375687d4d86ff91ab57b52ef3bcdba44c5a4478 --- /dev/null +++ b/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/files/output.log @@ -0,0 +1,2266 @@ +Scheduled epochs: 150 (epochs + cooldown_epochs). Warmup within epochs when warmup_prefix=False. LR stepped per epoch. 
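Note on the LR column in the log that follows: the per-epoch values come straight from the run config captured in debug.log above (sched: cosine, lr: 0.4, warmup_lr: 1e-05, warmup_epochs: 5, min_lr: 0, epochs: 150, warmup_prefix: False, LR stepped per epoch). Below is a minimal Python sketch, not the timm scheduler itself, assuming a linear warmup into a cosine curve over the full 150-epoch span; the helper name lr_at is hypothetical and used only for illustration.

# Sketch of the per-epoch LR schedule implied by the config above (assumption,
# not timm's implementation): linear warmup over 5 epochs from 1e-5 to 0.4,
# then cosine decay toward min_lr=0 over the 150 scheduled epochs.
import math

LR, WARMUP_LR, MIN_LR = 0.4, 1e-5, 0.0
EPOCHS, WARMUP_EPOCHS = 150, 5

def lr_at(epoch: int) -> float:
    """Approximate LR used at a given epoch for this run (warmup within the cosine span)."""
    if epoch < WARMUP_EPOCHS:
        # linear ramp: epoch 0 -> 1.000e-05, epoch 1 -> ~8.001e-02, ..., epoch 4 -> ~3.2e-01
        return WARMUP_LR + epoch * (LR - WARMUP_LR) / WARMUP_EPOCHS
    # cosine decay over the full scheduled span (no warmup prefix offset)
    return MIN_LR + 0.5 * (LR - MIN_LR) * (1 + math.cos(math.pi * epoch / EPOCHS))

for e in (0, 1, 2, 5, 6, 7):
    print(e, f"{lr_at(e):.3e}")

The printed values line up with the LR column logged for epochs 0-2 and 5-7 below (1.000e-05, 8.001e-02, 1.600e-01, 3.989e-01, 3.984e-01, 3.979e-01).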
+Train: 0 [ 0/312 ( 0%)] Loss: 6.94 (6.94) Time: 3.892s, 263.14/s (3.892s, 263.14/s) LR: 1.000e-05 Data: 1.324 (1.324) +Train: 0 [ 50/312 ( 16%)] Loss: 6.95 (6.94) Time: 0.392s, 2611.98/s (0.459s, 2229.54/s) LR: 1.000e-05 Data: 0.027 (0.051) +Train: 0 [ 100/312 ( 32%)] Loss: 6.95 (6.94) Time: 0.393s, 2603.93/s (0.427s, 2399.97/s) LR: 1.000e-05 Data: 0.027 (0.040) +Train: 0 [ 150/312 ( 48%)] Loss: 6.96 (6.94) Time: 0.396s, 2587.00/s (0.416s, 2460.47/s) LR: 1.000e-05 Data: 0.028 (0.036) +Train: 0 [ 200/312 ( 64%)] Loss: 6.94 (6.94) Time: 0.396s, 2587.69/s (0.411s, 2490.56/s) LR: 1.000e-05 Data: 0.026 (0.034) +Train: 0 [ 250/312 ( 80%)] Loss: 6.94 (6.94) Time: 0.396s, 2584.69/s (0.408s, 2507.88/s) LR: 1.000e-05 Data: 0.028 (0.032) +Train: 0 [ 300/312 ( 96%)] Loss: 6.94 (6.94) Time: 0.397s, 2581.15/s (0.406s, 2519.37/s) LR: 1.000e-05 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.544 (1.544) Loss: 6.946 ( 6.946) Acc@1: 0.098 ( 0.098) Acc@5: 0.488 ( 0.488) +Test: [ 48/48] Time: 0.690 (0.346) Loss: 6.940 ( 6.939) Acc@1: 0.118 ( 0.078) Acc@5: 0.354 ( 0.516) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-0.pth.tar', 0.0779999999666214) + +Train: 1 [ 0/312 ( 0%)] Loss: 6.93 (6.93) Time: 1.664s, 615.25/s (1.664s, 615.25/s) LR: 8.001e-02 Data: 1.300 (1.300) +Train: 1 [ 50/312 ( 16%)] Loss: 6.91 (6.92) Time: 0.404s, 2536.74/s (0.431s, 2374.78/s) LR: 8.001e-02 Data: 0.028 (0.052) +Train: 1 [ 100/312 ( 32%)] Loss: 6.91 (6.92) Time: 0.403s, 2539.20/s (0.418s, 2451.63/s) LR: 8.001e-02 Data: 0.024 (0.040) +Train: 1 [ 150/312 ( 48%)] Loss: 6.91 (6.91) Time: 0.405s, 2526.38/s (0.413s, 2477.12/s) LR: 8.001e-02 Data: 0.029 (0.036) +Train: 1 [ 200/312 ( 64%)] Loss: 6.89 (6.91) Time: 0.407s, 2513.19/s (0.411s, 2488.52/s) LR: 8.001e-02 Data: 0.027 (0.034) +Train: 1 [ 250/312 ( 80%)] Loss: 6.90 (6.91) Time: 0.404s, 2533.54/s (0.410s, 2495.24/s) LR: 8.001e-02 Data: 0.027 (0.032) +Train: 1 [ 300/312 ( 96%)] Loss: 6.88 (6.90) Time: 0.407s, 2515.75/s (0.410s, 2499.02/s) LR: 8.001e-02 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.453 (1.453) Loss: 6.736 ( 6.736) Acc@1: 0.586 ( 0.586) Acc@5: 3.809 ( 3.809) +Test: [ 48/48] Time: 0.092 (0.328) Loss: 6.707 ( 6.736) Acc@1: 0.825 ( 0.784) Acc@5: 3.774 ( 3.358) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-1.pth.tar', 0.7839999990081787) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-0.pth.tar', 0.0779999999666214) + +Train: 2 [ 0/312 ( 0%)] Loss: 6.88 (6.88) Time: 1.915s, 534.66/s (1.915s, 534.66/s) LR: 1.600e-01 Data: 1.541 (1.541) +Train: 2 [ 50/312 ( 16%)] Loss: 6.88 (6.88) Time: 0.403s, 2541.71/s (0.436s, 2349.93/s) LR: 1.600e-01 Data: 0.027 (0.057) +Train: 2 [ 100/312 ( 32%)] Loss: 6.87 (6.88) Time: 0.405s, 2528.14/s (0.420s, 2436.27/s) LR: 1.600e-01 Data: 0.027 (0.043) +Train: 2 [ 150/312 ( 48%)] Loss: 6.87 (6.87) Time: 0.406s, 2521.75/s (0.415s, 2465.61/s) LR: 1.600e-01 Data: 0.027 (0.038) +Train: 2 [ 200/312 ( 64%)] Loss: 6.87 (6.87) Time: 0.408s, 2508.85/s (0.413s, 2478.73/s) LR: 1.600e-01 Data: 0.027 (0.035) +Train: 2 [ 250/312 ( 80%)] Loss: 6.86 (6.87) Time: 0.415s, 2467.12/s (0.413s, 2480.88/s) LR: 1.600e-01 Data: 0.027 (0.034) +Train: 2 [ 300/312 ( 96%)] Loss: 6.87 (6.87) Time: 0.414s, 2471.15/s (0.413s, 2478.46/s) LR: 1.600e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.491 (1.491) Loss: 6.601 ( 6.601) Acc@1: 1.074 ( 1.074) Acc@5: 3.906 ( 
3.906) +Test: [ 48/48] Time: 0.091 (0.329) Loss: 6.565 ( 6.598) Acc@1: 1.061 ( 1.260) Acc@5: 4.599 ( 4.526) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-2.pth.tar', 1.2600000004577636) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-1.pth.tar', 0.7839999990081787) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-0.pth.tar', 0.0779999999666214) + +Train: 3 [ 0/312 ( 0%)] Loss: 6.83 (6.83) Time: 1.977s, 518.04/s (1.977s, 518.04/s) LR: 2.400e-01 Data: 1.601 (1.601) +Train: 3 [ 50/312 ( 16%)] Loss: 6.86 (6.85) Time: 0.414s, 2470.97/s (0.443s, 2309.99/s) LR: 2.400e-01 Data: 0.027 (0.058) +Train: 3 [ 100/312 ( 32%)] Loss: 6.85 (6.85) Time: 0.407s, 2519.02/s (0.427s, 2397.05/s) LR: 2.400e-01 Data: 0.029 (0.043) +Train: 3 [ 150/312 ( 48%)] Loss: 6.84 (6.85) Time: 0.403s, 2543.00/s (0.420s, 2438.10/s) LR: 2.400e-01 Data: 0.027 (0.038) +Train: 3 [ 200/312 ( 64%)] Loss: 6.84 (6.85) Time: 0.405s, 2529.97/s (0.416s, 2460.51/s) LR: 2.400e-01 Data: 0.027 (0.035) +Train: 3 [ 250/312 ( 80%)] Loss: 6.82 (6.85) Time: 0.407s, 2518.44/s (0.414s, 2473.44/s) LR: 2.400e-01 Data: 0.027 (0.033) +Train: 3 [ 300/312 ( 96%)] Loss: 6.82 (6.84) Time: 0.408s, 2512.50/s (0.413s, 2480.19/s) LR: 2.400e-01 Data: 0.029 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.461 (1.461) Loss: 6.514 ( 6.514) Acc@1: 1.758 ( 1.758) Acc@5: 6.543 ( 6.543) +Test: [ 48/48] Time: 0.091 (0.328) Loss: 6.499 ( 6.519) Acc@1: 2.123 ( 1.810) Acc@5: 5.778 ( 6.318) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-3.pth.tar', 1.8100000009155273) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-2.pth.tar', 1.2600000004577636) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-1.pth.tar', 0.7839999990081787) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-0.pth.tar', 0.0779999999666214) + +Train: 4 [ 0/312 ( 0%)] Loss: 6.82 (6.82) Time: 1.766s, 579.86/s (1.766s, 579.86/s) LR: 3.200e-01 Data: 1.389 (1.389) +Train: 4 [ 50/312 ( 16%)] Loss: 6.79 (6.82) Time: 0.418s, 2449.30/s (0.441s, 2320.45/s) LR: 3.200e-01 Data: 0.031 (0.054) +Train: 4 [ 100/312 ( 32%)] Loss: 6.83 (6.82) Time: 0.406s, 2521.20/s (0.425s, 2407.32/s) LR: 3.200e-01 Data: 0.027 (0.041) +Train: 4 [ 150/312 ( 48%)] Loss: 6.80 (6.82) Time: 0.405s, 2530.29/s (0.419s, 2443.86/s) LR: 3.200e-01 Data: 0.028 (0.036) +Train: 4 [ 200/312 ( 64%)] Loss: 6.83 (6.82) Time: 0.406s, 2521.84/s (0.416s, 2461.82/s) LR: 3.200e-01 Data: 0.028 (0.034) +Train: 4 [ 250/312 ( 80%)] Loss: 6.81 (6.82) Time: 0.413s, 2476.66/s (0.415s, 2469.72/s) LR: 3.200e-01 Data: 0.028 (0.033) +Train: 4 [ 300/312 ( 96%)] Loss: 6.82 (6.82) Time: 0.414s, 2474.26/s (0.415s, 2469.57/s) LR: 3.200e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.443 (1.443) Loss: 6.512 ( 6.512) Acc@1: 2.246 ( 2.246) Acc@5: 6.641 ( 6.641) +Test: [ 48/48] Time: 0.093 (0.332) Loss: 6.487 ( 6.500) Acc@1: 1.297 ( 1.904) Acc@5: 5.660 ( 5.818) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-4.pth.tar', 1.903999999885559) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-3.pth.tar', 1.8100000009155273) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-2.pth.tar', 1.2600000004577636) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-1.pth.tar', 0.7839999990081787) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-0.pth.tar', 0.0779999999666214) + +Train: 5 [ 
0/312 ( 0%)] Loss: 6.82 (6.82) Time: 1.538s, 665.93/s (1.538s, 665.93/s) LR: 3.989e-01 Data: 0.992 (0.992) +Train: 5 [ 50/312 ( 16%)] Loss: 6.79 (6.80) Time: 0.423s, 2419.92/s (0.442s, 2317.75/s) LR: 3.989e-01 Data: 0.026 (0.046) +Train: 5 [ 100/312 ( 32%)] Loss: 6.80 (6.80) Time: 0.419s, 2442.09/s (0.431s, 2373.91/s) LR: 3.989e-01 Data: 0.027 (0.037) +Train: 5 [ 150/312 ( 48%)] Loss: 6.81 (6.80) Time: 0.417s, 2456.06/s (0.427s, 2400.90/s) LR: 3.989e-01 Data: 0.027 (0.033) +Train: 5 [ 200/312 ( 64%)] Loss: 6.79 (6.80) Time: 0.422s, 2428.66/s (0.424s, 2412.88/s) LR: 3.989e-01 Data: 0.028 (0.032) +Train: 5 [ 250/312 ( 80%)] Loss: 6.81 (6.80) Time: 0.427s, 2400.47/s (0.424s, 2415.12/s) LR: 3.989e-01 Data: 0.028 (0.031) +Train: 5 [ 300/312 ( 96%)] Loss: 6.80 (6.80) Time: 0.419s, 2441.88/s (0.423s, 2418.38/s) LR: 3.989e-01 Data: 0.029 (0.030) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.441 (1.441) Loss: 6.359 ( 6.359) Acc@1: 2.344 ( 2.344) Acc@5: 9.766 ( 9.766) +Test: [ 48/48] Time: 0.092 (0.329) Loss: 6.334 ( 6.354) Acc@1: 2.830 ( 2.794) Acc@5: 10.024 ( 9.218) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-5.pth.tar', 2.794000001220703) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-4.pth.tar', 1.903999999885559) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-3.pth.tar', 1.8100000009155273) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-2.pth.tar', 1.2600000004577636) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-1.pth.tar', 0.7839999990081787) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-0.pth.tar', 0.0779999999666214) + +Train: 6 [ 0/312 ( 0%)] Loss: 6.72 (6.72) Time: 1.519s, 674.12/s (1.519s, 674.12/s) LR: 3.984e-01 Data: 1.140 (1.140) +Train: 6 [ 50/312 ( 16%)] Loss: 6.76 (6.77) Time: 0.418s, 2451.89/s (0.439s, 2330.51/s) LR: 3.984e-01 Data: 0.027 (0.049) +Train: 6 [ 100/312 ( 32%)] Loss: 6.80 (6.77) Time: 0.414s, 2472.69/s (0.429s, 2386.33/s) LR: 3.984e-01 Data: 0.026 (0.038) +Train: 6 [ 150/312 ( 48%)] Loss: 6.74 (6.77) Time: 0.414s, 2471.19/s (0.424s, 2413.23/s) LR: 3.984e-01 Data: 0.026 (0.034) +Train: 6 [ 200/312 ( 64%)] Loss: 6.79 (6.76) Time: 0.417s, 2454.78/s (0.423s, 2422.12/s) LR: 3.984e-01 Data: 0.026 (0.032) +Train: 6 [ 250/312 ( 80%)] Loss: 6.74 (6.76) Time: 0.412s, 2484.73/s (0.423s, 2422.33/s) LR: 3.984e-01 Data: 0.026 (0.031) +Train: 6 [ 300/312 ( 96%)] Loss: 6.79 (6.76) Time: 0.407s, 2513.16/s (0.421s, 2434.97/s) LR: 3.984e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.516 (1.516) Loss: 6.173 ( 6.173) Acc@1: 3.906 ( 3.906) Acc@5: 10.840 ( 10.840) +Test: [ 48/48] Time: 0.090 (0.332) Loss: 6.118 ( 6.147) Acc@1: 4.363 ( 3.798) Acc@5: 14.623 ( 12.226) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-6.pth.tar', 3.7980000045776365) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-5.pth.tar', 2.794000001220703) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-4.pth.tar', 1.903999999885559) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-3.pth.tar', 1.8100000009155273) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-2.pth.tar', 1.2600000004577636) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-1.pth.tar', 0.7839999990081787) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-0.pth.tar', 0.0779999999666214) + +Train: 7 [ 0/312 ( 0%)] Loss: 6.74 (6.74) Time: 1.798s, 
569.47/s (1.798s, 569.47/s) LR: 3.979e-01 Data: 1.125 (1.125) +Train: 7 [ 50/312 ( 16%)] Loss: 6.76 (6.73) Time: 0.403s, 2537.86/s (0.432s, 2369.79/s) LR: 3.979e-01 Data: 0.027 (0.048) +Train: 7 [ 100/312 ( 32%)] Loss: 6.70 (6.73) Time: 0.406s, 2520.03/s (0.419s, 2442.42/s) LR: 3.979e-01 Data: 0.028 (0.038) +Train: 7 [ 150/312 ( 48%)] Loss: 6.71 (6.73) Time: 0.408s, 2510.01/s (0.416s, 2464.18/s) LR: 3.979e-01 Data: 0.026 (0.034) +Train: 7 [ 200/312 ( 64%)] Loss: 6.72 (6.73) Time: 0.414s, 2475.36/s (0.415s, 2467.59/s) LR: 3.979e-01 Data: 0.026 (0.033) +Train: 7 [ 250/312 ( 80%)] Loss: 6.70 (6.73) Time: 0.429s, 2386.23/s (0.416s, 2459.16/s) LR: 3.979e-01 Data: 0.029 (0.032) +Train: 7 [ 300/312 ( 96%)] Loss: 6.73 (6.73) Time: 0.418s, 2448.03/s (0.417s, 2454.16/s) LR: 3.979e-01 Data: 0.026 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.516 (1.516) Loss: 5.991 ( 5.991) Acc@1: 4.492 ( 4.492) Acc@5: 12.598 ( 12.598) +Test: [ 48/48] Time: 0.091 (0.332) Loss: 5.956 ( 5.994) Acc@1: 6.014 ( 4.726) Acc@5: 16.156 ( 14.166) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-7.pth.tar', 4.726000002593994) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-6.pth.tar', 3.7980000045776365) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-5.pth.tar', 2.794000001220703) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-4.pth.tar', 1.903999999885559) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-3.pth.tar', 1.8100000009155273) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-2.pth.tar', 1.2600000004577636) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-1.pth.tar', 0.7839999990081787) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-0.pth.tar', 0.0779999999666214) + +Train: 8 [ 0/312 ( 0%)] Loss: 6.66 (6.66) Time: 1.491s, 686.95/s (1.491s, 686.95/s) LR: 3.972e-01 Data: 1.113 (1.113) +Train: 8 [ 50/312 ( 16%)] Loss: 6.72 (6.70) Time: 0.414s, 2473.19/s (0.436s, 2347.18/s) LR: 3.972e-01 Data: 0.027 (0.048) +Train: 8 [ 100/312 ( 32%)] Loss: 6.65 (6.70) Time: 0.427s, 2400.78/s (0.429s, 2387.78/s) LR: 3.972e-01 Data: 0.028 (0.038) +Train: 8 [ 150/312 ( 48%)] Loss: 6.66 (6.70) Time: 0.416s, 2460.02/s (0.426s, 2405.53/s) LR: 3.972e-01 Data: 0.028 (0.034) +Train: 8 [ 200/312 ( 64%)] Loss: 6.68 (6.70) Time: 0.414s, 2476.00/s (0.423s, 2422.79/s) LR: 3.972e-01 Data: 0.026 (0.033) +Train: 8 [ 250/312 ( 80%)] Loss: 6.71 (6.70) Time: 0.417s, 2458.29/s (0.421s, 2429.65/s) LR: 3.972e-01 Data: 0.028 (0.032) +Train: 8 [ 300/312 ( 96%)] Loss: 6.67 (6.70) Time: 0.421s, 2432.30/s (0.422s, 2426.89/s) LR: 3.972e-01 Data: 0.026 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.439 (1.439) Loss: 5.940 ( 5.940) Acc@1: 4.688 ( 4.688) Acc@5: 15.234 ( 15.234) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 5.871 ( 5.929) Acc@1: 6.014 ( 5.476) Acc@5: 19.693 ( 15.958) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-8.pth.tar', 5.476000002593994) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-7.pth.tar', 4.726000002593994) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-6.pth.tar', 3.7980000045776365) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-5.pth.tar', 2.794000001220703) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-4.pth.tar', 1.903999999885559) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-3.pth.tar', 1.8100000009155273) + 
('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-2.pth.tar', 1.2600000004577636) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-1.pth.tar', 0.7839999990081787) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-0.pth.tar', 0.0779999999666214) + +Train: 9 [ 0/312 ( 0%)] Loss: 6.63 (6.63) Time: 1.768s, 579.16/s (1.768s, 579.16/s) LR: 3.965e-01 Data: 1.389 (1.389) +Train: 9 [ 50/312 ( 16%)] Loss: 6.69 (6.66) Time: 0.416s, 2460.29/s (0.439s, 2330.89/s) LR: 3.965e-01 Data: 0.028 (0.054) +Train: 9 [ 100/312 ( 32%)] Loss: 6.70 (6.67) Time: 0.421s, 2434.49/s (0.428s, 2392.80/s) LR: 3.965e-01 Data: 0.028 (0.041) +Train: 9 [ 150/312 ( 48%)] Loss: 6.67 (6.67) Time: 0.428s, 2391.31/s (0.427s, 2397.60/s) LR: 3.965e-01 Data: 0.028 (0.036) +Train: 9 [ 200/312 ( 64%)] Loss: 6.67 (6.67) Time: 0.422s, 2426.87/s (0.427s, 2396.26/s) LR: 3.965e-01 Data: 0.027 (0.034) +Train: 9 [ 250/312 ( 80%)] Loss: 6.67 (6.67) Time: 0.410s, 2499.91/s (0.425s, 2406.60/s) LR: 3.965e-01 Data: 0.027 (0.033) +Train: 9 [ 300/312 ( 96%)] Loss: 6.62 (6.67) Time: 0.407s, 2518.93/s (0.423s, 2421.38/s) LR: 3.965e-01 Data: 0.026 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.429 (1.429) Loss: 5.839 ( 5.839) Acc@1: 6.152 ( 6.152) Acc@5: 17.969 ( 17.969) +Test: [ 48/48] Time: 0.091 (0.328) Loss: 5.769 ( 5.843) Acc@1: 8.137 ( 6.492) Acc@5: 19.458 ( 18.124) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-9.pth.tar', 6.49200001159668) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-8.pth.tar', 5.476000002593994) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-7.pth.tar', 4.726000002593994) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-6.pth.tar', 3.7980000045776365) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-5.pth.tar', 2.794000001220703) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-4.pth.tar', 1.903999999885559) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-3.pth.tar', 1.8100000009155273) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-2.pth.tar', 1.2600000004577636) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-1.pth.tar', 0.7839999990081787) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-0.pth.tar', 0.0779999999666214) + +Train: 10 [ 0/312 ( 0%)] Loss: 6.65 (6.65) Time: 1.871s, 547.22/s (1.871s, 547.22/s) LR: 3.956e-01 Data: 1.207 (1.207) +Train: 10 [ 50/312 ( 16%)] Loss: 6.64 (6.64) Time: 0.410s, 2494.73/s (0.437s, 2342.65/s) LR: 3.956e-01 Data: 0.029 (0.050) +Train: 10 [ 100/312 ( 32%)] Loss: 6.64 (6.63) Time: 0.419s, 2443.66/s (0.426s, 2406.43/s) LR: 3.956e-01 Data: 0.028 (0.039) +Train: 10 [ 150/312 ( 48%)] Loss: 6.62 (6.64) Time: 0.423s, 2419.56/s (0.425s, 2411.14/s) LR: 3.956e-01 Data: 0.028 (0.035) +Train: 10 [ 200/312 ( 64%)] Loss: 6.64 (6.63) Time: 0.431s, 2376.25/s (0.425s, 2411.25/s) LR: 3.956e-01 Data: 0.028 (0.033) +Train: 10 [ 250/312 ( 80%)] Loss: 6.64 (6.63) Time: 0.422s, 2427.06/s (0.425s, 2409.05/s) LR: 3.956e-01 Data: 0.029 (0.032) +Train: 10 [ 300/312 ( 96%)] Loss: 6.63 (6.63) Time: 0.426s, 2401.87/s (0.425s, 2407.33/s) LR: 3.956e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.440 (1.440) Loss: 5.945 ( 5.945) Acc@1: 4.785 ( 4.785) Acc@5: 13.281 ( 13.281) +Test: [ 48/48] Time: 0.091 (0.329) Loss: 5.870 ( 5.919) Acc@1: 7.547 ( 5.554) Acc@5: 16.981 ( 15.396) +Current checkpoints: + 
('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-9.pth.tar', 6.49200001159668) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-10.pth.tar', 5.554000005950928) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-8.pth.tar', 5.476000002593994) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-7.pth.tar', 4.726000002593994) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-6.pth.tar', 3.7980000045776365) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-5.pth.tar', 2.794000001220703) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-4.pth.tar', 1.903999999885559) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-3.pth.tar', 1.8100000009155273) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-2.pth.tar', 1.2600000004577636) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-1.pth.tar', 0.7839999990081787) + +Train: 11 [ 0/312 ( 0%)] Loss: 6.64 (6.64) Time: 1.730s, 592.04/s (1.730s, 592.04/s) LR: 3.947e-01 Data: 1.350 (1.350) +Train: 11 [ 50/312 ( 16%)] Loss: 6.52 (6.59) Time: 0.417s, 2453.82/s (0.443s, 2311.27/s) LR: 3.947e-01 Data: 0.026 (0.053) +Train: 11 [ 100/312 ( 32%)] Loss: 6.59 (6.60) Time: 0.424s, 2416.87/s (0.433s, 2367.56/s) LR: 3.947e-01 Data: 0.029 (0.040) +Train: 11 [ 150/312 ( 48%)] Loss: 6.58 (6.60) Time: 0.431s, 2373.44/s (0.430s, 2383.71/s) LR: 3.947e-01 Data: 0.028 (0.036) +Train: 11 [ 200/312 ( 64%)] Loss: 6.62 (6.60) Time: 0.414s, 2473.53/s (0.428s, 2393.92/s) LR: 3.947e-01 Data: 0.026 (0.034) +Train: 11 [ 250/312 ( 80%)] Loss: 6.60 (6.60) Time: 0.421s, 2433.94/s (0.426s, 2402.64/s) LR: 3.947e-01 Data: 0.026 (0.033) +Train: 11 [ 300/312 ( 96%)] Loss: 6.61 (6.60) Time: 0.425s, 2407.91/s (0.426s, 2404.61/s) LR: 3.947e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.433 (1.433) Loss: 5.672 ( 5.672) Acc@1: 8.203 ( 8.203) Acc@5: 21.484 ( 21.484) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 5.578 ( 5.657) Acc@1: 9.670 ( 8.090) Acc@5: 25.943 ( 21.510) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-11.pth.tar', 8.090000014953613) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-9.pth.tar', 6.49200001159668) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-10.pth.tar', 5.554000005950928) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-8.pth.tar', 5.476000002593994) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-7.pth.tar', 4.726000002593994) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-6.pth.tar', 3.7980000045776365) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-5.pth.tar', 2.794000001220703) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-4.pth.tar', 1.903999999885559) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-3.pth.tar', 1.8100000009155273) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-2.pth.tar', 1.2600000004577636) + +Train: 12 [ 0/312 ( 0%)] Loss: 6.56 (6.56) Time: 1.657s, 618.07/s (1.657s, 618.07/s) LR: 3.937e-01 Data: 1.275 (1.275) +Train: 12 [ 50/312 ( 16%)] Loss: 6.54 (6.57) Time: 0.424s, 2412.82/s (0.446s, 2296.57/s) LR: 3.937e-01 Data: 0.028 (0.051) +Train: 12 [ 100/312 ( 32%)] Loss: 6.61 (6.57) Time: 0.411s, 2492.51/s (0.433s, 2363.62/s) LR: 3.937e-01 Data: 0.028 (0.040) +Train: 12 [ 150/312 ( 48%)] Loss: 6.63 (6.57) Time: 0.406s, 2523.06/s (0.425s, 2409.96/s) LR: 3.937e-01 Data: 0.027 (0.036) +Train: 12 [ 200/312 ( 
64%)] Loss: 6.56 (6.57) Time: 0.405s, 2528.64/s (0.420s, 2437.26/s) LR: 3.937e-01 Data: 0.027 (0.034) +Train: 12 [ 250/312 ( 80%)] Loss: 6.57 (6.57) Time: 0.407s, 2515.83/s (0.417s, 2454.49/s) LR: 3.937e-01 Data: 0.027 (0.032) +Train: 12 [ 300/312 ( 96%)] Loss: 6.52 (6.57) Time: 0.407s, 2515.00/s (0.415s, 2465.60/s) LR: 3.937e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.439 (1.439) Loss: 5.544 ( 5.544) Acc@1: 8.789 ( 8.789) Acc@5: 23.926 ( 23.926) +Test: [ 48/48] Time: 0.090 (0.329) Loss: 5.466 ( 5.548) Acc@1: 12.146 ( 9.554) Acc@5: 26.061 ( 23.796) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-12.pth.tar', 9.55400000793457) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-11.pth.tar', 8.090000014953613) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-9.pth.tar', 6.49200001159668) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-10.pth.tar', 5.554000005950928) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-8.pth.tar', 5.476000002593994) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-7.pth.tar', 4.726000002593994) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-6.pth.tar', 3.7980000045776365) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-5.pth.tar', 2.794000001220703) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-4.pth.tar', 1.903999999885559) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-3.pth.tar', 1.8100000009155273) + +Train: 13 [ 0/312 ( 0%)] Loss: 6.53 (6.53) Time: 1.545s, 662.94/s (1.545s, 662.94/s) LR: 3.926e-01 Data: 1.171 (1.171) +Train: 13 [ 50/312 ( 16%)] Loss: 6.48 (6.52) Time: 0.407s, 2513.82/s (0.431s, 2375.64/s) LR: 3.926e-01 Data: 0.027 (0.049) +Train: 13 [ 100/312 ( 32%)] Loss: 6.53 (6.53) Time: 0.423s, 2422.24/s (0.424s, 2414.82/s) LR: 3.926e-01 Data: 0.026 (0.039) +Train: 13 [ 150/312 ( 48%)] Loss: 6.54 (6.53) Time: 0.421s, 2429.93/s (0.424s, 2417.62/s) LR: 3.926e-01 Data: 0.029 (0.035) +Train: 13 [ 200/312 ( 64%)] Loss: 6.60 (6.54) Time: 0.412s, 2488.12/s (0.421s, 2432.87/s) LR: 3.926e-01 Data: 0.027 (0.033) +Train: 13 [ 250/312 ( 80%)] Loss: 6.49 (6.54) Time: 0.411s, 2490.94/s (0.419s, 2443.40/s) LR: 3.926e-01 Data: 0.027 (0.032) +Train: 13 [ 300/312 ( 96%)] Loss: 6.53 (6.54) Time: 0.419s, 2444.96/s (0.418s, 2447.63/s) LR: 3.926e-01 Data: 0.026 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.427 (1.427) Loss: 5.568 ( 5.568) Acc@1: 9.668 ( 9.668) Acc@5: 24.805 ( 24.805) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 5.481 ( 5.538) Acc@1: 13.561 ( 10.246) Acc@5: 27.005 ( 25.424) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-13.pth.tar', 10.246000008544922) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-12.pth.tar', 9.55400000793457) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-11.pth.tar', 8.090000014953613) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-9.pth.tar', 6.49200001159668) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-10.pth.tar', 5.554000005950928) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-8.pth.tar', 5.476000002593994) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-7.pth.tar', 4.726000002593994) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-6.pth.tar', 3.7980000045776365) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-5.pth.tar', 
2.794000001220703) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-4.pth.tar', 1.903999999885559) + +Train: 14 [ 0/312 ( 0%)] Loss: 6.53 (6.53) Time: 2.028s, 505.05/s (2.028s, 505.05/s) LR: 3.915e-01 Data: 1.131 (1.131) +Train: 14 [ 50/312 ( 16%)] Loss: 6.49 (6.49) Time: 0.420s, 2439.25/s (0.447s, 2291.03/s) LR: 3.915e-01 Data: 0.025 (0.049) +Train: 14 [ 100/312 ( 32%)] Loss: 6.48 (6.49) Time: 0.426s, 2406.35/s (0.433s, 2364.26/s) LR: 3.915e-01 Data: 0.026 (0.038) +Train: 14 [ 150/312 ( 48%)] Loss: 6.45 (6.50) Time: 0.411s, 2488.73/s (0.428s, 2390.05/s) LR: 3.915e-01 Data: 0.028 (0.035) +Train: 14 [ 200/312 ( 64%)] Loss: 6.53 (6.50) Time: 0.407s, 2512.90/s (0.424s, 2417.00/s) LR: 3.915e-01 Data: 0.026 (0.033) +Train: 14 [ 250/312 ( 80%)] Loss: 6.54 (6.50) Time: 0.408s, 2512.79/s (0.421s, 2435.05/s) LR: 3.915e-01 Data: 0.028 (0.032) +Train: 14 [ 300/312 ( 96%)] Loss: 6.52 (6.50) Time: 0.415s, 2469.31/s (0.419s, 2445.92/s) LR: 3.915e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.433 (1.433) Loss: 5.272 ( 5.272) Acc@1: 12.305 ( 12.305) Acc@5: 29.980 ( 29.980) +Test: [ 48/48] Time: 0.091 (0.329) Loss: 5.210 ( 5.287) Acc@1: 14.269 ( 12.366) Acc@5: 31.250 ( 29.672) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-14.pth.tar', 12.366000008850097) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-13.pth.tar', 10.246000008544922) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-12.pth.tar', 9.55400000793457) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-11.pth.tar', 8.090000014953613) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-9.pth.tar', 6.49200001159668) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-10.pth.tar', 5.554000005950928) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-8.pth.tar', 5.476000002593994) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-7.pth.tar', 4.726000002593994) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-6.pth.tar', 3.7980000045776365) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-5.pth.tar', 2.794000001220703) + +Train: 15 [ 0/312 ( 0%)] Loss: 6.44 (6.44) Time: 1.627s, 629.44/s (1.627s, 629.44/s) LR: 3.902e-01 Data: 1.246 (1.246) +Train: 15 [ 50/312 ( 16%)] Loss: 6.53 (6.46) Time: 0.424s, 2415.79/s (0.445s, 2300.21/s) LR: 3.902e-01 Data: 0.026 (0.051) +Train: 15 [ 100/312 ( 32%)] Loss: 6.51 (6.47) Time: 0.414s, 2471.17/s (0.430s, 2381.59/s) LR: 3.902e-01 Data: 0.028 (0.039) +Train: 15 [ 150/312 ( 48%)] Loss: 6.48 (6.47) Time: 0.418s, 2452.64/s (0.424s, 2413.59/s) LR: 3.902e-01 Data: 0.026 (0.036) +Train: 15 [ 200/312 ( 64%)] Loss: 6.53 (6.47) Time: 0.422s, 2427.75/s (0.422s, 2424.13/s) LR: 3.902e-01 Data: 0.027 (0.034) +Train: 15 [ 250/312 ( 80%)] Loss: 6.47 (6.47) Time: 0.418s, 2452.61/s (0.423s, 2423.07/s) LR: 3.902e-01 Data: 0.028 (0.032) +Train: 15 [ 300/312 ( 96%)] Loss: 6.43 (6.47) Time: 0.413s, 2477.72/s (0.421s, 2429.72/s) LR: 3.902e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.440 (1.440) Loss: 5.165 ( 5.165) Acc@1: 13.477 ( 13.477) Acc@5: 31.445 ( 31.445) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 5.055 ( 5.161) Acc@1: 16.038 ( 13.138) Acc@5: 33.491 ( 31.276) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-15.pth.tar', 13.138000017700195) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-14.pth.tar', 12.366000008850097) + 
('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-13.pth.tar', 10.246000008544922) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-12.pth.tar', 9.55400000793457) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-11.pth.tar', 8.090000014953613) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-9.pth.tar', 6.49200001159668) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-10.pth.tar', 5.554000005950928) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-8.pth.tar', 5.476000002593994) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-7.pth.tar', 4.726000002593994) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-6.pth.tar', 3.7980000045776365) + +Train: 16 [ 0/312 ( 0%)] Loss: 6.43 (6.43) Time: 1.930s, 530.43/s (1.930s, 530.43/s) LR: 3.889e-01 Data: 1.553 (1.553) +Train: 16 [ 50/312 ( 16%)] Loss: 6.35 (6.42) Time: 0.420s, 2438.96/s (0.447s, 2292.90/s) LR: 3.889e-01 Data: 0.028 (0.057) +Train: 16 [ 100/312 ( 32%)] Loss: 6.50 (6.43) Time: 0.412s, 2486.88/s (0.434s, 2358.48/s) LR: 3.889e-01 Data: 0.026 (0.042) +Train: 16 [ 150/312 ( 48%)] Loss: 6.50 (6.43) Time: 0.411s, 2489.71/s (0.427s, 2397.25/s) LR: 3.889e-01 Data: 0.028 (0.037) +Train: 16 [ 200/312 ( 64%)] Loss: 6.44 (6.43) Time: 0.414s, 2472.80/s (0.424s, 2417.81/s) LR: 3.889e-01 Data: 0.028 (0.035) +Train: 16 [ 250/312 ( 80%)] Loss: 6.44 (6.44) Time: 0.422s, 2424.62/s (0.422s, 2424.92/s) LR: 3.889e-01 Data: 0.027 (0.034) +Train: 16 [ 300/312 ( 96%)] Loss: 6.42 (6.44) Time: 0.429s, 2388.92/s (0.423s, 2422.44/s) LR: 3.889e-01 Data: 0.028 (0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.673 (1.673) Loss: 5.194 ( 5.194) Acc@1: 14.746 ( 14.746) Acc@5: 30.273 ( 30.273) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 5.152 ( 5.230) Acc@1: 14.269 ( 12.626) Acc@5: 30.896 ( 29.332) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-15.pth.tar', 13.138000017700195) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-16.pth.tar', 12.626000008850097) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-14.pth.tar', 12.366000008850097) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-13.pth.tar', 10.246000008544922) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-12.pth.tar', 9.55400000793457) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-11.pth.tar', 8.090000014953613) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-9.pth.tar', 6.49200001159668) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-10.pth.tar', 5.554000005950928) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-8.pth.tar', 5.476000002593994) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-7.pth.tar', 4.726000002593994) + +Train: 17 [ 0/312 ( 0%)] Loss: 6.41 (6.41) Time: 1.778s, 576.07/s (1.778s, 576.07/s) LR: 3.875e-01 Data: 1.041 (1.041) +Train: 17 [ 50/312 ( 16%)] Loss: 6.44 (6.39) Time: 0.422s, 2423.76/s (0.447s, 2291.54/s) LR: 3.875e-01 Data: 0.028 (0.047) +Train: 17 [ 100/312 ( 32%)] Loss: 6.36 (6.39) Time: 0.419s, 2446.63/s (0.435s, 2356.06/s) LR: 3.875e-01 Data: 0.026 (0.037) +Train: 17 [ 150/312 ( 48%)] Loss: 6.45 (6.39) Time: 0.421s, 2434.00/s (0.431s, 2376.00/s) LR: 3.875e-01 Data: 0.027 (0.034) +Train: 17 [ 200/312 ( 64%)] Loss: 6.44 (6.40) Time: 0.422s, 2428.24/s (0.429s, 2388.03/s) LR: 3.875e-01 Data: 0.024 (0.033) +Train: 17 [ 250/312 ( 80%)] Loss: 6.36 (6.40) Time: 0.423s, 2423.46/s (0.428s, 
2391.38/s) LR: 3.875e-01 Data: 0.027 (0.032) +Train: 17 [ 300/312 ( 96%)] Loss: 6.37 (6.40) Time: 0.420s, 2435.43/s (0.427s, 2397.34/s) LR: 3.875e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.429 (1.429) Loss: 5.087 ( 5.087) Acc@1: 15.625 ( 15.625) Acc@5: 34.180 ( 34.180) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.987 ( 5.090) Acc@1: 18.750 ( 15.188) Acc@5: 37.028 ( 34.124) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-17.pth.tar', 15.188) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-15.pth.tar', 13.138000017700195) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-16.pth.tar', 12.626000008850097) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-14.pth.tar', 12.366000008850097) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-13.pth.tar', 10.246000008544922) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-12.pth.tar', 9.55400000793457) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-11.pth.tar', 8.090000014953613) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-9.pth.tar', 6.49200001159668) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-10.pth.tar', 5.554000005950928) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-8.pth.tar', 5.476000002593994) + +Train: 18 [ 0/312 ( 0%)] Loss: 6.42 (6.42) Time: 1.657s, 618.03/s (1.657s, 618.03/s) LR: 3.860e-01 Data: 1.236 (1.236) +Train: 18 [ 50/312 ( 16%)] Loss: 6.29 (6.36) Time: 0.416s, 2463.79/s (0.445s, 2300.44/s) LR: 3.860e-01 Data: 0.026 (0.051) +Train: 18 [ 100/312 ( 32%)] Loss: 6.38 (6.36) Time: 0.419s, 2444.61/s (0.432s, 2372.55/s) LR: 3.860e-01 Data: 0.024 (0.039) +Train: 18 [ 150/312 ( 48%)] Loss: 6.29 (6.37) Time: 0.423s, 2420.25/s (0.428s, 2392.61/s) LR: 3.860e-01 Data: 0.026 (0.035) +Train: 18 [ 200/312 ( 64%)] Loss: 6.36 (6.37) Time: 0.421s, 2435.06/s (0.426s, 2400.97/s) LR: 3.860e-01 Data: 0.027 (0.033) +Train: 18 [ 250/312 ( 80%)] Loss: 6.41 (6.37) Time: 0.420s, 2438.65/s (0.425s, 2409.37/s) LR: 3.860e-01 Data: 0.027 (0.032) +Train: 18 [ 300/312 ( 96%)] Loss: 6.35 (6.37) Time: 0.422s, 2427.48/s (0.424s, 2413.58/s) LR: 3.860e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.444 (1.444) Loss: 5.026 ( 5.026) Acc@1: 14.941 ( 14.941) Acc@5: 36.133 ( 36.133) +Test: [ 48/48] Time: 0.092 (0.329) Loss: 4.921 ( 5.045) Acc@1: 17.807 ( 15.478) Acc@5: 37.500 ( 34.544) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-18.pth.tar', 15.478000010375977) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-17.pth.tar', 15.188) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-15.pth.tar', 13.138000017700195) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-16.pth.tar', 12.626000008850097) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-14.pth.tar', 12.366000008850097) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-13.pth.tar', 10.246000008544922) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-12.pth.tar', 9.55400000793457) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-11.pth.tar', 8.090000014953613) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-9.pth.tar', 6.49200001159668) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-10.pth.tar', 5.554000005950928) + +Train: 19 [ 0/312 ( 0%)] Loss: 6.37 (6.37) Time: 1.542s, 664.11/s (1.542s, 
664.11/s) LR: 3.844e-01 Data: 1.111 (1.111) +Train: 19 [ 50/312 ( 16%)] Loss: 6.32 (6.32) Time: 0.419s, 2445.14/s (0.438s, 2335.34/s) LR: 3.844e-01 Data: 0.027 (0.049) +Train: 19 [ 100/312 ( 32%)] Loss: 6.35 (6.33) Time: 0.419s, 2443.52/s (0.429s, 2387.15/s) LR: 3.844e-01 Data: 0.027 (0.038) +Train: 19 [ 150/312 ( 48%)] Loss: 6.29 (6.34) Time: 0.421s, 2434.93/s (0.426s, 2401.88/s) LR: 3.844e-01 Data: 0.027 (0.034) +Train: 19 [ 200/312 ( 64%)] Loss: 6.31 (6.34) Time: 0.425s, 2407.75/s (0.426s, 2406.51/s) LR: 3.844e-01 Data: 0.027 (0.033) +Train: 19 [ 250/312 ( 80%)] Loss: 6.38 (6.34) Time: 0.421s, 2429.57/s (0.425s, 2409.51/s) LR: 3.844e-01 Data: 0.025 (0.031) +Train: 19 [ 300/312 ( 96%)] Loss: 6.34 (6.34) Time: 0.418s, 2447.10/s (0.425s, 2411.38/s) LR: 3.844e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.434 (1.434) Loss: 4.804 ( 4.804) Acc@1: 17.871 ( 17.871) Acc@5: 38.770 ( 38.770) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.720 ( 4.828) Acc@1: 18.750 ( 16.478) Acc@5: 39.269 ( 36.778) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-19.pth.tar', 16.478000032348632) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-18.pth.tar', 15.478000010375977) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-17.pth.tar', 15.188) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-15.pth.tar', 13.138000017700195) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-16.pth.tar', 12.626000008850097) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-14.pth.tar', 12.366000008850097) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-13.pth.tar', 10.246000008544922) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-12.pth.tar', 9.55400000793457) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-11.pth.tar', 8.090000014953613) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-9.pth.tar', 6.49200001159668) + +Train: 20 [ 0/312 ( 0%)] Loss: 6.41 (6.41) Time: 1.602s, 639.33/s (1.602s, 639.33/s) LR: 3.827e-01 Data: 1.090 (1.090) +Train: 20 [ 50/312 ( 16%)] Loss: 6.28 (6.29) Time: 0.414s, 2474.01/s (0.437s, 2344.03/s) LR: 3.827e-01 Data: 0.028 (0.048) +Train: 20 [ 100/312 ( 32%)] Loss: 6.33 (6.30) Time: 0.416s, 2459.02/s (0.426s, 2401.33/s) LR: 3.827e-01 Data: 0.028 (0.038) +Train: 20 [ 150/312 ( 48%)] Loss: 6.36 (6.30) Time: 0.423s, 2419.67/s (0.425s, 2412.21/s) LR: 3.827e-01 Data: 0.027 (0.035) +Train: 20 [ 200/312 ( 64%)] Loss: 6.35 (6.31) Time: 0.421s, 2429.69/s (0.424s, 2416.41/s) LR: 3.827e-01 Data: 0.028 (0.033) +Train: 20 [ 250/312 ( 80%)] Loss: 6.29 (6.31) Time: 0.417s, 2457.81/s (0.422s, 2423.84/s) LR: 3.827e-01 Data: 0.027 (0.032) +Train: 20 [ 300/312 ( 96%)] Loss: 6.28 (6.31) Time: 0.420s, 2437.33/s (0.422s, 2427.68/s) LR: 3.827e-01 Data: 0.029 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.434 (1.434) Loss: 4.978 ( 4.978) Acc@1: 16.504 ( 16.504) Acc@5: 36.719 ( 36.719) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.868 ( 4.968) Acc@1: 18.986 ( 16.898) Acc@5: 38.325 ( 36.890) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-20.pth.tar', 16.898000005493163) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-19.pth.tar', 16.478000032348632) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-18.pth.tar', 15.478000010375977) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-17.pth.tar', 15.188) + 
('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-15.pth.tar', 13.138000017700195) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-16.pth.tar', 12.626000008850097) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-14.pth.tar', 12.366000008850097) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-13.pth.tar', 10.246000008544922) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-12.pth.tar', 9.55400000793457) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-11.pth.tar', 8.090000014953613) + +Train: 21 [ 0/312 ( 0%)] Loss: 6.27 (6.27) Time: 1.737s, 589.43/s (1.737s, 589.43/s) LR: 3.810e-01 Data: 1.354 (1.354) +Train: 21 [ 50/312 ( 16%)] Loss: 6.32 (6.27) Time: 0.419s, 2441.83/s (0.442s, 2315.03/s) LR: 3.810e-01 Data: 0.026 (0.054) +Train: 21 [ 100/312 ( 32%)] Loss: 6.32 (6.27) Time: 0.419s, 2441.27/s (0.431s, 2373.86/s) LR: 3.810e-01 Data: 0.027 (0.041) +Train: 21 [ 150/312 ( 48%)] Loss: 6.31 (6.27) Time: 0.420s, 2436.62/s (0.428s, 2392.56/s) LR: 3.810e-01 Data: 0.027 (0.036) +Train: 21 [ 200/312 ( 64%)] Loss: 6.33 (6.28) Time: 0.422s, 2429.01/s (0.427s, 2400.67/s) LR: 3.810e-01 Data: 0.027 (0.034) +Train: 21 [ 250/312 ( 80%)] Loss: 6.32 (6.28) Time: 0.425s, 2410.22/s (0.426s, 2405.02/s) LR: 3.810e-01 Data: 0.028 (0.033) +Train: 21 [ 300/312 ( 96%)] Loss: 6.30 (6.29) Time: 0.425s, 2407.36/s (0.426s, 2406.01/s) LR: 3.810e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.436 (1.436) Loss: 4.894 ( 4.894) Acc@1: 17.676 ( 17.676) Acc@5: 40.918 ( 40.918) +Test: [ 48/48] Time: 0.092 (0.332) Loss: 4.814 ( 4.930) Acc@1: 18.396 ( 17.068) Acc@5: 41.745 ( 37.756) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-21.pth.tar', 17.06800000793457) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-20.pth.tar', 16.898000005493163) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-19.pth.tar', 16.478000032348632) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-18.pth.tar', 15.478000010375977) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-17.pth.tar', 15.188) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-15.pth.tar', 13.138000017700195) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-16.pth.tar', 12.626000008850097) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-14.pth.tar', 12.366000008850097) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-13.pth.tar', 10.246000008544922) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-12.pth.tar', 9.55400000793457) + +Train: 22 [ 0/312 ( 0%)] Loss: 6.28 (6.28) Time: 1.714s, 597.34/s (1.714s, 597.34/s) LR: 3.791e-01 Data: 1.325 (1.325) +Train: 22 [ 50/312 ( 16%)] Loss: 6.22 (6.24) Time: 0.426s, 2405.27/s (0.457s, 2243.07/s) LR: 3.791e-01 Data: 0.028 (0.061) +Train: 22 [ 100/312 ( 32%)] Loss: 6.11 (6.24) Time: 0.426s, 2403.33/s (0.441s, 2323.35/s) LR: 3.791e-01 Data: 0.026 (0.044) +Train: 22 [ 150/312 ( 48%)] Loss: 6.31 (6.25) Time: 0.429s, 2385.70/s (0.436s, 2351.18/s) LR: 3.791e-01 Data: 0.033 (0.039) +Train: 22 [ 200/312 ( 64%)] Loss: 6.26 (6.25) Time: 0.422s, 2425.11/s (0.433s, 2365.55/s) LR: 3.791e-01 Data: 0.027 (0.036) +Train: 22 [ 250/312 ( 80%)] Loss: 6.24 (6.26) Time: 0.421s, 2432.89/s (0.431s, 2374.62/s) LR: 3.791e-01 Data: 0.028 (0.035) +Train: 22 [ 300/312 ( 96%)] Loss: 6.26 (6.26) Time: 0.426s, 2405.36/s (0.430s, 2380.13/s) LR: 3.791e-01 Data: 0.028 (0.033) +Distributing 
BatchNorm running means and vars +Test: [ 0/48] Time: 1.438 (1.438) Loss: 4.711 ( 4.711) Acc@1: 20.801 ( 20.801) Acc@5: 40.820 ( 40.820) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.577 ( 4.716) Acc@1: 21.344 ( 19.278) Acc@5: 44.458 ( 41.002) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-22.pth.tar', 19.27799999572754) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-21.pth.tar', 17.06800000793457) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-20.pth.tar', 16.898000005493163) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-19.pth.tar', 16.478000032348632) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-18.pth.tar', 15.478000010375977) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-17.pth.tar', 15.188) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-15.pth.tar', 13.138000017700195) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-16.pth.tar', 12.626000008850097) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-14.pth.tar', 12.366000008850097) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-13.pth.tar', 10.246000008544922) + +Train: 23 [ 0/312 ( 0%)] Loss: 6.18 (6.18) Time: 1.653s, 619.45/s (1.653s, 619.45/s) LR: 3.772e-01 Data: 1.272 (1.272) +Train: 23 [ 50/312 ( 16%)] Loss: 6.20 (6.21) Time: 0.429s, 2388.51/s (0.449s, 2280.79/s) LR: 3.772e-01 Data: 0.032 (0.052) +Train: 23 [ 100/312 ( 32%)] Loss: 6.26 (6.21) Time: 0.417s, 2456.89/s (0.437s, 2344.23/s) LR: 3.772e-01 Data: 0.027 (0.040) +Train: 23 [ 150/312 ( 48%)] Loss: 6.24 (6.22) Time: 0.418s, 2452.66/s (0.430s, 2378.69/s) LR: 3.772e-01 Data: 0.027 (0.036) +Train: 23 [ 200/312 ( 64%)] Loss: 6.28 (6.22) Time: 0.420s, 2435.89/s (0.427s, 2395.94/s) LR: 3.772e-01 Data: 0.028 (0.034) +Train: 23 [ 250/312 ( 80%)] Loss: 6.27 (6.23) Time: 0.423s, 2418.26/s (0.427s, 2399.40/s) LR: 3.772e-01 Data: 0.026 (0.032) +Train: 23 [ 300/312 ( 96%)] Loss: 6.20 (6.23) Time: 0.415s, 2467.04/s (0.425s, 2407.06/s) LR: 3.772e-01 Data: 0.026 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.453 (1.453) Loss: 4.551 ( 4.551) Acc@1: 22.754 ( 22.754) Acc@5: 44.141 ( 44.141) +Test: [ 48/48] Time: 0.092 (0.332) Loss: 4.487 ( 4.596) Acc@1: 24.057 ( 20.880) Acc@5: 46.698 ( 43.328) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-23.pth.tar', 20.880000010375976) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-22.pth.tar', 19.27799999572754) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-21.pth.tar', 17.06800000793457) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-20.pth.tar', 16.898000005493163) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-19.pth.tar', 16.478000032348632) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-18.pth.tar', 15.478000010375977) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-17.pth.tar', 15.188) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-15.pth.tar', 13.138000017700195) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-16.pth.tar', 12.626000008850097) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-14.pth.tar', 12.366000008850097) + +Train: 24 [ 0/312 ( 0%)] Loss: 6.21 (6.21) Time: 1.613s, 634.86/s (1.613s, 634.86/s) LR: 3.753e-01 Data: 1.160 (1.160) +Train: 24 [ 50/312 ( 16%)] Loss: 6.21 (6.18) Time: 0.424s, 2414.59/s (0.447s, 2290.35/s) LR: 3.753e-01 Data: 0.027 (0.049) 
+Train: 24 [ 100/312 ( 32%)] Loss: 6.17 (6.18) Time: 0.410s, 2495.26/s (0.432s, 2372.02/s) LR: 3.753e-01 Data: 0.027 (0.038) +Train: 24 [ 150/312 ( 48%)] Loss: 6.19 (6.20) Time: 0.409s, 2505.07/s (0.424s, 2413.99/s) LR: 3.753e-01 Data: 0.027 (0.035) +Train: 24 [ 200/312 ( 64%)] Loss: 6.20 (6.20) Time: 0.407s, 2517.76/s (0.420s, 2437.92/s) LR: 3.753e-01 Data: 0.027 (0.033) +Train: 24 [ 250/312 ( 80%)] Loss: 6.19 (6.20) Time: 0.411s, 2493.23/s (0.418s, 2450.79/s) LR: 3.753e-01 Data: 0.029 (0.032) +Train: 24 [ 300/312 ( 96%)] Loss: 6.14 (6.21) Time: 0.416s, 2461.73/s (0.417s, 2454.07/s) LR: 3.753e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.424 (1.424) Loss: 4.590 ( 4.590) Acc@1: 22.168 ( 22.168) Acc@5: 45.703 ( 45.703) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 4.505 ( 4.621) Acc@1: 21.934 ( 21.138) Acc@5: 46.816 ( 43.394) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-24.pth.tar', 21.137999993286133) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-23.pth.tar', 20.880000010375976) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-22.pth.tar', 19.27799999572754) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-21.pth.tar', 17.06800000793457) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-20.pth.tar', 16.898000005493163) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-19.pth.tar', 16.478000032348632) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-18.pth.tar', 15.478000010375977) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-17.pth.tar', 15.188) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-15.pth.tar', 13.138000017700195) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-16.pth.tar', 12.626000008850097) + +Train: 25 [ 0/312 ( 0%)] Loss: 6.13 (6.13) Time: 1.612s, 635.34/s (1.612s, 635.34/s) LR: 3.732e-01 Data: 1.227 (1.227) +Train: 25 [ 50/312 ( 16%)] Loss: 6.12 (6.16) Time: 0.415s, 2464.69/s (0.442s, 2318.52/s) LR: 3.732e-01 Data: 0.026 (0.051) +Train: 25 [ 100/312 ( 32%)] Loss: 6.10 (6.16) Time: 0.413s, 2479.04/s (0.428s, 2390.10/s) LR: 3.732e-01 Data: 0.027 (0.039) +Train: 25 [ 150/312 ( 48%)] Loss: 6.19 (6.17) Time: 0.420s, 2440.04/s (0.424s, 2412.88/s) LR: 3.732e-01 Data: 0.029 (0.035) +Train: 25 [ 200/312 ( 64%)] Loss: 6.08 (6.17) Time: 0.422s, 2424.52/s (0.424s, 2415.04/s) LR: 3.732e-01 Data: 0.027 (0.033) +Train: 25 [ 250/312 ( 80%)] Loss: 6.22 (6.18) Time: 0.421s, 2433.45/s (0.424s, 2415.73/s) LR: 3.732e-01 Data: 0.028 (0.032) +Train: 25 [ 300/312 ( 96%)] Loss: 6.24 (6.18) Time: 0.423s, 2423.41/s (0.423s, 2418.20/s) LR: 3.732e-01 Data: 0.026 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.429 (1.429) Loss: 4.699 ( 4.699) Acc@1: 21.484 ( 21.484) Acc@5: 44.043 ( 44.043) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.541 ( 4.683) Acc@1: 23.231 ( 21.076) Acc@5: 47.877 ( 43.780) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-24.pth.tar', 21.137999993286133) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-25.pth.tar', 21.076000007324218) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-23.pth.tar', 20.880000010375976) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-22.pth.tar', 19.27799999572754) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-21.pth.tar', 17.06800000793457) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-20.pth.tar', 
16.898000005493163) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-19.pth.tar', 16.478000032348632) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-18.pth.tar', 15.478000010375977) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-17.pth.tar', 15.188) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-15.pth.tar', 13.138000017700195) + +Train: 26 [ 0/312 ( 0%)] Loss: 6.19 (6.19) Time: 1.667s, 614.32/s (1.667s, 614.32/s) LR: 3.711e-01 Data: 1.283 (1.283) +Train: 26 [ 50/312 ( 16%)] Loss: 6.15 (6.12) Time: 0.421s, 2430.02/s (0.448s, 2286.28/s) LR: 3.711e-01 Data: 0.026 (0.052) +Train: 26 [ 100/312 ( 32%)] Loss: 6.09 (6.13) Time: 0.421s, 2434.39/s (0.435s, 2353.82/s) LR: 3.711e-01 Data: 0.028 (0.039) +Train: 26 [ 150/312 ( 48%)] Loss: 6.22 (6.14) Time: 0.422s, 2424.61/s (0.430s, 2381.80/s) LR: 3.711e-01 Data: 0.026 (0.035) +Train: 26 [ 200/312 ( 64%)] Loss: 6.23 (6.15) Time: 0.423s, 2420.41/s (0.428s, 2393.83/s) LR: 3.711e-01 Data: 0.028 (0.034) +Train: 26 [ 250/312 ( 80%)] Loss: 6.17 (6.15) Time: 0.422s, 2426.50/s (0.426s, 2401.55/s) LR: 3.711e-01 Data: 0.031 (0.032) +Train: 26 [ 300/312 ( 96%)] Loss: 6.12 (6.15) Time: 0.418s, 2452.09/s (0.425s, 2409.49/s) LR: 3.711e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.442 (1.442) Loss: 4.438 ( 4.438) Acc@1: 22.949 ( 22.949) Acc@5: 48.340 ( 48.340) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.351 ( 4.481) Acc@1: 25.825 ( 23.350) Acc@5: 48.349 ( 46.748) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-26.pth.tar', 23.350000003051758) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-24.pth.tar', 21.137999993286133) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-25.pth.tar', 21.076000007324218) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-23.pth.tar', 20.880000010375976) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-22.pth.tar', 19.27799999572754) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-21.pth.tar', 17.06800000793457) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-20.pth.tar', 16.898000005493163) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-19.pth.tar', 16.478000032348632) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-18.pth.tar', 15.478000010375977) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-17.pth.tar', 15.188) + +Train: 27 [ 0/312 ( 0%)] Loss: 6.15 (6.15) Time: 1.788s, 572.58/s (1.788s, 572.58/s) LR: 3.689e-01 Data: 1.407 (1.407) +Train: 27 [ 50/312 ( 16%)] Loss: 6.12 (6.09) Time: 0.420s, 2435.52/s (0.446s, 2298.42/s) LR: 3.689e-01 Data: 0.025 (0.054) +Train: 27 [ 100/312 ( 32%)] Loss: 6.02 (6.10) Time: 0.420s, 2436.82/s (0.433s, 2364.92/s) LR: 3.689e-01 Data: 0.027 (0.041) +Train: 27 [ 150/312 ( 48%)] Loss: 6.24 (6.11) Time: 0.421s, 2432.06/s (0.429s, 2387.22/s) LR: 3.689e-01 Data: 0.027 (0.037) +Train: 27 [ 200/312 ( 64%)] Loss: 6.19 (6.12) Time: 0.424s, 2417.85/s (0.427s, 2398.19/s) LR: 3.689e-01 Data: 0.026 (0.034) +Train: 27 [ 250/312 ( 80%)] Loss: 6.24 (6.13) Time: 0.419s, 2443.14/s (0.426s, 2404.90/s) LR: 3.689e-01 Data: 0.028 (0.033) +Train: 27 [ 300/312 ( 96%)] Loss: 6.17 (6.13) Time: 0.423s, 2422.32/s (0.425s, 2409.67/s) LR: 3.689e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.435 (1.435) Loss: 4.564 ( 4.564) Acc@1: 20.410 ( 20.410) Acc@5: 44.824 ( 44.824) +Test: [ 48/48] Time: 0.092 (0.329) Loss: 
4.475 ( 4.582) Acc@1: 22.170 ( 21.126) Acc@5: 44.929 ( 43.174) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-26.pth.tar', 23.350000003051758) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-24.pth.tar', 21.137999993286133) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-27.pth.tar', 21.125999998779296) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-25.pth.tar', 21.076000007324218) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-23.pth.tar', 20.880000010375976) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-22.pth.tar', 19.27799999572754) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-21.pth.tar', 17.06800000793457) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-20.pth.tar', 16.898000005493163) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-19.pth.tar', 16.478000032348632) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-18.pth.tar', 15.478000010375977) + +Train: 28 [ 0/312 ( 0%)] Loss: 6.05 (6.05) Time: 1.896s, 540.20/s (1.896s, 540.20/s) LR: 3.666e-01 Data: 1.512 (1.512) +Train: 28 [ 50/312 ( 16%)] Loss: 6.10 (6.06) Time: 0.423s, 2420.28/s (0.447s, 2288.55/s) LR: 3.666e-01 Data: 0.033 (0.057) +Train: 28 [ 100/312 ( 32%)] Loss: 6.07 (6.08) Time: 0.415s, 2470.11/s (0.432s, 2368.49/s) LR: 3.666e-01 Data: 0.029 (0.042) +Train: 28 [ 150/312 ( 48%)] Loss: 6.16 (6.09) Time: 0.422s, 2423.75/s (0.428s, 2394.90/s) LR: 3.666e-01 Data: 0.027 (0.037) +Train: 28 [ 200/312 ( 64%)] Loss: 6.22 (6.09) Time: 0.422s, 2428.69/s (0.426s, 2401.76/s) LR: 3.666e-01 Data: 0.024 (0.035) +Train: 28 [ 250/312 ( 80%)] Loss: 6.12 (6.10) Time: 0.418s, 2449.50/s (0.425s, 2410.68/s) LR: 3.666e-01 Data: 0.027 (0.033) +Train: 28 [ 300/312 ( 96%)] Loss: 6.09 (6.10) Time: 0.419s, 2441.90/s (0.424s, 2415.71/s) LR: 3.666e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.455 (1.455) Loss: 4.399 ( 4.399) Acc@1: 24.609 ( 24.609) Acc@5: 48.242 ( 48.242) +Test: [ 48/48] Time: 0.090 (0.331) Loss: 4.326 ( 4.430) Acc@1: 26.179 ( 23.512) Acc@5: 48.585 ( 46.860) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-28.pth.tar', 23.511999995117186) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-26.pth.tar', 23.350000003051758) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-24.pth.tar', 21.137999993286133) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-27.pth.tar', 21.125999998779296) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-25.pth.tar', 21.076000007324218) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-23.pth.tar', 20.880000010375976) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-22.pth.tar', 19.27799999572754) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-21.pth.tar', 17.06800000793457) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-20.pth.tar', 16.898000005493163) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-19.pth.tar', 16.478000032348632) + +Train: 29 [ 0/312 ( 0%)] Loss: 6.02 (6.02) Time: 1.837s, 557.30/s (1.837s, 557.30/s) LR: 3.642e-01 Data: 1.464 (1.464) +Train: 29 [ 50/312 ( 16%)] Loss: 6.10 (6.04) Time: 0.405s, 2526.54/s (0.436s, 2350.72/s) LR: 3.642e-01 Data: 0.028 (0.056) +Train: 29 [ 100/312 ( 32%)] Loss: 6.05 (6.05) Time: 0.406s, 2520.26/s (0.422s, 2426.72/s) LR: 3.642e-01 Data: 0.027 (0.042) +Train: 29 [ 150/312 ( 48%)] 
Loss: 6.04 (6.06) Time: 0.414s, 2476.11/s (0.418s, 2447.86/s) LR: 3.642e-01 Data: 0.028 (0.037) +Train: 29 [ 200/312 ( 64%)] Loss: 6.16 (6.07) Time: 0.425s, 2407.17/s (0.419s, 2446.13/s) LR: 3.642e-01 Data: 0.028 (0.035) +Train: 29 [ 250/312 ( 80%)] Loss: 6.07 (6.08) Time: 0.423s, 2419.10/s (0.419s, 2441.62/s) LR: 3.642e-01 Data: 0.028 (0.033) +Train: 29 [ 300/312 ( 96%)] Loss: 6.07 (6.08) Time: 0.419s, 2441.92/s (0.420s, 2440.06/s) LR: 3.642e-01 Data: 0.026 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.431 (1.431) Loss: 4.374 ( 4.374) Acc@1: 24.902 ( 24.902) Acc@5: 48.242 ( 48.242) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 4.227 ( 4.373) Acc@1: 28.066 ( 25.094) Acc@5: 51.297 ( 48.636) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-29.pth.tar', 25.094000006713866) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-28.pth.tar', 23.511999995117186) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-26.pth.tar', 23.350000003051758) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-24.pth.tar', 21.137999993286133) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-27.pth.tar', 21.125999998779296) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-25.pth.tar', 21.076000007324218) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-23.pth.tar', 20.880000010375976) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-22.pth.tar', 19.27799999572754) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-21.pth.tar', 17.06800000793457) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-20.pth.tar', 16.898000005493163) + +Train: 30 [ 0/312 ( 0%)] Loss: 5.94 (5.94) Time: 1.656s, 618.51/s (1.656s, 618.51/s) LR: 3.618e-01 Data: 1.281 (1.281) +Train: 30 [ 50/312 ( 16%)] Loss: 6.00 (6.01) Time: 0.410s, 2500.12/s (0.433s, 2365.66/s) LR: 3.618e-01 Data: 0.029 (0.052) +Train: 30 [ 100/312 ( 32%)] Loss: 6.05 (6.03) Time: 0.411s, 2494.49/s (0.421s, 2430.30/s) LR: 3.618e-01 Data: 0.027 (0.040) +Train: 30 [ 150/312 ( 48%)] Loss: 6.02 (6.04) Time: 0.424s, 2413.98/s (0.420s, 2439.80/s) LR: 3.618e-01 Data: 0.027 (0.036) +Train: 30 [ 200/312 ( 64%)] Loss: 6.09 (6.05) Time: 0.424s, 2416.00/s (0.421s, 2434.94/s) LR: 3.618e-01 Data: 0.027 (0.034) +Train: 30 [ 250/312 ( 80%)] Loss: 6.13 (6.05) Time: 0.427s, 2395.58/s (0.421s, 2432.33/s) LR: 3.618e-01 Data: 0.032 (0.033) +Train: 30 [ 300/312 ( 96%)] Loss: 6.06 (6.06) Time: 0.427s, 2396.29/s (0.421s, 2429.60/s) LR: 3.618e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.457 (1.457) Loss: 4.585 ( 4.585) Acc@1: 21.680 ( 21.680) Acc@5: 44.336 ( 44.336) +Test: [ 48/48] Time: 0.092 (0.329) Loss: 4.511 ( 4.635) Acc@1: 24.057 ( 21.742) Acc@5: 47.288 ( 43.726) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-29.pth.tar', 25.094000006713866) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-28.pth.tar', 23.511999995117186) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-26.pth.tar', 23.350000003051758) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-30.pth.tar', 21.74200001037598) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-24.pth.tar', 21.137999993286133) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-27.pth.tar', 21.125999998779296) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-25.pth.tar', 21.076000007324218) + 
('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-23.pth.tar', 20.880000010375976) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-22.pth.tar', 19.27799999572754) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-21.pth.tar', 17.06800000793457) + +Train: 31 [ 0/312 ( 0%)] Loss: 5.87 (5.87) Time: 1.883s, 543.71/s (1.883s, 543.71/s) LR: 3.593e-01 Data: 1.501 (1.501) +Train: 31 [ 50/312 ( 16%)] Loss: 6.07 (5.99) Time: 0.419s, 2444.62/s (0.448s, 2284.86/s) LR: 3.593e-01 Data: 0.028 (0.056) +Train: 31 [ 100/312 ( 32%)] Loss: 5.88 (6.00) Time: 0.419s, 2444.28/s (0.434s, 2360.88/s) LR: 3.593e-01 Data: 0.027 (0.042) +Train: 31 [ 150/312 ( 48%)] Loss: 6.05 (6.01) Time: 0.415s, 2469.80/s (0.429s, 2388.37/s) LR: 3.593e-01 Data: 0.027 (0.037) +Train: 31 [ 200/312 ( 64%)] Loss: 6.07 (6.02) Time: 0.418s, 2447.05/s (0.427s, 2398.30/s) LR: 3.593e-01 Data: 0.027 (0.035) +Train: 31 [ 250/312 ( 80%)] Loss: 6.00 (6.03) Time: 0.416s, 2459.79/s (0.426s, 2406.23/s) LR: 3.593e-01 Data: 0.027 (0.033) +Train: 31 [ 300/312 ( 96%)] Loss: 6.07 (6.04) Time: 0.420s, 2435.70/s (0.424s, 2413.62/s) LR: 3.593e-01 Data: 0.026 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.459 (1.459) Loss: 4.398 ( 4.398) Acc@1: 23.926 ( 23.926) Acc@5: 47.656 ( 47.656) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 4.294 ( 4.422) Acc@1: 25.472 ( 24.376) Acc@5: 49.057 ( 47.738) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-29.pth.tar', 25.094000006713866) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-31.pth.tar', 24.37600004333496) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-28.pth.tar', 23.511999995117186) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-26.pth.tar', 23.350000003051758) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-30.pth.tar', 21.74200001037598) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-24.pth.tar', 21.137999993286133) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-27.pth.tar', 21.125999998779296) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-25.pth.tar', 21.076000007324218) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-23.pth.tar', 20.880000010375976) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-22.pth.tar', 19.27799999572754) + +Train: 32 [ 0/312 ( 0%)] Loss: 5.92 (5.92) Time: 1.598s, 640.97/s (1.598s, 640.97/s) LR: 3.567e-01 Data: 1.218 (1.218) +Train: 32 [ 50/312 ( 16%)] Loss: 6.06 (5.97) Time: 0.409s, 2503.10/s (0.437s, 2345.71/s) LR: 3.567e-01 Data: 0.028 (0.051) +Train: 32 [ 100/312 ( 32%)] Loss: 6.01 (5.98) Time: 0.407s, 2514.48/s (0.423s, 2421.53/s) LR: 3.567e-01 Data: 0.027 (0.039) +Train: 32 [ 150/312 ( 48%)] Loss: 6.02 (5.99) Time: 0.417s, 2456.86/s (0.418s, 2446.98/s) LR: 3.567e-01 Data: 0.033 (0.035) +Train: 32 [ 200/312 ( 64%)] Loss: 5.89 (6.00) Time: 0.418s, 2451.30/s (0.417s, 2453.43/s) LR: 3.567e-01 Data: 0.028 (0.033) +Train: 32 [ 250/312 ( 80%)] Loss: 6.05 (6.01) Time: 0.420s, 2438.36/s (0.418s, 2446.87/s) LR: 3.567e-01 Data: 0.027 (0.032) +Train: 32 [ 300/312 ( 96%)] Loss: 5.99 (6.01) Time: 0.412s, 2486.99/s (0.418s, 2449.35/s) LR: 3.567e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.550 (1.550) Loss: 4.452 ( 4.452) Acc@1: 26.172 ( 26.172) Acc@5: 47.363 ( 47.363) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 4.292 ( 4.429) Acc@1: 26.533 ( 25.062) Acc@5: 49.646 ( 48.062) +Current checkpoints: + 
('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-29.pth.tar', 25.094000006713866) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-32.pth.tar', 25.06200001953125) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-31.pth.tar', 24.37600004333496) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-28.pth.tar', 23.511999995117186) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-26.pth.tar', 23.350000003051758) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-30.pth.tar', 21.74200001037598) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-24.pth.tar', 21.137999993286133) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-27.pth.tar', 21.125999998779296) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-25.pth.tar', 21.076000007324218) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-23.pth.tar', 20.880000010375976) + +Train: 33 [ 0/312 ( 0%)] Loss: 5.93 (5.93) Time: 1.680s, 609.65/s (1.680s, 609.65/s) LR: 3.541e-01 Data: 1.302 (1.302) +Train: 33 [ 50/312 ( 16%)] Loss: 5.93 (5.94) Time: 0.422s, 2428.07/s (0.440s, 2328.20/s) LR: 3.541e-01 Data: 0.027 (0.053) +Train: 33 [ 100/312 ( 32%)] Loss: 5.96 (5.96) Time: 0.422s, 2427.43/s (0.431s, 2376.40/s) LR: 3.541e-01 Data: 0.026 (0.040) +Train: 33 [ 150/312 ( 48%)] Loss: 6.06 (5.97) Time: 0.416s, 2463.16/s (0.426s, 2406.26/s) LR: 3.541e-01 Data: 0.028 (0.036) +Train: 33 [ 200/312 ( 64%)] Loss: 5.98 (5.98) Time: 0.417s, 2456.40/s (0.423s, 2423.15/s) LR: 3.541e-01 Data: 0.029 (0.034) +Train: 33 [ 250/312 ( 80%)] Loss: 5.96 (5.99) Time: 0.424s, 2413.72/s (0.422s, 2427.51/s) LR: 3.541e-01 Data: 0.028 (0.032) +Train: 33 [ 300/312 ( 96%)] Loss: 6.09 (5.99) Time: 0.420s, 2435.68/s (0.423s, 2423.52/s) LR: 3.541e-01 Data: 0.026 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.420 (1.420) Loss: 4.409 ( 4.409) Acc@1: 24.414 ( 24.414) Acc@5: 46.387 ( 46.387) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 4.279 ( 4.378) Acc@1: 25.118 ( 24.748) Acc@5: 48.231 ( 48.030) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-29.pth.tar', 25.094000006713866) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-32.pth.tar', 25.06200001953125) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-33.pth.tar', 24.748000018920898) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-31.pth.tar', 24.37600004333496) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-28.pth.tar', 23.511999995117186) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-26.pth.tar', 23.350000003051758) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-30.pth.tar', 21.74200001037598) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-24.pth.tar', 21.137999993286133) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-27.pth.tar', 21.125999998779296) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-25.pth.tar', 21.076000007324218) + +Train: 34 [ 0/312 ( 0%)] Loss: 5.91 (5.91) Time: 1.927s, 531.40/s (1.927s, 531.40/s) LR: 3.514e-01 Data: 1.553 (1.553) +Train: 34 [ 50/312 ( 16%)] Loss: 6.03 (5.91) Time: 0.406s, 2525.11/s (0.436s, 2348.28/s) LR: 3.514e-01 Data: 0.025 (0.057) +Train: 34 [ 100/312 ( 32%)] Loss: 6.01 (5.93) Time: 0.407s, 2513.25/s (0.421s, 2430.31/s) LR: 3.514e-01 Data: 0.028 (0.042) +Train: 34 [ 150/312 ( 48%)] Loss: 5.93 (5.94) Time: 0.409s, 2503.64/s (0.417s, 2456.19/s) LR: 3.514e-01 Data: 0.029 
(0.037) +Train: 34 [ 200/312 ( 64%)] Loss: 5.95 (5.95) Time: 0.421s, 2434.95/s (0.416s, 2460.77/s) LR: 3.514e-01 Data: 0.027 (0.035) +Train: 34 [ 250/312 ( 80%)] Loss: 5.92 (5.96) Time: 0.424s, 2413.75/s (0.418s, 2452.41/s) LR: 3.514e-01 Data: 0.030 (0.034) +Train: 34 [ 300/312 ( 96%)] Loss: 5.97 (5.97) Time: 0.423s, 2422.14/s (0.418s, 2448.12/s) LR: 3.514e-01 Data: 0.028 (0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.438 (1.438) Loss: 4.266 ( 4.266) Acc@1: 25.098 ( 25.098) Acc@5: 51.172 ( 51.172) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 4.181 ( 4.329) Acc@1: 26.297 ( 25.422) Acc@5: 53.066 ( 49.298) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-34.pth.tar', 25.422000014038087) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-29.pth.tar', 25.094000006713866) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-32.pth.tar', 25.06200001953125) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-33.pth.tar', 24.748000018920898) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-31.pth.tar', 24.37600004333496) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-28.pth.tar', 23.511999995117186) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-26.pth.tar', 23.350000003051758) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-30.pth.tar', 21.74200001037598) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-24.pth.tar', 21.137999993286133) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-27.pth.tar', 21.125999998779296) + +Train: 35 [ 0/312 ( 0%)] Loss: 5.88 (5.88) Time: 1.585s, 646.18/s (1.585s, 646.18/s) LR: 3.486e-01 Data: 1.204 (1.204) +Train: 35 [ 50/312 ( 16%)] Loss: 5.87 (5.90) Time: 0.423s, 2422.79/s (0.443s, 2311.16/s) LR: 3.486e-01 Data: 0.026 (0.050) +Train: 35 [ 100/312 ( 32%)] Loss: 5.90 (5.91) Time: 0.422s, 2426.98/s (0.432s, 2372.67/s) LR: 3.486e-01 Data: 0.028 (0.039) +Train: 35 [ 150/312 ( 48%)] Loss: 5.97 (5.92) Time: 0.424s, 2416.05/s (0.428s, 2390.18/s) LR: 3.486e-01 Data: 0.027 (0.035) +Train: 35 [ 200/312 ( 64%)] Loss: 5.92 (5.93) Time: 0.408s, 2506.79/s (0.425s, 2412.20/s) LR: 3.486e-01 Data: 0.026 (0.033) +Train: 35 [ 250/312 ( 80%)] Loss: 5.98 (5.94) Time: 0.405s, 2526.02/s (0.421s, 2431.50/s) LR: 3.486e-01 Data: 0.026 (0.032) +Train: 35 [ 300/312 ( 96%)] Loss: 5.95 (5.95) Time: 0.407s, 2513.58/s (0.419s, 2445.32/s) LR: 3.486e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.455 (1.455) Loss: 4.183 ( 4.183) Acc@1: 28.027 ( 28.027) Acc@5: 50.391 ( 50.391) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 4.115 ( 4.251) Acc@1: 29.599 ( 26.840) Acc@5: 52.594 ( 50.132) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-35.pth.tar', 26.839999993896484) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-34.pth.tar', 25.422000014038087) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-29.pth.tar', 25.094000006713866) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-32.pth.tar', 25.06200001953125) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-33.pth.tar', 24.748000018920898) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-31.pth.tar', 24.37600004333496) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-28.pth.tar', 23.511999995117186) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-26.pth.tar', 23.350000003051758) + 
('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-30.pth.tar', 21.74200001037598) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-24.pth.tar', 21.137999993286133) + +Train: 36 [ 0/312 ( 0%)] Loss: 5.85 (5.85) Time: 1.778s, 576.02/s (1.778s, 576.02/s) LR: 3.458e-01 Data: 1.406 (1.406) +Train: 36 [ 50/312 ( 16%)] Loss: 5.96 (5.87) Time: 0.410s, 2496.10/s (0.436s, 2350.34/s) LR: 3.458e-01 Data: 0.026 (0.055) +Train: 36 [ 100/312 ( 32%)] Loss: 5.95 (5.89) Time: 0.422s, 2427.62/s (0.425s, 2406.59/s) LR: 3.458e-01 Data: 0.028 (0.041) +Train: 36 [ 150/312 ( 48%)] Loss: 5.92 (5.90) Time: 0.413s, 2477.36/s (0.423s, 2421.91/s) LR: 3.458e-01 Data: 0.027 (0.036) +Train: 36 [ 200/312 ( 64%)] Loss: 6.03 (5.91) Time: 0.416s, 2458.81/s (0.421s, 2432.89/s) LR: 3.458e-01 Data: 0.026 (0.034) +Train: 36 [ 250/312 ( 80%)] Loss: 5.98 (5.91) Time: 0.422s, 2428.28/s (0.421s, 2431.98/s) LR: 3.458e-01 Data: 0.027 (0.033) +Train: 36 [ 300/312 ( 96%)] Loss: 5.94 (5.92) Time: 0.412s, 2486.71/s (0.421s, 2434.37/s) LR: 3.458e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.442 (1.442) Loss: 4.268 ( 4.268) Acc@1: 26.562 ( 26.562) Acc@5: 50.098 ( 50.098) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 4.182 ( 4.306) Acc@1: 29.599 ( 26.862) Acc@5: 52.123 ( 50.416) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-36.pth.tar', 26.862000026245116) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-35.pth.tar', 26.839999993896484) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-34.pth.tar', 25.422000014038087) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-29.pth.tar', 25.094000006713866) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-32.pth.tar', 25.06200001953125) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-33.pth.tar', 24.748000018920898) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-31.pth.tar', 24.37600004333496) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-28.pth.tar', 23.511999995117186) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-26.pth.tar', 23.350000003051758) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-30.pth.tar', 21.74200001037598) + +Train: 37 [ 0/312 ( 0%)] Loss: 5.83 (5.83) Time: 1.759s, 582.08/s (1.759s, 582.08/s) LR: 3.429e-01 Data: 1.384 (1.384) +Train: 37 [ 50/312 ( 16%)] Loss: 5.87 (5.84) Time: 0.415s, 2464.81/s (0.437s, 2341.05/s) LR: 3.429e-01 Data: 0.027 (0.054) +Train: 37 [ 100/312 ( 32%)] Loss: 5.89 (5.86) Time: 0.423s, 2421.04/s (0.427s, 2395.46/s) LR: 3.429e-01 Data: 0.030 (0.041) +Train: 37 [ 150/312 ( 48%)] Loss: 5.99 (5.88) Time: 0.420s, 2436.06/s (0.425s, 2409.18/s) LR: 3.429e-01 Data: 0.028 (0.036) +Train: 37 [ 200/312 ( 64%)] Loss: 5.96 (5.89) Time: 0.410s, 2495.70/s (0.422s, 2427.60/s) LR: 3.429e-01 Data: 0.027 (0.034) +Train: 37 [ 250/312 ( 80%)] Loss: 5.96 (5.90) Time: 0.412s, 2483.38/s (0.420s, 2439.39/s) LR: 3.429e-01 Data: 0.028 (0.033) +Train: 37 [ 300/312 ( 96%)] Loss: 5.88 (5.91) Time: 0.423s, 2421.86/s (0.419s, 2442.33/s) LR: 3.429e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.445 (1.445) Loss: 4.343 ( 4.343) Acc@1: 26.367 ( 26.367) Acc@5: 48.633 ( 48.633) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 4.176 ( 4.332) Acc@1: 28.184 ( 25.720) Acc@5: 51.297 ( 48.910) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-36.pth.tar', 26.862000026245116) + 
('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-35.pth.tar', 26.839999993896484) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-37.pth.tar', 25.720000025634764) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-34.pth.tar', 25.422000014038087) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-29.pth.tar', 25.094000006713866) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-32.pth.tar', 25.06200001953125) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-33.pth.tar', 24.748000018920898) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-31.pth.tar', 24.37600004333496) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-28.pth.tar', 23.511999995117186) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-26.pth.tar', 23.350000003051758) + +Train: 38 [ 0/312 ( 0%)] Loss: 5.73 (5.73) Time: 1.725s, 593.46/s (1.725s, 593.46/s) LR: 3.399e-01 Data: 1.343 (1.343) +Train: 38 [ 50/312 ( 16%)] Loss: 5.84 (5.82) Time: 0.422s, 2426.21/s (0.446s, 2297.82/s) LR: 3.399e-01 Data: 0.027 (0.053) +Train: 38 [ 100/312 ( 32%)] Loss: 5.89 (5.84) Time: 0.409s, 2501.15/s (0.431s, 2377.37/s) LR: 3.399e-01 Data: 0.028 (0.040) +Train: 38 [ 150/312 ( 48%)] Loss: 5.87 (5.85) Time: 0.407s, 2514.72/s (0.424s, 2417.83/s) LR: 3.399e-01 Data: 0.029 (0.036) +Train: 38 [ 200/312 ( 64%)] Loss: 5.89 (5.87) Time: 0.407s, 2516.51/s (0.420s, 2439.59/s) LR: 3.399e-01 Data: 0.027 (0.034) +Train: 38 [ 250/312 ( 80%)] Loss: 6.00 (5.88) Time: 0.413s, 2481.60/s (0.418s, 2450.77/s) LR: 3.399e-01 Data: 0.026 (0.033) +Train: 38 [ 300/312 ( 96%)] Loss: 5.89 (5.88) Time: 0.427s, 2400.05/s (0.418s, 2450.14/s) LR: 3.399e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.426 (1.426) Loss: 4.291 ( 4.291) Acc@1: 25.098 ( 25.098) Acc@5: 49.121 ( 49.121) +Test: [ 48/48] Time: 0.093 (0.331) Loss: 4.171 ( 4.302) Acc@1: 28.420 ( 25.624) Acc@5: 51.887 ( 48.800) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-36.pth.tar', 26.862000026245116) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-35.pth.tar', 26.839999993896484) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-37.pth.tar', 25.720000025634764) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-38.pth.tar', 25.623999998779297) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-34.pth.tar', 25.422000014038087) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-29.pth.tar', 25.094000006713866) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-32.pth.tar', 25.06200001953125) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-33.pth.tar', 24.748000018920898) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-31.pth.tar', 24.37600004333496) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-28.pth.tar', 23.511999995117186) + +Train: 39 [ 0/312 ( 0%)] Loss: 5.70 (5.70) Time: 1.906s, 537.20/s (1.906s, 537.20/s) LR: 3.369e-01 Data: 1.270 (1.270) +Train: 39 [ 50/312 ( 16%)] Loss: 5.85 (5.81) Time: 0.412s, 2487.92/s (0.447s, 2288.86/s) LR: 3.369e-01 Data: 0.026 (0.052) +Train: 39 [ 100/312 ( 32%)] Loss: 5.85 (5.82) Time: 0.408s, 2509.29/s (0.429s, 2386.39/s) LR: 3.369e-01 Data: 0.027 (0.040) +Train: 39 [ 150/312 ( 48%)] Loss: 5.90 (5.83) Time: 0.407s, 2516.68/s (0.422s, 2425.15/s) LR: 3.369e-01 Data: 0.027 (0.036) +Train: 39 [ 200/312 ( 64%)] Loss: 5.80 (5.84) Time: 0.411s, 2489.96/s (0.419s, 2444.64/s) 
LR: 3.369e-01 Data: 0.028 (0.034) +Train: 39 [ 250/312 ( 80%)] Loss: 5.82 (5.85) Time: 0.414s, 2472.70/s (0.418s, 2451.04/s) LR: 3.369e-01 Data: 0.026 (0.033) +Train: 39 [ 300/312 ( 96%)] Loss: 5.89 (5.86) Time: 0.423s, 2418.88/s (0.418s, 2448.57/s) LR: 3.369e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.449 (1.449) Loss: 4.150 ( 4.150) Acc@1: 29.297 ( 29.297) Acc@5: 52.539 ( 52.539) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 4.030 ( 4.187) Acc@1: 31.250 ( 28.200) Acc@5: 53.774 ( 51.884) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-39.pth.tar', 28.2) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-36.pth.tar', 26.862000026245116) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-35.pth.tar', 26.839999993896484) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-37.pth.tar', 25.720000025634764) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-38.pth.tar', 25.623999998779297) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-34.pth.tar', 25.422000014038087) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-29.pth.tar', 25.094000006713866) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-32.pth.tar', 25.06200001953125) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-33.pth.tar', 24.748000018920898) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-31.pth.tar', 24.37600004333496) + +Train: 40 [ 0/312 ( 0%)] Loss: 5.71 (5.71) Time: 1.545s, 662.76/s (1.545s, 662.76/s) LR: 3.338e-01 Data: 1.163 (1.163) +Train: 40 [ 50/312 ( 16%)] Loss: 5.83 (5.79) Time: 0.419s, 2441.70/s (0.441s, 2322.12/s) LR: 3.338e-01 Data: 0.026 (0.050) +Train: 40 [ 100/312 ( 32%)] Loss: 5.77 (5.80) Time: 0.419s, 2445.74/s (0.430s, 2379.21/s) LR: 3.338e-01 Data: 0.029 (0.039) +Train: 40 [ 150/312 ( 48%)] Loss: 5.87 (5.82) Time: 0.415s, 2466.27/s (0.426s, 2403.02/s) LR: 3.338e-01 Data: 0.028 (0.035) +Train: 40 [ 200/312 ( 64%)] Loss: 5.91 (5.83) Time: 0.423s, 2422.08/s (0.424s, 2414.04/s) LR: 3.338e-01 Data: 0.027 (0.033) +Train: 40 [ 250/312 ( 80%)] Loss: 5.89 (5.83) Time: 0.421s, 2431.90/s (0.423s, 2418.61/s) LR: 3.338e-01 Data: 0.028 (0.032) +Train: 40 [ 300/312 ( 96%)] Loss: 5.90 (5.84) Time: 0.424s, 2414.54/s (0.423s, 2421.98/s) LR: 3.338e-01 Data: 0.030 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.443 (1.443) Loss: 4.172 ( 4.172) Acc@1: 28.809 ( 28.809) Acc@5: 51.953 ( 51.953) +Test: [ 48/48] Time: 0.092 (0.329) Loss: 4.121 ( 4.218) Acc@1: 31.132 ( 27.964) Acc@5: 53.184 ( 51.704) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-39.pth.tar', 28.2) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-40.pth.tar', 27.964000013427736) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-36.pth.tar', 26.862000026245116) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-35.pth.tar', 26.839999993896484) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-37.pth.tar', 25.720000025634764) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-38.pth.tar', 25.623999998779297) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-34.pth.tar', 25.422000014038087) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-29.pth.tar', 25.094000006713866) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-32.pth.tar', 25.06200001953125) + 
('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-33.pth.tar', 24.748000018920898) + +Train: 41 [ 0/312 ( 0%)] Loss: 5.66 (5.66) Time: 1.714s, 597.52/s (1.714s, 597.52/s) LR: 3.307e-01 Data: 1.336 (1.336) +Train: 41 [ 50/312 ( 16%)] Loss: 5.74 (5.75) Time: 0.420s, 2440.36/s (0.444s, 2308.05/s) LR: 3.307e-01 Data: 0.027 (0.054) +Train: 41 [ 100/312 ( 32%)] Loss: 5.85 (5.77) Time: 0.419s, 2444.35/s (0.431s, 2376.50/s) LR: 3.307e-01 Data: 0.027 (0.041) +Train: 41 [ 150/312 ( 48%)] Loss: 5.90 (5.79) Time: 0.417s, 2456.73/s (0.426s, 2401.97/s) LR: 3.307e-01 Data: 0.028 (0.036) +Train: 41 [ 200/312 ( 64%)] Loss: 5.83 (5.80) Time: 0.423s, 2421.83/s (0.425s, 2409.30/s) LR: 3.307e-01 Data: 0.028 (0.034) +Train: 41 [ 250/312 ( 80%)] Loss: 5.76 (5.81) Time: 0.423s, 2418.73/s (0.425s, 2411.88/s) LR: 3.307e-01 Data: 0.028 (0.033) +Train: 41 [ 300/312 ( 96%)] Loss: 5.85 (5.82) Time: 0.417s, 2455.16/s (0.424s, 2414.19/s) LR: 3.307e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.453 (1.453) Loss: 4.087 ( 4.087) Acc@1: 28.906 ( 28.906) Acc@5: 53.027 ( 53.027) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 3.949 ( 4.104) Acc@1: 35.024 ( 29.650) Acc@5: 56.486 ( 53.756) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar', 29.65000002319336) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-39.pth.tar', 28.2) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-40.pth.tar', 27.964000013427736) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-36.pth.tar', 26.862000026245116) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-35.pth.tar', 26.839999993896484) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-37.pth.tar', 25.720000025634764) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-38.pth.tar', 25.623999998779297) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-34.pth.tar', 25.422000014038087) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-29.pth.tar', 25.094000006713866) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-32.pth.tar', 25.06200001953125) + +Train: 42 [ 0/312 ( 0%)] Loss: 5.69 (5.69) Time: 1.563s, 655.20/s (1.563s, 655.20/s) LR: 3.275e-01 Data: 1.184 (1.184) +Train: 42 [ 50/312 ( 16%)] Loss: 5.83 (5.74) Time: 0.419s, 2442.03/s (0.441s, 2324.43/s) LR: 3.275e-01 Data: 0.028 (0.050) +Train: 42 [ 100/312 ( 32%)] Loss: 5.67 (5.75) Time: 0.419s, 2441.60/s (0.431s, 2377.53/s) LR: 3.275e-01 Data: 0.027 (0.039) +Train: 42 [ 150/312 ( 48%)] Loss: 5.79 (5.76) Time: 0.421s, 2430.09/s (0.428s, 2394.30/s) LR: 3.275e-01 Data: 0.027 (0.035) +Train: 42 [ 200/312 ( 64%)] Loss: 5.76 (5.78) Time: 0.416s, 2463.60/s (0.426s, 2402.86/s) LR: 3.275e-01 Data: 0.026 (0.033) +Train: 42 [ 250/312 ( 80%)] Loss: 5.68 (5.79) Time: 0.421s, 2435.19/s (0.425s, 2412.08/s) LR: 3.275e-01 Data: 0.026 (0.032) +Train: 42 [ 300/312 ( 96%)] Loss: 5.88 (5.80) Time: 0.423s, 2421.37/s (0.424s, 2415.06/s) LR: 3.275e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.445 (1.445) Loss: 4.123 ( 4.123) Acc@1: 28.613 ( 28.613) Acc@5: 52.441 ( 52.441) +Test: [ 48/48] Time: 0.091 (0.332) Loss: 4.024 ( 4.151) Acc@1: 31.014 ( 28.770) Acc@5: 54.009 ( 52.248) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar', 29.65000002319336) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-42.pth.tar', 28.769999994506836) + 
('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-39.pth.tar', 28.2) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-40.pth.tar', 27.964000013427736) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-36.pth.tar', 26.862000026245116) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-35.pth.tar', 26.839999993896484) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-37.pth.tar', 25.720000025634764) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-38.pth.tar', 25.623999998779297) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-34.pth.tar', 25.422000014038087) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-29.pth.tar', 25.094000006713866) + +Train: 43 [ 0/312 ( 0%)] Loss: 5.67 (5.67) Time: 1.638s, 624.97/s (1.638s, 624.97/s) LR: 3.242e-01 Data: 1.076 (1.076) +Train: 43 [ 50/312 ( 16%)] Loss: 5.67 (5.70) Time: 0.418s, 2452.22/s (0.444s, 2306.61/s) LR: 3.242e-01 Data: 0.027 (0.048) +Train: 43 [ 100/312 ( 32%)] Loss: 5.80 (5.73) Time: 0.417s, 2455.56/s (0.430s, 2378.80/s) LR: 3.242e-01 Data: 0.028 (0.038) +Train: 43 [ 150/312 ( 48%)] Loss: 5.81 (5.74) Time: 0.418s, 2448.38/s (0.426s, 2404.51/s) LR: 3.242e-01 Data: 0.027 (0.035) +Train: 43 [ 200/312 ( 64%)] Loss: 5.77 (5.76) Time: 0.418s, 2452.28/s (0.425s, 2411.67/s) LR: 3.242e-01 Data: 0.028 (0.033) +Train: 43 [ 250/312 ( 80%)] Loss: 5.74 (5.77) Time: 0.423s, 2421.37/s (0.424s, 2415.15/s) LR: 3.242e-01 Data: 0.031 (0.032) +Train: 43 [ 300/312 ( 96%)] Loss: 5.89 (5.78) Time: 0.425s, 2408.54/s (0.424s, 2416.69/s) LR: 3.242e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.431 (1.431) Loss: 4.138 ( 4.138) Acc@1: 29.590 ( 29.590) Acc@5: 51.855 ( 51.855) +Test: [ 48/48] Time: 0.092 (0.332) Loss: 4.005 ( 4.163) Acc@1: 32.429 ( 28.966) Acc@5: 54.481 ( 52.308) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar', 29.65000002319336) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-43.pth.tar', 28.965999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-42.pth.tar', 28.769999994506836) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-39.pth.tar', 28.2) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-40.pth.tar', 27.964000013427736) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-36.pth.tar', 26.862000026245116) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-35.pth.tar', 26.839999993896484) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-37.pth.tar', 25.720000025634764) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-38.pth.tar', 25.623999998779297) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-34.pth.tar', 25.422000014038087) + +Train: 44 [ 0/312 ( 0%)] Loss: 5.69 (5.69) Time: 1.583s, 646.85/s (1.583s, 646.85/s) LR: 3.209e-01 Data: 1.159 (1.159) +Train: 44 [ 50/312 ( 16%)] Loss: 5.83 (5.69) Time: 0.419s, 2445.50/s (0.446s, 2295.83/s) LR: 3.209e-01 Data: 0.028 (0.055) +Train: 44 [ 100/312 ( 32%)] Loss: 5.65 (5.71) Time: 0.414s, 2476.02/s (0.430s, 2379.88/s) LR: 3.209e-01 Data: 0.028 (0.041) +Train: 44 [ 150/312 ( 48%)] Loss: 5.80 (5.72) Time: 0.419s, 2441.49/s (0.425s, 2407.01/s) LR: 3.209e-01 Data: 0.028 (0.037) +Train: 44 [ 200/312 ( 64%)] Loss: 5.74 (5.74) Time: 0.416s, 2461.75/s (0.424s, 2414.97/s) LR: 3.209e-01 Data: 0.028 (0.034) +Train: 44 [ 250/312 ( 80%)] Loss: 5.83 (5.75) Time: 0.417s, 2455.92/s (0.422s, 
2423.94/s) LR: 3.209e-01 Data: 0.028 (0.033) +Train: 44 [ 300/312 ( 96%)] Loss: 5.76 (5.76) Time: 0.422s, 2424.99/s (0.422s, 2427.99/s) LR: 3.209e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.426 (1.426) Loss: 4.154 ( 4.154) Acc@1: 27.832 ( 27.832) Acc@5: 50.977 ( 50.977) +Test: [ 48/48] Time: 0.091 (0.329) Loss: 3.995 ( 4.140) Acc@1: 32.429 ( 29.050) Acc@5: 54.835 ( 52.724) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar', 29.65000002319336) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-44.pth.tar', 29.049999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-43.pth.tar', 28.965999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-42.pth.tar', 28.769999994506836) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-39.pth.tar', 28.2) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-40.pth.tar', 27.964000013427736) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-36.pth.tar', 26.862000026245116) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-35.pth.tar', 26.839999993896484) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-37.pth.tar', 25.720000025634764) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-38.pth.tar', 25.623999998779297) + +Train: 45 [ 0/312 ( 0%)] Loss: 5.74 (5.74) Time: 1.497s, 683.91/s (1.497s, 683.91/s) LR: 3.176e-01 Data: 1.120 (1.120) +Train: 45 [ 50/312 ( 16%)] Loss: 5.57 (5.65) Time: 0.414s, 2472.48/s (0.438s, 2335.83/s) LR: 3.176e-01 Data: 0.027 (0.049) +Train: 45 [ 100/312 ( 32%)] Loss: 5.84 (5.68) Time: 0.419s, 2444.29/s (0.426s, 2400.98/s) LR: 3.176e-01 Data: 0.033 (0.038) +Train: 45 [ 150/312 ( 48%)] Loss: 5.77 (5.70) Time: 0.424s, 2417.78/s (0.423s, 2419.80/s) LR: 3.176e-01 Data: 0.029 (0.034) +Train: 45 [ 200/312 ( 64%)] Loss: 5.77 (5.71) Time: 0.418s, 2448.99/s (0.423s, 2423.02/s) LR: 3.176e-01 Data: 0.028 (0.033) +Train: 45 [ 250/312 ( 80%)] Loss: 5.85 (5.72) Time: 0.417s, 2453.28/s (0.421s, 2431.38/s) LR: 3.176e-01 Data: 0.028 (0.032) +Train: 45 [ 300/312 ( 96%)] Loss: 5.73 (5.73) Time: 0.417s, 2453.10/s (0.420s, 2435.39/s) LR: 3.176e-01 Data: 0.026 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.439 (1.439) Loss: 4.148 ( 4.148) Acc@1: 28.418 ( 28.418) Acc@5: 50.781 ( 50.781) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 3.967 ( 4.125) Acc@1: 32.075 ( 29.762) Acc@5: 55.542 ( 52.882) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-45.pth.tar', 29.761999970703126) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar', 29.65000002319336) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-44.pth.tar', 29.049999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-43.pth.tar', 28.965999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-42.pth.tar', 28.769999994506836) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-39.pth.tar', 28.2) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-40.pth.tar', 27.964000013427736) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-36.pth.tar', 26.862000026245116) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-35.pth.tar', 26.839999993896484) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-37.pth.tar', 25.720000025634764) + +Train: 46 [ 0/312 ( 0%)] Loss: 5.57 (5.57) Time: 1.608s, 
636.78/s (1.608s, 636.78/s) LR: 3.141e-01 Data: 1.228 (1.228) +Train: 46 [ 50/312 ( 16%)] Loss: 5.54 (5.63) Time: 0.423s, 2421.73/s (0.439s, 2333.97/s) LR: 3.141e-01 Data: 0.028 (0.050) +Train: 46 [ 100/312 ( 32%)] Loss: 5.73 (5.66) Time: 0.422s, 2426.52/s (0.429s, 2386.95/s) LR: 3.141e-01 Data: 0.027 (0.039) +Train: 46 [ 150/312 ( 48%)] Loss: 5.68 (5.67) Time: 0.424s, 2414.24/s (0.426s, 2403.75/s) LR: 3.141e-01 Data: 0.029 (0.035) +Train: 46 [ 200/312 ( 64%)] Loss: 5.78 (5.69) Time: 0.416s, 2463.50/s (0.425s, 2412.02/s) LR: 3.141e-01 Data: 0.027 (0.034) +Train: 46 [ 250/312 ( 80%)] Loss: 5.80 (5.70) Time: 0.420s, 2437.78/s (0.424s, 2416.33/s) LR: 3.141e-01 Data: 0.024 (0.032) +Train: 46 [ 300/312 ( 96%)] Loss: 5.70 (5.71) Time: 0.421s, 2430.56/s (0.423s, 2419.52/s) LR: 3.141e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.442 (1.442) Loss: 4.091 ( 4.091) Acc@1: 30.078 ( 30.078) Acc@5: 53.223 ( 53.223) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 3.980 ( 4.119) Acc@1: 33.844 ( 29.864) Acc@5: 55.071 ( 53.242) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-46.pth.tar', 29.864000028076173) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-45.pth.tar', 29.761999970703126) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar', 29.65000002319336) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-44.pth.tar', 29.049999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-43.pth.tar', 28.965999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-42.pth.tar', 28.769999994506836) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-39.pth.tar', 28.2) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-40.pth.tar', 27.964000013427736) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-36.pth.tar', 26.862000026245116) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-35.pth.tar', 26.839999993896484) + +Train: 47 [ 0/312 ( 0%)] Loss: 5.61 (5.61) Time: 1.729s, 592.08/s (1.729s, 592.08/s) LR: 3.107e-01 Data: 1.350 (1.350) +Train: 47 [ 50/312 ( 16%)] Loss: 5.57 (5.60) Time: 0.413s, 2477.96/s (0.440s, 2325.58/s) LR: 3.107e-01 Data: 0.027 (0.053) +Train: 47 [ 100/312 ( 32%)] Loss: 5.71 (5.63) Time: 0.419s, 2441.39/s (0.430s, 2383.79/s) LR: 3.107e-01 Data: 0.027 (0.040) +Train: 47 [ 150/312 ( 48%)] Loss: 5.62 (5.65) Time: 0.420s, 2436.70/s (0.426s, 2401.77/s) LR: 3.107e-01 Data: 0.028 (0.036) +Train: 47 [ 200/312 ( 64%)] Loss: 5.68 (5.66) Time: 0.422s, 2424.21/s (0.425s, 2410.73/s) LR: 3.107e-01 Data: 0.028 (0.034) +Train: 47 [ 250/312 ( 80%)] Loss: 5.78 (5.68) Time: 0.422s, 2423.91/s (0.424s, 2415.75/s) LR: 3.107e-01 Data: 0.027 (0.032) +Train: 47 [ 300/312 ( 96%)] Loss: 5.72 (5.69) Time: 0.420s, 2440.98/s (0.423s, 2418.56/s) LR: 3.107e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.439 (1.439) Loss: 4.065 ( 4.065) Acc@1: 28.906 ( 28.906) Acc@5: 54.688 ( 54.688) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 3.952 ( 4.100) Acc@1: 32.901 ( 29.398) Acc@5: 54.245 ( 52.714) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-46.pth.tar', 29.864000028076173) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-45.pth.tar', 29.761999970703126) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar', 29.65000002319336) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-47.pth.tar', 
29.39800007080078) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-44.pth.tar', 29.049999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-43.pth.tar', 28.965999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-42.pth.tar', 28.769999994506836) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-39.pth.tar', 28.2) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-40.pth.tar', 27.964000013427736) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-36.pth.tar', 26.862000026245116) + +Train: 48 [ 0/312 ( 0%)] Loss: 5.50 (5.50) Time: 1.607s, 637.15/s (1.607s, 637.15/s) LR: 3.072e-01 Data: 1.161 (1.161) +Train: 48 [ 50/312 ( 16%)] Loss: 5.62 (5.58) Time: 0.408s, 2510.43/s (0.436s, 2351.29/s) LR: 3.072e-01 Data: 0.028 (0.050) +Train: 48 [ 100/312 ( 32%)] Loss: 5.67 (5.61) Time: 0.413s, 2477.67/s (0.422s, 2427.40/s) LR: 3.072e-01 Data: 0.033 (0.039) +Train: 48 [ 150/312 ( 48%)] Loss: 5.64 (5.63) Time: 0.414s, 2474.96/s (0.417s, 2454.84/s) LR: 3.072e-01 Data: 0.028 (0.035) +Train: 48 [ 200/312 ( 64%)] Loss: 5.78 (5.64) Time: 0.415s, 2464.82/s (0.415s, 2466.32/s) LR: 3.072e-01 Data: 0.029 (0.033) +Train: 48 [ 250/312 ( 80%)] Loss: 5.67 (5.66) Time: 0.424s, 2415.58/s (0.416s, 2463.57/s) LR: 3.072e-01 Data: 0.028 (0.032) +Train: 48 [ 300/312 ( 96%)] Loss: 5.73 (5.67) Time: 0.419s, 2445.12/s (0.416s, 2458.98/s) LR: 3.072e-01 Data: 0.025 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.465 (1.465) Loss: 4.167 ( 4.167) Acc@1: 28.516 ( 28.516) Acc@5: 50.977 ( 50.977) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 4.101 ( 4.176) Acc@1: 31.014 ( 28.848) Acc@5: 52.594 ( 51.886) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-46.pth.tar', 29.864000028076173) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-45.pth.tar', 29.761999970703126) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar', 29.65000002319336) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-47.pth.tar', 29.39800007080078) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-44.pth.tar', 29.049999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-43.pth.tar', 28.965999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-48.pth.tar', 28.84800002685547) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-42.pth.tar', 28.769999994506836) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-39.pth.tar', 28.2) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-40.pth.tar', 27.964000013427736) + +Train: 49 [ 0/312 ( 0%)] Loss: 5.56 (5.56) Time: 1.533s, 667.92/s (1.533s, 667.92/s) LR: 3.036e-01 Data: 1.158 (1.158) +Train: 49 [ 50/312 ( 16%)] Loss: 5.58 (5.56) Time: 0.415s, 2467.64/s (0.433s, 2364.29/s) LR: 3.036e-01 Data: 0.025 (0.049) +Train: 49 [ 100/312 ( 32%)] Loss: 5.63 (5.59) Time: 0.426s, 2401.41/s (0.426s, 2405.56/s) LR: 3.036e-01 Data: 0.028 (0.039) +Train: 49 [ 150/312 ( 48%)] Loss: 5.59 (5.61) Time: 0.423s, 2423.32/s (0.424s, 2412.28/s) LR: 3.036e-01 Data: 0.027 (0.035) +Train: 49 [ 200/312 ( 64%)] Loss: 5.66 (5.62) Time: 0.414s, 2475.28/s (0.424s, 2416.75/s) LR: 3.036e-01 Data: 0.028 (0.033) +Train: 49 [ 250/312 ( 80%)] Loss: 5.79 (5.64) Time: 0.408s, 2508.90/s (0.421s, 2429.59/s) LR: 3.036e-01 Data: 0.028 (0.032) +Train: 49 [ 300/312 ( 96%)] Loss: 5.80 (5.65) Time: 0.409s, 2504.80/s (0.420s, 2440.89/s) LR: 3.036e-01 Data: 0.025 (0.032) 
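The `Distributing BatchNorm running means and vars` lines mark the end-of-epoch step that, with `dist_bn: reduce`, averages the BatchNorm running statistics across the distributed ranks before validation (the ~2,400 img/s at ~0.42 s per step is consistent with a global batch of roughly 1024, i.e. several ranks at batch 256 each, though the rank count is not shown in the log). A rough sketch of that reduction, assuming an initialized process group; this is an illustration of the idea, not timm's exact utility:

```python
# Average BatchNorm running stats across DDP ranks so every rank
# validates with the same buffers. Assumes torch.distributed has been
# initialized; illustrative only.
import torch
import torch.distributed as dist
from torch.nn.modules.batchnorm import _BatchNorm

def reduce_bn_stats(model: torch.nn.Module) -> None:
    world_size = dist.get_world_size()
    for module in model.modules():
        if isinstance(module, _BatchNorm):
            for buf in (module.running_mean, module.running_var):
                dist.all_reduce(buf, op=dist.ReduceOp.SUM)
                buf /= world_size
```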
+Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.421 (1.421) Loss: 4.208 ( 4.208) Acc@1: 28.418 ( 28.418) Acc@5: 51.855 ( 51.855) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 4.079 ( 4.227) Acc@1: 31.486 ( 28.684) Acc@5: 54.717 ( 51.314) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-46.pth.tar', 29.864000028076173) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-45.pth.tar', 29.761999970703126) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar', 29.65000002319336) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-47.pth.tar', 29.39800007080078) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-44.pth.tar', 29.049999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-43.pth.tar', 28.965999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-48.pth.tar', 28.84800002685547) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-42.pth.tar', 28.769999994506836) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-49.pth.tar', 28.6840000378418) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-39.pth.tar', 28.2) + +Train: 50 [ 0/312 ( 0%)] Loss: 5.50 (5.50) Time: 1.484s, 690.07/s (1.484s, 690.07/s) LR: 3.000e-01 Data: 1.084 (1.084) +Train: 50 [ 50/312 ( 16%)] Loss: 5.58 (5.53) Time: 0.419s, 2446.44/s (0.436s, 2349.38/s) LR: 3.000e-01 Data: 0.028 (0.048) +Train: 50 [ 100/312 ( 32%)] Loss: 5.70 (5.56) Time: 0.416s, 2463.03/s (0.429s, 2386.78/s) LR: 3.000e-01 Data: 0.028 (0.038) +Train: 50 [ 150/312 ( 48%)] Loss: 5.58 (5.58) Time: 0.410s, 2495.04/s (0.424s, 2414.44/s) LR: 3.000e-01 Data: 0.026 (0.035) +Train: 50 [ 200/312 ( 64%)] Loss: 5.65 (5.60) Time: 0.412s, 2487.84/s (0.421s, 2432.42/s) LR: 3.000e-01 Data: 0.028 (0.033) +Train: 50 [ 250/312 ( 80%)] Loss: 5.68 (5.61) Time: 0.416s, 2462.83/s (0.420s, 2439.99/s) LR: 3.000e-01 Data: 0.027 (0.032) +Train: 50 [ 300/312 ( 96%)] Loss: 5.64 (5.62) Time: 0.425s, 2409.91/s (0.420s, 2437.41/s) LR: 3.000e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.536 (1.536) Loss: 4.087 ( 4.087) Acc@1: 30.664 ( 30.664) Acc@5: 52.344 ( 52.344) +Test: [ 48/48] Time: 0.092 (0.334) Loss: 3.974 ( 4.127) Acc@1: 31.486 ( 29.296) Acc@5: 53.538 ( 51.734) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-46.pth.tar', 29.864000028076173) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-45.pth.tar', 29.761999970703126) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar', 29.65000002319336) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-47.pth.tar', 29.39800007080078) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-50.pth.tar', 29.295999973144532) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-44.pth.tar', 29.049999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-43.pth.tar', 28.965999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-48.pth.tar', 28.84800002685547) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-42.pth.tar', 28.769999994506836) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-49.pth.tar', 28.6840000378418) + +Train: 51 [ 0/312 ( 0%)] Loss: 5.43 (5.43) Time: 1.885s, 543.10/s (1.885s, 543.10/s) LR: 2.964e-01 Data: 1.502 (1.502) +Train: 51 [ 50/312 ( 16%)] Loss: 5.54 (5.50) Time: 0.419s, 2442.32/s (0.449s, 2279.76/s) LR: 2.964e-01 Data: 
0.027 (0.056) +Train: 51 [ 100/312 ( 32%)] Loss: 5.64 (5.54) Time: 0.421s, 2434.13/s (0.434s, 2356.91/s) LR: 2.964e-01 Data: 0.028 (0.042) +Train: 51 [ 150/312 ( 48%)] Loss: 5.60 (5.56) Time: 0.415s, 2466.40/s (0.429s, 2388.14/s) LR: 2.964e-01 Data: 0.027 (0.037) +Train: 51 [ 200/312 ( 64%)] Loss: 5.63 (5.58) Time: 0.420s, 2437.22/s (0.426s, 2402.52/s) LR: 2.964e-01 Data: 0.029 (0.035) +Train: 51 [ 250/312 ( 80%)] Loss: 5.73 (5.59) Time: 0.419s, 2441.89/s (0.425s, 2410.52/s) LR: 2.964e-01 Data: 0.028 (0.033) +Train: 51 [ 300/312 ( 96%)] Loss: 5.65 (5.60) Time: 0.420s, 2436.63/s (0.424s, 2416.77/s) LR: 2.964e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.434 (1.434) Loss: 4.131 ( 4.131) Acc@1: 29.004 ( 29.004) Acc@5: 50.879 ( 50.879) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.003 ( 4.156) Acc@1: 31.958 ( 29.074) Acc@5: 54.717 ( 51.820) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-46.pth.tar', 29.864000028076173) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-45.pth.tar', 29.761999970703126) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar', 29.65000002319336) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-47.pth.tar', 29.39800007080078) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-50.pth.tar', 29.295999973144532) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-51.pth.tar', 29.07399998413086) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-44.pth.tar', 29.049999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-43.pth.tar', 28.965999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-48.pth.tar', 28.84800002685547) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-42.pth.tar', 28.769999994506836) + +Train: 52 [ 0/312 ( 0%)] Loss: 5.49 (5.49) Time: 1.471s, 696.06/s (1.471s, 696.06/s) LR: 2.927e-01 Data: 1.068 (1.068) +Train: 52 [ 50/312 ( 16%)] Loss: 5.50 (5.49) Time: 0.417s, 2457.77/s (0.439s, 2330.89/s) LR: 2.927e-01 Data: 0.028 (0.048) +Train: 52 [ 100/312 ( 32%)] Loss: 5.65 (5.52) Time: 0.421s, 2432.65/s (0.429s, 2385.56/s) LR: 2.927e-01 Data: 0.028 (0.038) +Train: 52 [ 150/312 ( 48%)] Loss: 5.59 (5.54) Time: 0.421s, 2433.01/s (0.425s, 2409.37/s) LR: 2.927e-01 Data: 0.033 (0.034) +Train: 52 [ 200/312 ( 64%)] Loss: 5.62 (5.55) Time: 0.417s, 2456.89/s (0.424s, 2417.86/s) LR: 2.927e-01 Data: 0.026 (0.033) +Train: 52 [ 250/312 ( 80%)] Loss: 5.62 (5.57) Time: 0.421s, 2430.17/s (0.423s, 2421.37/s) LR: 2.927e-01 Data: 0.027 (0.032) +Train: 52 [ 300/312 ( 96%)] Loss: 5.65 (5.58) Time: 0.416s, 2460.75/s (0.422s, 2426.43/s) LR: 2.927e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.434 (1.434) Loss: 4.158 ( 4.158) Acc@1: 30.371 ( 30.371) Acc@5: 48.633 ( 48.633) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 4.034 ( 4.161) Acc@1: 29.835 ( 28.746) Acc@5: 53.774 ( 51.190) +Train: 53 [ 0/312 ( 0%)] Loss: 5.50 (5.50) Time: 1.770s, 578.37/s (1.770s, 578.37/s) LR: 2.889e-01 Data: 1.196 (1.196) +Train: 53 [ 50/312 ( 16%)] Loss: 5.41 (5.47) Time: 0.418s, 2447.96/s (0.450s, 2276.37/s) LR: 2.889e-01 Data: 0.027 (0.050) +Train: 53 [ 100/312 ( 32%)] Loss: 5.63 (5.49) Time: 0.420s, 2439.24/s (0.434s, 2358.54/s) LR: 2.889e-01 Data: 0.027 (0.039) +Train: 53 [ 150/312 ( 48%)] Loss: 5.48 (5.51) Time: 0.425s, 2410.46/s (0.429s, 2384.26/s) LR: 2.889e-01 Data: 0.030 (0.035) +Train: 53 [ 200/312 ( 64%)] Loss: 5.57 (5.53) Time: 0.421s, 
2434.25/s (0.427s, 2397.73/s) LR: 2.889e-01 Data: 0.028 (0.034) +Train: 53 [ 250/312 ( 80%)] Loss: 5.69 (5.55) Time: 0.418s, 2449.67/s (0.425s, 2409.47/s) LR: 2.889e-01 Data: 0.026 (0.032) +Train: 53 [ 300/312 ( 96%)] Loss: 5.62 (5.56) Time: 0.419s, 2442.06/s (0.424s, 2416.12/s) LR: 2.889e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.423 (1.423) Loss: 4.156 ( 4.156) Acc@1: 29.199 ( 29.199) Acc@5: 51.855 ( 51.855) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 4.095 ( 4.241) Acc@1: 29.953 ( 28.058) Acc@5: 51.533 ( 50.408) +Train: 54 [ 0/312 ( 0%)] Loss: 5.43 (5.43) Time: 2.318s, 441.72/s (2.318s, 441.72/s) LR: 2.852e-01 Data: 1.436 (1.436) +Train: 54 [ 50/312 ( 16%)] Loss: 5.46 (5.44) Time: 0.421s, 2434.81/s (0.457s, 2242.10/s) LR: 2.852e-01 Data: 0.029 (0.055) +Train: 54 [ 100/312 ( 32%)] Loss: 5.47 (5.46) Time: 0.424s, 2415.57/s (0.439s, 2331.43/s) LR: 2.852e-01 Data: 0.029 (0.041) +Train: 54 [ 150/312 ( 48%)] Loss: 5.59 (5.49) Time: 0.416s, 2460.26/s (0.432s, 2369.64/s) LR: 2.852e-01 Data: 0.029 (0.037) +Train: 54 [ 200/312 ( 64%)] Loss: 5.66 (5.51) Time: 0.421s, 2431.53/s (0.428s, 2390.95/s) LR: 2.852e-01 Data: 0.028 (0.034) +Train: 54 [ 250/312 ( 80%)] Loss: 5.54 (5.52) Time: 0.423s, 2421.53/s (0.427s, 2397.44/s) LR: 2.852e-01 Data: 0.027 (0.033) +Train: 54 [ 300/312 ( 96%)] Loss: 5.44 (5.54) Time: 0.419s, 2441.12/s (0.426s, 2405.05/s) LR: 2.852e-01 Data: 0.029 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.441 (1.441) Loss: 4.162 ( 4.162) Acc@1: 28.223 ( 28.223) Acc@5: 51.562 ( 51.562) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 3.970 ( 4.131) Acc@1: 31.604 ( 29.330) Acc@5: 52.358 ( 51.354) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-46.pth.tar', 29.864000028076173) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-45.pth.tar', 29.761999970703126) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar', 29.65000002319336) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-47.pth.tar', 29.39800007080078) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-54.pth.tar', 29.330000024414062) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-50.pth.tar', 29.295999973144532) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-51.pth.tar', 29.07399998413086) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-44.pth.tar', 29.049999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-43.pth.tar', 28.965999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-48.pth.tar', 28.84800002685547) + +Train: 55 [ 0/312 ( 0%)] Loss: 5.38 (5.38) Time: 1.582s, 647.21/s (1.582s, 647.21/s) LR: 2.813e-01 Data: 1.202 (1.202) +Train: 55 [ 50/312 ( 16%)] Loss: 5.49 (5.42) Time: 0.423s, 2420.43/s (0.442s, 2314.87/s) LR: 2.813e-01 Data: 0.027 (0.050) +Train: 55 [ 100/312 ( 32%)] Loss: 5.52 (5.45) Time: 0.424s, 2413.96/s (0.432s, 2372.78/s) LR: 2.813e-01 Data: 0.029 (0.039) +Train: 55 [ 150/312 ( 48%)] Loss: 5.49 (5.47) Time: 0.422s, 2425.84/s (0.428s, 2392.46/s) LR: 2.813e-01 Data: 0.029 (0.035) +Train: 55 [ 200/312 ( 64%)] Loss: 5.67 (5.49) Time: 0.424s, 2416.01/s (0.427s, 2400.75/s) LR: 2.813e-01 Data: 0.030 (0.033) +Train: 55 [ 250/312 ( 80%)] Loss: 5.64 (5.50) Time: 0.419s, 2445.83/s (0.426s, 2404.93/s) LR: 2.813e-01 Data: 0.027 (0.032) +Train: 55 [ 300/312 ( 96%)] Loss: 5.54 (5.52) Time: 0.417s, 2456.69/s (0.425s, 2409.41/s) LR: 2.813e-01 Data: 0.032 (0.031) 
+Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.439 (1.439) Loss: 4.129 ( 4.129) Acc@1: 29.395 ( 29.395) Acc@5: 52.246 ( 52.246) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 4.095 ( 4.188) Acc@1: 29.245 ( 29.152) Acc@5: 52.005 ( 51.240) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-46.pth.tar', 29.864000028076173) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-45.pth.tar', 29.761999970703126) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar', 29.65000002319336) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-47.pth.tar', 29.39800007080078) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-54.pth.tar', 29.330000024414062) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-50.pth.tar', 29.295999973144532) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-55.pth.tar', 29.152000034179686) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-51.pth.tar', 29.07399998413086) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-44.pth.tar', 29.049999995117187) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-43.pth.tar', 28.965999995117187) + +Train: 56 [ 0/312 ( 0%)] Loss: 5.44 (5.44) Time: 1.652s, 619.88/s (1.652s, 619.88/s) LR: 2.775e-01 Data: 1.258 (1.258) +Train: 56 [ 50/312 ( 16%)] Loss: 5.35 (5.40) Time: 0.416s, 2461.61/s (0.440s, 2325.68/s) LR: 2.775e-01 Data: 0.029 (0.052) +Train: 56 [ 100/312 ( 32%)] Loss: 5.46 (5.43) Time: 0.418s, 2452.61/s (0.430s, 2381.67/s) LR: 2.775e-01 Data: 0.027 (0.040) +Train: 56 [ 150/312 ( 48%)] Loss: 5.54 (5.45) Time: 0.419s, 2441.94/s (0.427s, 2400.04/s) LR: 2.775e-01 Data: 0.028 (0.036) +Train: 56 [ 200/312 ( 64%)] Loss: 5.48 (5.47) Time: 0.429s, 2387.75/s (0.425s, 2408.23/s) LR: 2.775e-01 Data: 0.035 (0.034) +Train: 56 [ 250/312 ( 80%)] Loss: 5.64 (5.48) Time: 0.424s, 2417.89/s (0.424s, 2412.33/s) LR: 2.775e-01 Data: 0.028 (0.032) +Train: 56 [ 300/312 ( 96%)] Loss: 5.46 (5.49) Time: 0.418s, 2447.52/s (0.424s, 2415.43/s) LR: 2.775e-01 Data: 0.026 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.449 (1.449) Loss: 4.167 ( 4.167) Acc@1: 29.004 ( 29.004) Acc@5: 49.805 ( 49.805) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.035 ( 4.162) Acc@1: 30.307 ( 29.330) Acc@5: 52.476 ( 50.952) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-46.pth.tar', 29.864000028076173) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-45.pth.tar', 29.761999970703126) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar', 29.65000002319336) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-47.pth.tar', 29.39800007080078) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-54.pth.tar', 29.330000024414062) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-56.pth.tar', 29.330000010375976) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-50.pth.tar', 29.295999973144532) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-55.pth.tar', 29.152000034179686) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-51.pth.tar', 29.07399998413086) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-44.pth.tar', 29.049999995117187) + +Train: 57 [ 0/312 ( 0%)] Loss: 5.33 (5.33) Time: 1.664s, 615.22/s (1.664s, 615.22/s) LR: 2.736e-01 Data: 1.285 (1.285) +Train: 57 [ 50/312 ( 16%)] Loss: 5.31 (5.37) Time: 0.420s, 2439.39/s (0.443s, 2310.05/s) 
LR: 2.736e-01 Data: 0.029 (0.052) +Train: 57 [ 100/312 ( 32%)] Loss: 5.42 (5.40) Time: 0.416s, 2460.62/s (0.431s, 2374.51/s) LR: 2.736e-01 Data: 0.029 (0.040) +Train: 57 [ 150/312 ( 48%)] Loss: 5.60 (5.42) Time: 0.413s, 2479.02/s (0.426s, 2405.60/s) LR: 2.736e-01 Data: 0.029 (0.036) +Train: 57 [ 200/312 ( 64%)] Loss: 5.45 (5.44) Time: 0.422s, 2426.08/s (0.424s, 2417.55/s) LR: 2.736e-01 Data: 0.028 (0.034) +Train: 57 [ 250/312 ( 80%)] Loss: 5.52 (5.46) Time: 0.423s, 2418.79/s (0.423s, 2422.04/s) LR: 2.736e-01 Data: 0.028 (0.033) +Train: 57 [ 300/312 ( 96%)] Loss: 5.53 (5.47) Time: 0.415s, 2469.00/s (0.422s, 2424.17/s) LR: 2.736e-01 Data: 0.029 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.434 (1.434) Loss: 4.168 ( 4.168) Acc@1: 30.078 ( 30.078) Acc@5: 52.832 ( 52.832) +Test: [ 48/48] Time: 0.091 (0.332) Loss: 4.044 ( 4.174) Acc@1: 30.307 ( 29.294) Acc@5: 53.774 ( 51.524) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-46.pth.tar', 29.864000028076173) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-45.pth.tar', 29.761999970703126) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar', 29.65000002319336) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-47.pth.tar', 29.39800007080078) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-54.pth.tar', 29.330000024414062) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-56.pth.tar', 29.330000010375976) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-50.pth.tar', 29.295999973144532) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-57.pth.tar', 29.294000010375978) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-55.pth.tar', 29.152000034179686) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-51.pth.tar', 29.07399998413086) + +Train: 58 [ 0/312 ( 0%)] Loss: 5.32 (5.32) Time: 1.789s, 572.25/s (1.789s, 572.25/s) LR: 2.697e-01 Data: 1.130 (1.130) +Train: 58 [ 50/312 ( 16%)] Loss: 5.39 (5.34) Time: 0.420s, 2435.39/s (0.445s, 2301.74/s) LR: 2.697e-01 Data: 0.029 (0.049) +Train: 58 [ 100/312 ( 32%)] Loss: 5.45 (5.37) Time: 0.420s, 2436.82/s (0.433s, 2367.20/s) LR: 2.697e-01 Data: 0.028 (0.039) +Train: 58 [ 150/312 ( 48%)] Loss: 5.44 (5.39) Time: 0.419s, 2446.12/s (0.428s, 2391.42/s) LR: 2.697e-01 Data: 0.027 (0.035) +Train: 58 [ 200/312 ( 64%)] Loss: 5.53 (5.41) Time: 0.418s, 2450.18/s (0.426s, 2401.91/s) LR: 2.697e-01 Data: 0.028 (0.033) +Train: 58 [ 250/312 ( 80%)] Loss: 5.52 (5.43) Time: 0.416s, 2461.00/s (0.425s, 2408.67/s) LR: 2.697e-01 Data: 0.028 (0.032) +Train: 58 [ 300/312 ( 96%)] Loss: 5.55 (5.45) Time: 0.418s, 2451.06/s (0.424s, 2412.82/s) LR: 2.697e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.425 (1.425) Loss: 4.456 ( 4.456) Acc@1: 24.805 ( 24.805) Acc@5: 46.289 ( 46.289) +Test: [ 48/48] Time: 0.091 (0.332) Loss: 4.318 ( 4.463) Acc@1: 26.297 ( 25.114) Acc@5: 47.052 ( 45.266) +Train: 59 [ 0/312 ( 0%)] Loss: 5.31 (5.31) Time: 1.771s, 578.33/s (1.771s, 578.33/s) LR: 2.658e-01 Data: 1.391 (1.391) +Train: 59 [ 50/312 ( 16%)] Loss: 5.31 (5.32) Time: 0.416s, 2459.78/s (0.444s, 2305.26/s) LR: 2.658e-01 Data: 0.027 (0.054) +Train: 59 [ 100/312 ( 32%)] Loss: 5.43 (5.36) Time: 0.423s, 2421.32/s (0.432s, 2372.08/s) LR: 2.658e-01 Data: 0.027 (0.041) +Train: 59 [ 150/312 ( 48%)] Loss: 5.43 (5.38) Time: 0.418s, 2447.14/s (0.428s, 2393.87/s) LR: 2.658e-01 Data: 0.028 (0.037) +Train: 59 [ 200/312 ( 64%)] Loss: 5.46 
(5.40) Time: 0.423s, 2420.19/s (0.426s, 2404.85/s) LR: 2.658e-01 Data: 0.028 (0.034) +Train: 59 [ 250/312 ( 80%)] Loss: 5.45 (5.41) Time: 0.425s, 2409.80/s (0.425s, 2410.51/s) LR: 2.658e-01 Data: 0.032 (0.033) +Train: 59 [ 300/312 ( 96%)] Loss: 5.55 (5.42) Time: 0.421s, 2429.46/s (0.424s, 2414.06/s) LR: 2.658e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.458 (1.458) Loss: 4.074 ( 4.074) Acc@1: 31.934 ( 31.934) Acc@5: 51.758 ( 51.758) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 3.970 ( 4.114) Acc@1: 32.783 ( 30.156) Acc@5: 54.127 ( 52.042) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-59.pth.tar', 30.15600001953125) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-46.pth.tar', 29.864000028076173) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-45.pth.tar', 29.761999970703126) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar', 29.65000002319336) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-47.pth.tar', 29.39800007080078) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-54.pth.tar', 29.330000024414062) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-56.pth.tar', 29.330000010375976) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-50.pth.tar', 29.295999973144532) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-57.pth.tar', 29.294000010375978) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-55.pth.tar', 29.152000034179686) + +Train: 60 [ 0/312 ( 0%)] Loss: 5.19 (5.19) Time: 1.487s, 688.70/s (1.487s, 688.70/s) LR: 2.618e-01 Data: 1.108 (1.108) +Train: 60 [ 50/312 ( 16%)] Loss: 5.32 (5.29) Time: 0.419s, 2442.55/s (0.436s, 2346.60/s) LR: 2.618e-01 Data: 0.028 (0.049) +Train: 60 [ 100/312 ( 32%)] Loss: 5.40 (5.32) Time: 0.421s, 2435.12/s (0.428s, 2394.38/s) LR: 2.618e-01 Data: 0.027 (0.038) +Train: 60 [ 150/312 ( 48%)] Loss: 5.46 (5.35) Time: 0.418s, 2446.86/s (0.425s, 2408.86/s) LR: 2.618e-01 Data: 0.027 (0.035) +Train: 60 [ 200/312 ( 64%)] Loss: 5.45 (5.37) Time: 0.418s, 2451.77/s (0.424s, 2416.26/s) LR: 2.618e-01 Data: 0.027 (0.033) +Train: 60 [ 250/312 ( 80%)] Loss: 5.40 (5.39) Time: 0.421s, 2432.46/s (0.423s, 2419.52/s) LR: 2.618e-01 Data: 0.028 (0.032) +Train: 60 [ 300/312 ( 96%)] Loss: 5.49 (5.40) Time: 0.422s, 2428.09/s (0.423s, 2421.64/s) LR: 2.618e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.484 (1.484) Loss: 4.231 ( 4.231) Acc@1: 29.395 ( 29.395) Acc@5: 49.805 ( 49.805) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 4.162 ( 4.239) Acc@1: 29.717 ( 28.214) Acc@5: 50.472 ( 49.118) +Train: 61 [ 0/312 ( 0%)] Loss: 5.33 (5.33) Time: 1.608s, 636.63/s (1.608s, 636.63/s) LR: 2.578e-01 Data: 1.230 (1.230) +Train: 61 [ 50/312 ( 16%)] Loss: 5.46 (5.27) Time: 0.418s, 2446.91/s (0.441s, 2320.55/s) LR: 2.578e-01 Data: 0.028 (0.051) +Train: 61 [ 100/312 ( 32%)] Loss: 5.43 (5.30) Time: 0.419s, 2445.99/s (0.430s, 2379.70/s) LR: 2.578e-01 Data: 0.027 (0.040) +Train: 61 [ 150/312 ( 48%)] Loss: 5.40 (5.33) Time: 0.420s, 2438.85/s (0.427s, 2400.11/s) LR: 2.578e-01 Data: 0.029 (0.036) +Train: 61 [ 200/312 ( 64%)] Loss: 5.54 (5.35) Time: 0.422s, 2429.08/s (0.425s, 2409.53/s) LR: 2.578e-01 Data: 0.027 (0.034) +Train: 61 [ 250/312 ( 80%)] Loss: 5.45 (5.37) Time: 0.421s, 2430.22/s (0.424s, 2414.71/s) LR: 2.578e-01 Data: 0.026 (0.032) +Train: 61 [ 300/312 ( 96%)] Loss: 5.46 (5.38) Time: 0.416s, 2461.05/s (0.424s, 2417.35/s) LR: 2.578e-01 Data: 
0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.431 (1.431) Loss: 4.118 ( 4.118) Acc@1: 30.078 ( 30.078) Acc@5: 51.074 ( 51.074) +Test: [ 48/48] Time: 0.091 (0.332) Loss: 4.018 ( 4.142) Acc@1: 33.255 ( 29.252) Acc@5: 52.241 ( 50.664) +Current checkpoints: + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-59.pth.tar', 30.15600001953125) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-46.pth.tar', 29.864000028076173) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-45.pth.tar', 29.761999970703126) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-41.pth.tar', 29.65000002319336) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-47.pth.tar', 29.39800007080078) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-54.pth.tar', 29.330000024414062) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-56.pth.tar', 29.330000010375976) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-50.pth.tar', 29.295999973144532) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-57.pth.tar', 29.294000010375978) + ('./output/train/ImageNetTraining60.0-frac-1over4/checkpoint-61.pth.tar', 29.25200003051758) + +Train: 62 [ 0/312 ( 0%)] Loss: 5.25 (5.25) Time: 1.718s, 596.00/s (1.718s, 596.00/s) LR: 2.538e-01 Data: 1.338 (1.338) +Train: 62 [ 50/312 ( 16%)] Loss: 5.15 (5.25) Time: 0.409s, 2502.61/s (0.443s, 2314.05/s) LR: 2.538e-01 Data: 0.027 (0.053) +Train: 62 [ 100/312 ( 32%)] Loss: 5.39 (5.27) Time: 0.410s, 2495.72/s (0.427s, 2396.35/s) LR: 2.538e-01 Data: 0.028 (0.040) +Train: 62 [ 150/312 ( 48%)] Loss: 5.42 (5.30) Time: 0.408s, 2511.16/s (0.421s, 2432.59/s) LR: 2.538e-01 Data: 0.028 (0.036) +Train: 62 [ 200/312 ( 64%)] Loss: 5.41 (5.32) Time: 0.418s, 2448.67/s (0.418s, 2450.14/s) LR: 2.538e-01 Data: 0.036 (0.034) +Train: 62 [ 250/312 ( 80%)] Loss: 5.46 (5.34) Time: 0.419s, 2444.17/s (0.417s, 2456.18/s) LR: 2.538e-01 Data: 0.028 (0.033) +Train: 62 [ 300/312 ( 96%)] Loss: 5.45 (5.36) Time: 0.420s, 2437.94/s (0.418s, 2452.28/s) LR: 2.538e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.437 (1.437) Loss: 4.259 ( 4.259) Acc@1: 29.297 ( 29.297) Acc@5: 48.145 ( 48.145) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 4.033 ( 4.196) Acc@1: 31.722 ( 28.886) Acc@5: 51.769 ( 50.282) +Train: 63 [ 0/312 ( 0%)] Loss: 5.25 (5.25) Time: 1.489s, 687.50/s (1.489s, 687.50/s) LR: 2.497e-01 Data: 1.114 (1.114) +Train: 63 [ 50/312 ( 16%)] Loss: 5.24 (5.23) Time: 0.407s, 2515.64/s (0.437s, 2340.86/s) LR: 2.497e-01 Data: 0.027 (0.055) +Train: 63 [ 100/312 ( 32%)] Loss: 5.31 (5.25) Time: 0.421s, 2431.70/s (0.426s, 2405.96/s) LR: 2.497e-01 Data: 0.027 (0.041) +Train: 63 [ 150/312 ( 48%)] Loss: 5.34 (5.28) Time: 0.423s, 2420.74/s (0.424s, 2413.14/s) LR: 2.497e-01 Data: 0.027 (0.037) +Train: 63 [ 200/312 ( 64%)] Loss: 5.33 (5.30) Time: 0.416s, 2462.18/s (0.424s, 2413.06/s) LR: 2.497e-01 Data: 0.027 (0.035) +Train: 63 [ 250/312 ( 80%)] Loss: 5.32 (5.31) Time: 0.410s, 2498.46/s (0.422s, 2425.18/s) LR: 2.497e-01 Data: 0.026 (0.033) +Train: 63 [ 300/312 ( 96%)] Loss: 5.48 (5.33) Time: 0.410s, 2498.77/s (0.420s, 2437.68/s) LR: 2.497e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.430 (1.430) Loss: 4.250 ( 4.250) Acc@1: 29.297 ( 29.297) Acc@5: 49.512 ( 49.512) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 4.097 ( 4.259) Acc@1: 30.307 ( 28.104) Acc@5: 50.708 ( 48.862) +Train: 64 [ 0/312 ( 0%)] Loss: 5.18 (5.18) Time: 
1.751s, 584.92/s (1.751s, 584.92/s) LR: 2.457e-01 Data: 1.221 (1.221) +Train: 64 [ 50/312 ( 16%)] Loss: 5.22 (5.19) Time: 0.416s, 2459.92/s (0.438s, 2339.04/s) LR: 2.457e-01 Data: 0.028 (0.051) +Train: 64 [ 100/312 ( 32%)] Loss: 5.34 (5.23) Time: 0.419s, 2441.02/s (0.429s, 2387.35/s) LR: 2.457e-01 Data: 0.026 (0.039) +Train: 64 [ 150/312 ( 48%)] Loss: 5.37 (5.26) Time: 0.414s, 2473.75/s (0.424s, 2413.97/s) LR: 2.457e-01 Data: 0.027 (0.035) +Train: 64 [ 200/312 ( 64%)] Loss: 5.47 (5.28) Time: 0.409s, 2503.05/s (0.421s, 2434.40/s) LR: 2.457e-01 Data: 0.027 (0.033) +Train: 64 [ 250/312 ( 80%)] Loss: 5.43 (5.30) Time: 0.417s, 2453.94/s (0.419s, 2444.27/s) LR: 2.457e-01 Data: 0.029 (0.032) +Train: 64 [ 300/312 ( 96%)] Loss: 5.31 (5.31) Time: 0.421s, 2432.91/s (0.419s, 2443.64/s) LR: 2.457e-01 Data: 0.025 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.426 (1.426) Loss: 4.112 ( 4.112) Acc@1: 28.711 ( 28.711) Acc@5: 51.465 ( 51.465) +Test: [ 48/48] Time: 0.092 (0.328) Loss: 4.043 ( 4.191) Acc@1: 31.958 ( 29.008) Acc@5: 50.708 ( 50.088) +Train: 65 [ 0/312 ( 0%)] Loss: 5.07 (5.07) Time: 1.743s, 587.35/s (1.743s, 587.35/s) LR: 2.416e-01 Data: 1.363 (1.363) +Train: 65 [ 50/312 ( 16%)] Loss: 5.31 (5.20) Time: 0.419s, 2445.09/s (0.442s, 2314.74/s) LR: 2.416e-01 Data: 0.026 (0.053) +Train: 65 [ 100/312 ( 32%)] Loss: 5.36 (5.21) Time: 0.429s, 2385.78/s (0.432s, 2372.24/s) LR: 2.416e-01 Data: 0.029 (0.040) +Train: 65 [ 150/312 ( 48%)] Loss: 5.32 (5.24) Time: 0.424s, 2415.85/s (0.429s, 2386.67/s) LR: 2.416e-01 Data: 0.028 (0.036) +Train: 65 [ 200/312 ( 64%)] Loss: 5.36 (5.26) Time: 0.418s, 2449.60/s (0.428s, 2394.25/s) LR: 2.416e-01 Data: 0.027 (0.034) +Train: 65 [ 250/312 ( 80%)] Loss: 5.26 (5.27) Time: 0.420s, 2439.27/s (0.426s, 2402.24/s) LR: 2.416e-01 Data: 0.027 (0.033) +Train: 65 [ 300/312 ( 96%)] Loss: 5.26 (5.29) Time: 0.416s, 2461.94/s (0.425s, 2410.93/s) LR: 2.416e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.464 (1.464) Loss: 4.210 ( 4.210) Acc@1: 27.734 ( 27.734) Acc@5: 49.707 ( 49.707) +Test: [ 48/48] Time: 0.091 (0.329) Loss: 4.053 ( 4.229) Acc@1: 30.542 ( 28.388) Acc@5: 52.594 ( 49.006) +Train: 66 [ 0/312 ( 0%)] Loss: 5.19 (5.19) Time: 1.601s, 639.58/s (1.601s, 639.58/s) LR: 2.375e-01 Data: 1.207 (1.207) +Train: 66 [ 50/312 ( 16%)] Loss: 5.27 (5.15) Time: 0.417s, 2455.86/s (0.444s, 2305.42/s) LR: 2.375e-01 Data: 0.026 (0.051) +Train: 66 [ 100/312 ( 32%)] Loss: 5.21 (5.18) Time: 0.417s, 2453.43/s (0.431s, 2376.35/s) LR: 2.375e-01 Data: 0.027 (0.040) +Train: 66 [ 150/312 ( 48%)] Loss: 5.30 (5.21) Time: 0.420s, 2439.31/s (0.427s, 2397.43/s) LR: 2.375e-01 Data: 0.027 (0.036) +Train: 66 [ 200/312 ( 64%)] Loss: 5.28 (5.23) Time: 0.421s, 2434.84/s (0.425s, 2406.69/s) LR: 2.375e-01 Data: 0.026 (0.034) +Train: 66 [ 250/312 ( 80%)] Loss: 5.42 (5.25) Time: 0.414s, 2472.85/s (0.424s, 2416.05/s) LR: 2.375e-01 Data: 0.026 (0.032) +Train: 66 [ 300/312 ( 96%)] Loss: 5.33 (5.26) Time: 0.418s, 2448.76/s (0.423s, 2422.41/s) LR: 2.375e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.464 (1.464) Loss: 4.215 ( 4.215) Acc@1: 29.199 ( 29.199) Acc@5: 49.316 ( 49.316) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 4.112 ( 4.242) Acc@1: 31.486 ( 28.468) Acc@5: 51.179 ( 49.182) +Train: 67 [ 0/312 ( 0%)] Loss: 5.13 (5.13) Time: 1.820s, 562.73/s (1.820s, 562.73/s) LR: 2.334e-01 Data: 1.438 (1.438) +Train: 67 [ 50/312 ( 16%)] Loss: 5.12 (5.13) Time: 0.419s, 2442.94/s (0.447s, 2291.14/s) LR: 2.334e-01 Data: 
0.027 (0.055) +Train: 67 [ 100/312 ( 32%)] Loss: 5.17 (5.16) Time: 0.412s, 2484.01/s (0.433s, 2364.03/s) LR: 2.334e-01 Data: 0.027 (0.041) +Train: 67 [ 150/312 ( 48%)] Loss: 5.17 (5.18) Time: 0.418s, 2452.01/s (0.428s, 2394.85/s) LR: 2.334e-01 Data: 0.027 (0.037) +Train: 67 [ 200/312 ( 64%)] Loss: 5.27 (5.21) Time: 0.420s, 2436.35/s (0.425s, 2406.78/s) LR: 2.334e-01 Data: 0.027 (0.034) +Train: 67 [ 250/312 ( 80%)] Loss: 5.28 (5.23) Time: 0.418s, 2451.30/s (0.425s, 2410.07/s) LR: 2.334e-01 Data: 0.028 (0.033) +Train: 67 [ 300/312 ( 96%)] Loss: 5.38 (5.24) Time: 0.420s, 2439.66/s (0.424s, 2415.07/s) LR: 2.334e-01 Data: 0.029 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.429 (1.429) Loss: 4.302 ( 4.302) Acc@1: 27.734 ( 27.734) Acc@5: 47.852 ( 47.852) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.202 ( 4.338) Acc@1: 28.302 ( 27.240) Acc@5: 48.349 ( 47.202) +Train: 68 [ 0/312 ( 0%)] Loss: 4.95 (4.95) Time: 2.096s, 488.53/s (2.096s, 488.53/s) LR: 2.292e-01 Data: 1.716 (1.716) +Train: 68 [ 50/312 ( 16%)] Loss: 5.09 (5.09) Time: 0.417s, 2454.30/s (0.451s, 2270.62/s) LR: 2.292e-01 Data: 0.027 (0.061) +Train: 68 [ 100/312 ( 32%)] Loss: 5.09 (5.13) Time: 0.420s, 2439.99/s (0.436s, 2346.27/s) LR: 2.292e-01 Data: 0.027 (0.045) +Train: 68 [ 150/312 ( 48%)] Loss: 5.25 (5.16) Time: 0.420s, 2439.59/s (0.430s, 2378.91/s) LR: 2.292e-01 Data: 0.026 (0.039) +Train: 68 [ 200/312 ( 64%)] Loss: 5.37 (5.18) Time: 0.424s, 2414.60/s (0.428s, 2392.20/s) LR: 2.292e-01 Data: 0.028 (0.036) +Train: 68 [ 250/312 ( 80%)] Loss: 5.37 (5.20) Time: 0.420s, 2435.23/s (0.427s, 2398.26/s) LR: 2.292e-01 Data: 0.027 (0.034) +Train: 68 [ 300/312 ( 96%)] Loss: 5.26 (5.21) Time: 0.424s, 2417.58/s (0.426s, 2402.92/s) LR: 2.292e-01 Data: 0.028 (0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.441 (1.441) Loss: 4.283 ( 4.283) Acc@1: 27.637 ( 27.637) Acc@5: 47.266 ( 47.266) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 4.165 ( 4.336) Acc@1: 28.302 ( 26.648) Acc@5: 48.113 ( 46.436) +Train: 69 [ 0/312 ( 0%)] Loss: 5.00 (5.00) Time: 1.729s, 592.31/s (1.729s, 592.31/s) LR: 2.251e-01 Data: 1.303 (1.303) +Train: 69 [ 50/312 ( 16%)] Loss: 5.09 (5.09) Time: 0.422s, 2428.31/s (0.449s, 2282.54/s) LR: 2.251e-01 Data: 0.028 (0.052) +Train: 69 [ 100/312 ( 32%)] Loss: 5.11 (5.11) Time: 0.423s, 2423.25/s (0.435s, 2351.53/s) LR: 2.251e-01 Data: 0.027 (0.040) +Train: 69 [ 150/312 ( 48%)] Loss: 5.22 (5.14) Time: 0.424s, 2417.76/s (0.431s, 2373.84/s) LR: 2.251e-01 Data: 0.029 (0.036) +Train: 69 [ 200/312 ( 64%)] Loss: 5.19 (5.16) Time: 0.423s, 2421.85/s (0.429s, 2385.58/s) LR: 2.251e-01 Data: 0.026 (0.034) +Train: 69 [ 250/312 ( 80%)] Loss: 5.28 (5.18) Time: 0.423s, 2421.85/s (0.428s, 2392.51/s) LR: 2.251e-01 Data: 0.028 (0.033) +Train: 69 [ 300/312 ( 96%)] Loss: 5.27 (5.19) Time: 0.424s, 2412.57/s (0.427s, 2396.93/s) LR: 2.251e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.440 (1.440) Loss: 4.339 ( 4.339) Acc@1: 26.074 ( 26.074) Acc@5: 45.605 ( 45.605) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.190 ( 4.387) Acc@1: 29.127 ( 26.312) Acc@5: 47.759 ( 46.072) +Train: 70 [ 0/312 ( 0%)] Loss: 5.07 (5.07) Time: 1.846s, 554.62/s (1.846s, 554.62/s) LR: 2.209e-01 Data: 1.175 (1.175) +Train: 70 [ 50/312 ( 16%)] Loss: 5.17 (5.06) Time: 0.421s, 2433.74/s (0.450s, 2277.15/s) LR: 2.209e-01 Data: 0.026 (0.050) +Train: 70 [ 100/312 ( 32%)] Loss: 5.03 (5.09) Time: 0.418s, 2451.04/s (0.436s, 2346.71/s) LR: 2.209e-01 Data: 0.028 (0.039) +Train: 70 [ 150/312 ( 48%)] Loss: 5.20 
(5.11) Time: 0.423s, 2422.17/s (0.432s, 2370.44/s) LR: 2.209e-01 Data: 0.028 (0.035) +Train: 70 [ 200/312 ( 64%)] Loss: 5.19 (5.13) Time: 0.424s, 2412.73/s (0.430s, 2383.31/s) LR: 2.209e-01 Data: 0.027 (0.033) +Train: 70 [ 250/312 ( 80%)] Loss: 5.21 (5.15) Time: 0.417s, 2455.14/s (0.428s, 2394.10/s) LR: 2.209e-01 Data: 0.028 (0.032) +Train: 70 [ 300/312 ( 96%)] Loss: 5.25 (5.17) Time: 0.420s, 2439.33/s (0.426s, 2404.26/s) LR: 2.209e-01 Data: 0.030 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.430 (1.430) Loss: 4.290 ( 4.290) Acc@1: 28.711 ( 28.711) Acc@5: 47.266 ( 47.266) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 4.169 ( 4.292) Acc@1: 27.712 ( 27.540) Acc@5: 49.175 ( 47.748) +Train: 71 [ 0/312 ( 0%)] Loss: 4.98 (4.98) Time: 1.687s, 607.15/s (1.687s, 607.15/s) LR: 2.167e-01 Data: 1.190 (1.190) +Train: 71 [ 50/312 ( 16%)] Loss: 5.04 (5.03) Time: 0.420s, 2436.93/s (0.444s, 2304.75/s) LR: 2.167e-01 Data: 0.028 (0.050) +Train: 71 [ 100/312 ( 32%)] Loss: 4.99 (5.06) Time: 0.422s, 2429.04/s (0.433s, 2363.11/s) LR: 2.167e-01 Data: 0.026 (0.039) +Train: 71 [ 150/312 ( 48%)] Loss: 5.09 (5.09) Time: 0.422s, 2424.52/s (0.429s, 2384.41/s) LR: 2.167e-01 Data: 0.026 (0.035) +Train: 71 [ 200/312 ( 64%)] Loss: 5.12 (5.11) Time: 0.425s, 2409.72/s (0.428s, 2393.12/s) LR: 2.167e-01 Data: 0.029 (0.033) +Train: 71 [ 250/312 ( 80%)] Loss: 5.19 (5.13) Time: 0.422s, 2425.06/s (0.427s, 2398.58/s) LR: 2.167e-01 Data: 0.026 (0.032) +Train: 71 [ 300/312 ( 96%)] Loss: 5.29 (5.15) Time: 0.424s, 2416.54/s (0.426s, 2402.92/s) LR: 2.167e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.429 (1.429) Loss: 4.140 ( 4.140) Acc@1: 30.762 ( 30.762) Acc@5: 49.219 ( 49.219) +Test: [ 48/48] Time: 0.092 (0.328) Loss: 4.150 ( 4.261) Acc@1: 28.184 ( 27.456) Acc@5: 50.000 ( 47.970) +Train: 72 [ 0/312 ( 0%)] Loss: 4.92 (4.92) Time: 1.569s, 652.61/s (1.569s, 652.61/s) LR: 2.126e-01 Data: 1.188 (1.188) +Train: 72 [ 50/312 ( 16%)] Loss: 5.14 (5.01) Time: 0.423s, 2419.21/s (0.445s, 2300.56/s) LR: 2.126e-01 Data: 0.026 (0.050) +Train: 72 [ 100/312 ( 32%)] Loss: 5.08 (5.04) Time: 0.422s, 2427.89/s (0.434s, 2359.31/s) LR: 2.126e-01 Data: 0.028 (0.039) +Train: 72 [ 150/312 ( 48%)] Loss: 5.10 (5.07) Time: 0.421s, 2433.31/s (0.430s, 2380.64/s) LR: 2.126e-01 Data: 0.027 (0.035) +Train: 72 [ 200/312 ( 64%)] Loss: 5.31 (5.09) Time: 0.422s, 2426.84/s (0.428s, 2390.71/s) LR: 2.126e-01 Data: 0.027 (0.033) +Train: 72 [ 250/312 ( 80%)] Loss: 5.16 (5.10) Time: 0.423s, 2423.64/s (0.427s, 2396.50/s) LR: 2.126e-01 Data: 0.027 (0.032) +Train: 72 [ 300/312 ( 96%)] Loss: 5.26 (5.12) Time: 0.420s, 2440.58/s (0.427s, 2400.48/s) LR: 2.126e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.445 (1.445) Loss: 4.262 ( 4.262) Acc@1: 28.320 ( 28.320) Acc@5: 47.266 ( 47.266) +Test: [ 48/48] Time: 0.091 (0.329) Loss: 4.158 ( 4.298) Acc@1: 28.066 ( 27.742) Acc@5: 49.175 ( 47.552) +Train: 73 [ 0/312 ( 0%)] Loss: 5.03 (5.03) Time: 1.511s, 677.90/s (1.511s, 677.90/s) LR: 2.084e-01 Data: 1.131 (1.131) +Train: 73 [ 50/312 ( 16%)] Loss: 4.92 (4.98) Time: 0.426s, 2404.51/s (0.444s, 2304.51/s) LR: 2.084e-01 Data: 0.029 (0.049) +Train: 73 [ 100/312 ( 32%)] Loss: 5.15 (5.02) Time: 0.424s, 2417.32/s (0.433s, 2362.30/s) LR: 2.084e-01 Data: 0.025 (0.038) +Train: 73 [ 150/312 ( 48%)] Loss: 5.14 (5.05) Time: 0.421s, 2430.47/s (0.430s, 2383.12/s) LR: 2.084e-01 Data: 0.026 (0.035) +Train: 73 [ 200/312 ( 64%)] Loss: 5.12 (5.06) Time: 0.425s, 2407.16/s (0.428s, 2392.16/s) LR: 
2.084e-01 Data: 0.027 (0.033) +Train: 73 [ 250/312 ( 80%)] Loss: 5.27 (5.08) Time: 0.422s, 2426.20/s (0.427s, 2397.69/s) LR: 2.084e-01 Data: 0.030 (0.032) +Train: 73 [ 300/312 ( 96%)] Loss: 5.23 (5.10) Time: 0.423s, 2418.36/s (0.426s, 2401.86/s) LR: 2.084e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.458 (1.458) Loss: 4.289 ( 4.289) Acc@1: 28.125 ( 28.125) Acc@5: 48.438 ( 48.438) +Test: [ 48/48] Time: 0.091 (0.332) Loss: 4.102 ( 4.295) Acc@1: 28.774 ( 27.908) Acc@5: 50.943 ( 47.824) +Train: 74 [ 0/312 ( 0%)] Loss: 4.86 (4.86) Time: 1.913s, 535.39/s (1.913s, 535.39/s) LR: 2.042e-01 Data: 1.531 (1.531) +Train: 74 [ 50/312 ( 16%)] Loss: 5.00 (4.95) Time: 0.421s, 2431.75/s (0.450s, 2277.06/s) LR: 2.042e-01 Data: 0.027 (0.057) +Train: 74 [ 100/312 ( 32%)] Loss: 4.92 (4.98) Time: 0.425s, 2410.48/s (0.436s, 2348.12/s) LR: 2.042e-01 Data: 0.027 (0.042) +Train: 74 [ 150/312 ( 48%)] Loss: 5.08 (5.01) Time: 0.425s, 2408.27/s (0.432s, 2372.97/s) LR: 2.042e-01 Data: 0.030 (0.038) +Train: 74 [ 200/312 ( 64%)] Loss: 5.01 (5.03) Time: 0.420s, 2435.34/s (0.429s, 2384.48/s) LR: 2.042e-01 Data: 0.026 (0.035) +Train: 74 [ 250/312 ( 80%)] Loss: 5.12 (5.05) Time: 0.416s, 2458.82/s (0.427s, 2396.51/s) LR: 2.042e-01 Data: 0.026 (0.034) +Train: 74 [ 300/312 ( 96%)] Loss: 5.17 (5.07) Time: 0.421s, 2430.12/s (0.426s, 2405.13/s) LR: 2.042e-01 Data: 0.029 (0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.420 (1.420) Loss: 4.314 ( 4.314) Acc@1: 29.102 ( 29.102) Acc@5: 47.168 ( 47.168) +Test: [ 48/48] Time: 0.092 (0.329) Loss: 4.259 ( 4.398) Acc@1: 28.538 ( 26.700) Acc@5: 46.108 ( 45.830) +Train: 75 [ 0/312 ( 0%)] Loss: 4.97 (4.97) Time: 1.405s, 728.67/s (1.405s, 728.67/s) LR: 2.000e-01 Data: 1.002 (1.002) +Train: 75 [ 50/312 ( 16%)] Loss: 4.89 (4.93) Time: 0.425s, 2410.14/s (0.436s, 2350.43/s) LR: 2.000e-01 Data: 0.032 (0.046) +Train: 75 [ 100/312 ( 32%)] Loss: 5.15 (4.97) Time: 0.427s, 2397.24/s (0.428s, 2390.05/s) LR: 2.000e-01 Data: 0.027 (0.037) +Train: 75 [ 150/312 ( 48%)] Loss: 5.03 (4.99) Time: 0.418s, 2448.94/s (0.426s, 2404.80/s) LR: 2.000e-01 Data: 0.027 (0.034) +Train: 75 [ 200/312 ( 64%)] Loss: 5.14 (5.01) Time: 0.426s, 2403.37/s (0.425s, 2410.97/s) LR: 2.000e-01 Data: 0.028 (0.033) +Train: 75 [ 250/312 ( 80%)] Loss: 5.21 (5.03) Time: 0.424s, 2416.90/s (0.424s, 2415.36/s) LR: 2.000e-01 Data: 0.029 (0.032) +Train: 75 [ 300/312 ( 96%)] Loss: 5.19 (5.05) Time: 0.429s, 2384.19/s (0.424s, 2416.92/s) LR: 2.000e-01 Data: 0.032 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.449 (1.449) Loss: 4.300 ( 4.300) Acc@1: 26.172 ( 26.172) Acc@5: 48.438 ( 48.438) +Test: [ 48/48] Time: 0.091 (0.332) Loss: 4.193 ( 4.317) Acc@1: 28.184 ( 27.584) Acc@5: 47.759 ( 47.440) +Train: 76 [ 0/312 ( 0%)] Loss: 4.93 (4.93) Time: 1.620s, 632.17/s (1.620s, 632.17/s) LR: 1.958e-01 Data: 1.240 (1.240) +Train: 76 [ 50/312 ( 16%)] Loss: 4.94 (4.90) Time: 0.419s, 2441.72/s (0.445s, 2302.50/s) LR: 1.958e-01 Data: 0.025 (0.051) +Train: 76 [ 100/312 ( 32%)] Loss: 4.97 (4.93) Time: 0.422s, 2428.14/s (0.433s, 2363.57/s) LR: 1.958e-01 Data: 0.028 (0.040) +Train: 76 [ 150/312 ( 48%)] Loss: 5.06 (4.96) Time: 0.424s, 2412.72/s (0.430s, 2382.48/s) LR: 1.958e-01 Data: 0.028 (0.036) +Train: 76 [ 200/312 ( 64%)] Loss: 4.97 (4.98) Time: 0.425s, 2410.57/s (0.428s, 2393.92/s) LR: 1.958e-01 Data: 0.027 (0.034) +Train: 76 [ 250/312 ( 80%)] Loss: 5.14 (5.00) Time: 0.422s, 2427.36/s (0.426s, 2401.33/s) LR: 1.958e-01 Data: 0.028 (0.032) +Train: 76 [ 300/312 ( 
96%)] Loss: 5.11 (5.02) Time: 0.422s, 2424.70/s (0.426s, 2406.19/s) LR: 1.958e-01 Data: 0.026 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.429 (1.429) Loss: 4.390 ( 4.390) Acc@1: 26.172 ( 26.172) Acc@5: 44.336 ( 44.336) +Test: [ 48/48] Time: 0.091 (0.329) Loss: 4.317 ( 4.425) Acc@1: 25.472 ( 26.048) Acc@5: 45.047 ( 44.824) +Train: 77 [ 0/312 ( 0%)] Loss: 4.83 (4.83) Time: 1.901s, 538.72/s (1.901s, 538.72/s) LR: 1.916e-01 Data: 1.154 (1.154) +Train: 77 [ 50/312 ( 16%)] Loss: 4.89 (4.88) Time: 0.423s, 2421.33/s (0.449s, 2281.66/s) LR: 1.916e-01 Data: 0.026 (0.049) +Train: 77 [ 100/312 ( 32%)] Loss: 4.94 (4.91) Time: 0.421s, 2430.63/s (0.435s, 2351.80/s) LR: 1.916e-01 Data: 0.027 (0.038) +Train: 77 [ 150/312 ( 48%)] Loss: 4.91 (4.94) Time: 0.417s, 2452.93/s (0.431s, 2374.80/s) LR: 1.916e-01 Data: 0.027 (0.034) +Train: 77 [ 200/312 ( 64%)] Loss: 5.19 (4.96) Time: 0.418s, 2449.44/s (0.429s, 2388.87/s) LR: 1.916e-01 Data: 0.028 (0.033) +Train: 77 [ 250/312 ( 80%)] Loss: 5.06 (4.98) Time: 0.421s, 2432.00/s (0.427s, 2396.83/s) LR: 1.916e-01 Data: 0.027 (0.032) +Train: 77 [ 300/312 ( 96%)] Loss: 5.16 (5.00) Time: 0.421s, 2432.42/s (0.426s, 2401.90/s) LR: 1.916e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.426 (1.426) Loss: 4.259 ( 4.259) Acc@1: 27.832 ( 27.832) Acc@5: 48.145 ( 48.145) +Test: [ 48/48] Time: 0.091 (0.329) Loss: 4.193 ( 4.354) Acc@1: 28.420 ( 27.038) Acc@5: 46.934 ( 46.260) +Train: 78 [ 0/312 ( 0%)] Loss: 4.83 (4.83) Time: 1.502s, 681.82/s (1.502s, 681.82/s) LR: 1.874e-01 Data: 1.121 (1.121) +Train: 78 [ 50/312 ( 16%)] Loss: 4.90 (4.86) Time: 0.425s, 2408.76/s (0.443s, 2313.26/s) LR: 1.874e-01 Data: 0.028 (0.049) +Train: 78 [ 100/312 ( 32%)] Loss: 5.02 (4.89) Time: 0.422s, 2428.02/s (0.432s, 2368.20/s) LR: 1.874e-01 Data: 0.028 (0.038) +Train: 78 [ 150/312 ( 48%)] Loss: 5.06 (4.92) Time: 0.423s, 2421.35/s (0.429s, 2387.84/s) LR: 1.874e-01 Data: 0.027 (0.035) +Train: 78 [ 200/312 ( 64%)] Loss: 5.03 (4.94) Time: 0.418s, 2447.54/s (0.427s, 2397.59/s) LR: 1.874e-01 Data: 0.028 (0.033) +Train: 78 [ 250/312 ( 80%)] Loss: 4.98 (4.96) Time: 0.419s, 2445.23/s (0.426s, 2404.02/s) LR: 1.874e-01 Data: 0.028 (0.032) +Train: 78 [ 300/312 ( 96%)] Loss: 5.03 (4.97) Time: 0.425s, 2406.86/s (0.425s, 2408.96/s) LR: 1.874e-01 Data: 0.029 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.423 (1.423) Loss: 4.323 ( 4.323) Acc@1: 27.832 ( 27.832) Acc@5: 46.484 ( 46.484) +Test: [ 48/48] Time: 0.091 (0.329) Loss: 4.192 ( 4.374) Acc@1: 30.189 ( 26.780) Acc@5: 46.698 ( 45.850) +Train: 79 [ 0/312 ( 0%)] Loss: 4.86 (4.86) Time: 1.757s, 582.66/s (1.757s, 582.66/s) LR: 1.833e-01 Data: 1.377 (1.377) +Train: 79 [ 50/312 ( 16%)] Loss: 4.84 (4.85) Time: 0.416s, 2458.84/s (0.445s, 2299.64/s) LR: 1.833e-01 Data: 0.026 (0.054) +Train: 79 [ 100/312 ( 32%)] Loss: 5.00 (4.87) Time: 0.425s, 2407.62/s (0.433s, 2363.08/s) LR: 1.833e-01 Data: 0.028 (0.041) +Train: 79 [ 150/312 ( 48%)] Loss: 4.95 (4.90) Time: 0.423s, 2420.07/s (0.429s, 2385.40/s) LR: 1.833e-01 Data: 0.027 (0.036) +Train: 79 [ 200/312 ( 64%)] Loss: 4.94 (4.92) Time: 0.423s, 2423.56/s (0.428s, 2394.74/s) LR: 1.833e-01 Data: 0.028 (0.034) +Train: 79 [ 250/312 ( 80%)] Loss: 5.05 (4.94) Time: 0.418s, 2449.42/s (0.426s, 2401.30/s) LR: 1.833e-01 Data: 0.025 (0.033) +Train: 79 [ 300/312 ( 96%)] Loss: 5.00 (4.95) Time: 0.423s, 2420.51/s (0.426s, 2406.48/s) LR: 1.833e-01 Data: 0.030 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.447 
(1.447) Loss: 4.509 ( 4.509) Acc@1: 24.023 ( 24.023) Acc@5: 43.750 ( 43.750) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.382 ( 4.520) Acc@1: 24.057 ( 25.146) Acc@5: 44.222 ( 43.576) +Train: 80 [ 0/312 ( 0%)] Loss: 4.75 (4.75) Time: 1.663s, 615.92/s (1.663s, 615.92/s) LR: 1.791e-01 Data: 1.283 (1.283) +Train: 80 [ 50/312 ( 16%)] Loss: 4.85 (4.81) Time: 0.417s, 2453.71/s (0.441s, 2321.68/s) LR: 1.791e-01 Data: 0.028 (0.052) +Train: 80 [ 100/312 ( 32%)] Loss: 4.87 (4.84) Time: 0.420s, 2440.52/s (0.431s, 2373.33/s) LR: 1.791e-01 Data: 0.027 (0.040) +Train: 80 [ 150/312 ( 48%)] Loss: 4.84 (4.87) Time: 0.420s, 2440.29/s (0.428s, 2391.96/s) LR: 1.791e-01 Data: 0.026 (0.036) +Train: 80 [ 200/312 ( 64%)] Loss: 5.03 (4.89) Time: 0.422s, 2427.27/s (0.426s, 2401.48/s) LR: 1.791e-01 Data: 0.029 (0.034) +Train: 80 [ 250/312 ( 80%)] Loss: 5.01 (4.91) Time: 0.421s, 2432.31/s (0.425s, 2408.38/s) LR: 1.791e-01 Data: 0.028 (0.033) +Train: 80 [ 300/312 ( 96%)] Loss: 4.87 (4.93) Time: 0.428s, 2394.98/s (0.424s, 2412.25/s) LR: 1.791e-01 Data: 0.035 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.461 (1.461) Loss: 4.468 ( 4.468) Acc@1: 24.414 ( 24.414) Acc@5: 44.043 ( 44.043) +Test: [ 48/48] Time: 0.091 (0.329) Loss: 4.317 ( 4.510) Acc@1: 26.179 ( 25.610) Acc@5: 45.873 ( 43.778) +Train: 81 [ 0/312 ( 0%)] Loss: 4.80 (4.80) Time: 1.576s, 649.68/s (1.576s, 649.68/s) LR: 1.749e-01 Data: 1.090 (1.090) +Train: 81 [ 50/312 ( 16%)] Loss: 4.79 (4.80) Time: 0.425s, 2407.76/s (0.443s, 2312.89/s) LR: 1.749e-01 Data: 0.028 (0.048) +Train: 81 [ 100/312 ( 32%)] Loss: 4.91 (4.82) Time: 0.425s, 2409.98/s (0.432s, 2370.92/s) LR: 1.749e-01 Data: 0.026 (0.038) +Train: 81 [ 150/312 ( 48%)] Loss: 4.86 (4.84) Time: 0.419s, 2445.32/s (0.428s, 2390.45/s) LR: 1.749e-01 Data: 0.029 (0.034) +Train: 81 [ 200/312 ( 64%)] Loss: 4.98 (4.86) Time: 0.419s, 2441.53/s (0.427s, 2400.67/s) LR: 1.749e-01 Data: 0.027 (0.033) +Train: 81 [ 250/312 ( 80%)] Loss: 4.96 (4.88) Time: 0.419s, 2442.04/s (0.426s, 2406.00/s) LR: 1.749e-01 Data: 0.028 (0.032) +Train: 81 [ 300/312 ( 96%)] Loss: 5.00 (4.90) Time: 0.424s, 2415.58/s (0.425s, 2410.00/s) LR: 1.749e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.461 (1.461) Loss: 4.426 ( 4.426) Acc@1: 27.246 ( 27.246) Acc@5: 45.605 ( 45.605) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 4.211 ( 4.440) Acc@1: 26.297 ( 26.198) Acc@5: 46.580 ( 44.842) +Train: 82 [ 0/312 ( 0%)] Loss: 4.81 (4.81) Time: 1.511s, 677.48/s (1.511s, 677.48/s) LR: 1.708e-01 Data: 1.077 (1.077) +Train: 82 [ 50/312 ( 16%)] Loss: 4.75 (4.75) Time: 0.424s, 2416.05/s (0.441s, 2321.77/s) LR: 1.708e-01 Data: 0.028 (0.047) +Train: 82 [ 100/312 ( 32%)] Loss: 4.90 (4.78) Time: 0.428s, 2392.99/s (0.431s, 2374.90/s) LR: 1.708e-01 Data: 0.029 (0.037) +Train: 82 [ 150/312 ( 48%)] Loss: 4.90 (4.81) Time: 0.421s, 2430.89/s (0.428s, 2394.24/s) LR: 1.708e-01 Data: 0.025 (0.034) +Train: 82 [ 200/312 ( 64%)] Loss: 4.91 (4.83) Time: 0.421s, 2431.28/s (0.426s, 2403.28/s) LR: 1.708e-01 Data: 0.028 (0.032) +Train: 82 [ 250/312 ( 80%)] Loss: 5.04 (4.86) Time: 0.420s, 2438.69/s (0.425s, 2408.23/s) LR: 1.708e-01 Data: 0.026 (0.031) +Train: 82 [ 300/312 ( 96%)] Loss: 5.01 (4.87) Time: 0.422s, 2426.43/s (0.425s, 2412.01/s) LR: 1.708e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.455 (1.455) Loss: 4.486 ( 4.486) Acc@1: 26.953 ( 26.953) Acc@5: 44.629 ( 44.629) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 4.354 ( 4.465) Acc@1: 26.179 ( 26.002) Acc@5: 45.519 ( 
44.650) +Train: 83 [ 0/312 ( 0%)] Loss: 4.70 (4.70) Time: 1.995s, 513.37/s (1.995s, 513.37/s) LR: 1.666e-01 Data: 1.399 (1.399) +Train: 83 [ 50/312 ( 16%)] Loss: 4.72 (4.73) Time: 0.422s, 2427.84/s (0.451s, 2272.69/s) LR: 1.666e-01 Data: 0.027 (0.055) +Train: 83 [ 100/312 ( 32%)] Loss: 4.80 (4.77) Time: 0.420s, 2439.24/s (0.436s, 2350.40/s) LR: 1.666e-01 Data: 0.027 (0.041) +Train: 83 [ 150/312 ( 48%)] Loss: 4.96 (4.79) Time: 0.424s, 2415.55/s (0.431s, 2375.81/s) LR: 1.666e-01 Data: 0.028 (0.037) +Train: 83 [ 200/312 ( 64%)] Loss: 4.95 (4.81) Time: 0.424s, 2417.93/s (0.429s, 2389.40/s) LR: 1.666e-01 Data: 0.026 (0.034) +Train: 83 [ 250/312 ( 80%)] Loss: 4.96 (4.83) Time: 0.422s, 2427.01/s (0.427s, 2397.78/s) LR: 1.666e-01 Data: 0.027 (0.033) +Train: 83 [ 300/312 ( 96%)] Loss: 4.97 (4.85) Time: 0.422s, 2429.29/s (0.426s, 2403.36/s) LR: 1.666e-01 Data: 0.029 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.435 (1.435) Loss: 4.490 ( 4.490) Acc@1: 25.684 ( 25.684) Acc@5: 44.922 ( 44.922) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.343 ( 4.492) Acc@1: 25.590 ( 25.552) Acc@5: 45.991 ( 44.138) +Train: 84 [ 0/312 ( 0%)] Loss: 4.66 (4.66) Time: 1.588s, 645.01/s (1.588s, 645.01/s) LR: 1.625e-01 Data: 1.208 (1.208) +Train: 84 [ 50/312 ( 16%)] Loss: 4.71 (4.69) Time: 0.421s, 2433.17/s (0.443s, 2309.94/s) LR: 1.625e-01 Data: 0.029 (0.051) +Train: 84 [ 100/312 ( 32%)] Loss: 4.90 (4.73) Time: 0.427s, 2398.21/s (0.432s, 2370.63/s) LR: 1.625e-01 Data: 0.036 (0.039) +Train: 84 [ 150/312 ( 48%)] Loss: 4.80 (4.76) Time: 0.419s, 2445.31/s (0.428s, 2390.06/s) LR: 1.625e-01 Data: 0.028 (0.035) +Train: 84 [ 200/312 ( 64%)] Loss: 4.85 (4.78) Time: 0.423s, 2423.13/s (0.427s, 2400.31/s) LR: 1.625e-01 Data: 0.028 (0.033) +Train: 84 [ 250/312 ( 80%)] Loss: 5.02 (4.80) Time: 0.420s, 2437.34/s (0.426s, 2405.62/s) LR: 1.625e-01 Data: 0.028 (0.032) +Train: 84 [ 300/312 ( 96%)] Loss: 4.89 (4.82) Time: 0.422s, 2424.37/s (0.425s, 2409.07/s) LR: 1.625e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.448 (1.448) Loss: 4.552 ( 4.552) Acc@1: 24.219 ( 24.219) Acc@5: 42.773 ( 42.773) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 4.337 ( 4.530) Acc@1: 25.943 ( 25.448) Acc@5: 45.047 ( 43.436) +Train: 85 [ 0/312 ( 0%)] Loss: 4.80 (4.80) Time: 2.032s, 503.85/s (2.032s, 503.85/s) LR: 1.584e-01 Data: 1.177 (1.177) +Train: 85 [ 50/312 ( 16%)] Loss: 4.79 (4.69) Time: 0.422s, 2425.59/s (0.452s, 2263.50/s) LR: 1.584e-01 Data: 0.027 (0.050) +Train: 85 [ 100/312 ( 32%)] Loss: 4.77 (4.72) Time: 0.425s, 2408.19/s (0.437s, 2343.89/s) LR: 1.584e-01 Data: 0.028 (0.039) +Train: 85 [ 150/312 ( 48%)] Loss: 4.79 (4.75) Time: 0.423s, 2423.33/s (0.432s, 2371.40/s) LR: 1.584e-01 Data: 0.027 (0.035) +Train: 85 [ 200/312 ( 64%)] Loss: 4.86 (4.77) Time: 0.420s, 2438.71/s (0.429s, 2385.70/s) LR: 1.584e-01 Data: 0.026 (0.033) +Train: 85 [ 250/312 ( 80%)] Loss: 4.88 (4.79) Time: 0.420s, 2436.57/s (0.428s, 2395.04/s) LR: 1.584e-01 Data: 0.026 (0.032) +Train: 85 [ 300/312 ( 96%)] Loss: 4.97 (4.81) Time: 0.424s, 2416.64/s (0.427s, 2400.10/s) LR: 1.584e-01 Data: 0.026 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.431 (1.431) Loss: 4.665 ( 4.665) Acc@1: 24.707 ( 24.707) Acc@5: 41.699 ( 41.699) +Test: [ 48/48] Time: 0.091 (0.329) Loss: 4.481 ( 4.614) Acc@1: 25.354 ( 24.430) Acc@5: 41.745 ( 42.266) +Train: 86 [ 0/312 ( 0%)] Loss: 4.60 (4.60) Time: 1.468s, 697.67/s (1.468s, 697.67/s) LR: 1.543e-01 Data: 1.088 (1.088) +Train: 86 [ 50/312 ( 16%)] Loss: 4.79 (4.65) Time: 
0.422s, 2428.72/s (0.439s, 2334.55/s) LR: 1.543e-01 Data: 0.027 (0.048) +Train: 86 [ 100/312 ( 32%)] Loss: 4.72 (4.69) Time: 0.418s, 2449.83/s (0.430s, 2382.58/s) LR: 1.543e-01 Data: 0.028 (0.038) +Train: 86 [ 150/312 ( 48%)] Loss: 4.84 (4.72) Time: 0.420s, 2437.10/s (0.427s, 2399.95/s) LR: 1.543e-01 Data: 0.028 (0.034) +Train: 86 [ 200/312 ( 64%)] Loss: 4.84 (4.74) Time: 0.427s, 2399.62/s (0.425s, 2407.02/s) LR: 1.543e-01 Data: 0.034 (0.033) +Train: 86 [ 250/312 ( 80%)] Loss: 4.79 (4.76) Time: 0.418s, 2448.23/s (0.425s, 2411.42/s) LR: 1.543e-01 Data: 0.029 (0.032) +Train: 86 [ 300/312 ( 96%)] Loss: 4.94 (4.77) Time: 0.423s, 2421.06/s (0.424s, 2415.18/s) LR: 1.543e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.440 (1.440) Loss: 4.587 ( 4.587) Acc@1: 25.684 ( 25.684) Acc@5: 43.555 ( 43.555) +Test: [ 48/48] Time: 0.092 (0.329) Loss: 4.467 ( 4.648) Acc@1: 25.472 ( 24.290) Acc@5: 42.925 ( 41.962) +Train: 87 [ 0/312 ( 0%)] Loss: 4.66 (4.66) Time: 2.218s, 461.59/s (2.218s, 461.59/s) LR: 1.503e-01 Data: 1.839 (1.839) +Train: 87 [ 50/312 ( 16%)] Loss: 4.71 (4.64) Time: 0.425s, 2407.87/s (0.457s, 2242.51/s) LR: 1.503e-01 Data: 0.028 (0.063) +Train: 87 [ 100/312 ( 32%)] Loss: 4.62 (4.67) Time: 0.417s, 2457.88/s (0.438s, 2337.46/s) LR: 1.503e-01 Data: 0.027 (0.045) +Train: 87 [ 150/312 ( 48%)] Loss: 4.85 (4.69) Time: 0.421s, 2435.00/s (0.431s, 2373.69/s) LR: 1.503e-01 Data: 0.028 (0.040) +Train: 87 [ 200/312 ( 64%)] Loss: 4.84 (4.71) Time: 0.419s, 2443.73/s (0.429s, 2388.88/s) LR: 1.503e-01 Data: 0.023 (0.036) +Train: 87 [ 250/312 ( 80%)] Loss: 4.87 (4.73) Time: 0.425s, 2406.74/s (0.427s, 2396.16/s) LR: 1.503e-01 Data: 0.026 (0.035) +Train: 87 [ 300/312 ( 96%)] Loss: 4.91 (4.75) Time: 0.424s, 2412.26/s (0.426s, 2402.25/s) LR: 1.503e-01 Data: 0.027 (0.034) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.428 (1.428) Loss: 4.615 ( 4.615) Acc@1: 24.219 ( 24.219) Acc@5: 43.359 ( 43.359) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 4.347 ( 4.581) Acc@1: 27.476 ( 24.678) Acc@5: 46.344 ( 42.942) +Train: 88 [ 0/312 ( 0%)] Loss: 4.54 (4.54) Time: 1.952s, 524.50/s (1.952s, 524.50/s) LR: 1.462e-01 Data: 1.091 (1.091) +Train: 88 [ 50/312 ( 16%)] Loss: 4.66 (4.60) Time: 0.426s, 2406.21/s (0.450s, 2275.93/s) LR: 1.462e-01 Data: 0.029 (0.049) +Train: 88 [ 100/312 ( 32%)] Loss: 4.58 (4.63) Time: 0.421s, 2435.03/s (0.436s, 2349.35/s) LR: 1.462e-01 Data: 0.027 (0.038) +Train: 88 [ 150/312 ( 48%)] Loss: 4.71 (4.66) Time: 0.424s, 2415.11/s (0.431s, 2375.66/s) LR: 1.462e-01 Data: 0.027 (0.035) +Train: 88 [ 200/312 ( 64%)] Loss: 4.75 (4.69) Time: 0.422s, 2423.80/s (0.428s, 2390.44/s) LR: 1.462e-01 Data: 0.027 (0.033) +Train: 88 [ 250/312 ( 80%)] Loss: 4.77 (4.71) Time: 0.424s, 2417.00/s (0.427s, 2396.93/s) LR: 1.462e-01 Data: 0.027 (0.032) +Train: 88 [ 300/312 ( 96%)] Loss: 4.81 (4.73) Time: 0.417s, 2453.15/s (0.426s, 2403.02/s) LR: 1.462e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.476 (1.476) Loss: 4.570 ( 4.570) Acc@1: 23.926 ( 23.926) Acc@5: 42.188 ( 42.188) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 4.441 ( 4.601) Acc@1: 24.410 ( 24.300) Acc@5: 43.278 ( 42.376) +Train: 89 [ 0/312 ( 0%)] Loss: 4.52 (4.52) Time: 1.521s, 673.31/s (1.521s, 673.31/s) LR: 1.422e-01 Data: 1.088 (1.088) +Train: 89 [ 50/312 ( 16%)] Loss: 4.58 (4.59) Time: 0.421s, 2430.17/s (0.443s, 2310.76/s) LR: 1.422e-01 Data: 0.027 (0.050) +Train: 89 [ 100/312 ( 32%)] Loss: 4.82 (4.62) Time: 0.426s, 2403.99/s (0.432s, 2369.53/s) LR: 1.422e-01 
Data: 0.033 (0.039) +Train: 89 [ 150/312 ( 48%)] Loss: 4.76 (4.64) Time: 0.422s, 2425.86/s (0.429s, 2389.57/s) LR: 1.422e-01 Data: 0.026 (0.035) +Train: 89 [ 200/312 ( 64%)] Loss: 4.71 (4.66) Time: 0.421s, 2430.97/s (0.427s, 2399.25/s) LR: 1.422e-01 Data: 0.027 (0.033) +Train: 89 [ 250/312 ( 80%)] Loss: 4.83 (4.68) Time: 0.419s, 2442.66/s (0.426s, 2406.40/s) LR: 1.422e-01 Data: 0.027 (0.032) +Train: 89 [ 300/312 ( 96%)] Loss: 4.82 (4.70) Time: 0.421s, 2431.68/s (0.425s, 2410.17/s) LR: 1.422e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.501 (1.501) Loss: 4.538 ( 4.538) Acc@1: 25.293 ( 25.293) Acc@5: 43.652 ( 43.652) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 4.403 ( 4.580) Acc@1: 26.179 ( 24.798) Acc@5: 45.519 ( 42.720) +Train: 90 [ 0/312 ( 0%)] Loss: 4.52 (4.52) Time: 2.579s, 397.05/s (2.579s, 397.05/s) LR: 1.382e-01 Data: 1.283 (1.283) +Train: 90 [ 50/312 ( 16%)] Loss: 4.48 (4.55) Time: 0.419s, 2442.04/s (0.463s, 2210.52/s) LR: 1.382e-01 Data: 0.027 (0.052) +Train: 90 [ 100/312 ( 32%)] Loss: 4.65 (4.59) Time: 0.424s, 2416.85/s (0.443s, 2313.17/s) LR: 1.382e-01 Data: 0.026 (0.040) +Train: 90 [ 150/312 ( 48%)] Loss: 4.62 (4.62) Time: 0.427s, 2399.11/s (0.436s, 2350.40/s) LR: 1.382e-01 Data: 0.027 (0.036) +Train: 90 [ 200/312 ( 64%)] Loss: 4.66 (4.64) Time: 0.417s, 2453.47/s (0.432s, 2371.26/s) LR: 1.382e-01 Data: 0.027 (0.034) +Train: 90 [ 250/312 ( 80%)] Loss: 4.85 (4.66) Time: 0.421s, 2431.29/s (0.430s, 2381.61/s) LR: 1.382e-01 Data: 0.027 (0.033) +Train: 90 [ 300/312 ( 96%)] Loss: 4.76 (4.68) Time: 0.427s, 2395.62/s (0.429s, 2388.88/s) LR: 1.382e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.448 (1.448) Loss: 4.714 ( 4.714) Acc@1: 22.168 ( 22.168) Acc@5: 41.309 ( 41.309) +Test: [ 48/48] Time: 0.092 (0.332) Loss: 4.568 ( 4.709) Acc@1: 22.642 ( 23.104) Acc@5: 41.156 ( 40.850) +Train: 91 [ 0/312 ( 0%)] Loss: 4.58 (4.58) Time: 1.678s, 610.18/s (1.678s, 610.18/s) LR: 1.342e-01 Data: 1.298 (1.298) +Train: 91 [ 50/312 ( 16%)] Loss: 4.56 (4.52) Time: 0.420s, 2440.46/s (0.445s, 2300.91/s) LR: 1.342e-01 Data: 0.028 (0.053) +Train: 91 [ 100/312 ( 32%)] Loss: 4.56 (4.56) Time: 0.418s, 2447.63/s (0.433s, 2366.12/s) LR: 1.342e-01 Data: 0.027 (0.040) +Train: 91 [ 150/312 ( 48%)] Loss: 4.64 (4.58) Time: 0.418s, 2447.66/s (0.429s, 2385.31/s) LR: 1.342e-01 Data: 0.028 (0.036) +Train: 91 [ 200/312 ( 64%)] Loss: 4.67 (4.60) Time: 0.416s, 2458.78/s (0.427s, 2396.80/s) LR: 1.342e-01 Data: 0.027 (0.034) +Train: 91 [ 250/312 ( 80%)] Loss: 4.60 (4.62) Time: 0.421s, 2433.42/s (0.426s, 2404.18/s) LR: 1.342e-01 Data: 0.024 (0.033) +Train: 91 [ 300/312 ( 96%)] Loss: 4.82 (4.64) Time: 0.423s, 2423.32/s (0.425s, 2408.30/s) LR: 1.342e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.439 (1.439) Loss: 4.733 ( 4.733) Acc@1: 23.047 ( 23.047) Acc@5: 40.137 ( 40.137) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.667 ( 4.765) Acc@1: 22.288 ( 22.988) Acc@5: 39.033 ( 40.176) +Train: 92 [ 0/312 ( 0%)] Loss: 4.42 (4.42) Time: 1.805s, 567.17/s (1.805s, 567.17/s) LR: 1.303e-01 Data: 1.425 (1.425) +Train: 92 [ 50/312 ( 16%)] Loss: 4.49 (4.51) Time: 0.421s, 2432.55/s (0.447s, 2291.53/s) LR: 1.303e-01 Data: 0.027 (0.055) +Train: 92 [ 100/312 ( 32%)] Loss: 4.66 (4.54) Time: 0.420s, 2435.28/s (0.434s, 2357.00/s) LR: 1.303e-01 Data: 0.028 (0.041) +Train: 92 [ 150/312 ( 48%)] Loss: 4.63 (4.57) Time: 0.420s, 2436.76/s (0.430s, 2380.29/s) LR: 1.303e-01 Data: 0.028 (0.037) +Train: 92 [ 200/312 ( 64%)] Loss: 
4.60 (4.59) Time: 0.419s, 2445.24/s (0.428s, 2392.67/s) LR: 1.303e-01 Data: 0.025 (0.034) +Train: 92 [ 250/312 ( 80%)] Loss: 4.74 (4.60) Time: 0.423s, 2422.63/s (0.427s, 2400.41/s) LR: 1.303e-01 Data: 0.027 (0.033) +Train: 92 [ 300/312 ( 96%)] Loss: 4.73 (4.62) Time: 0.423s, 2422.28/s (0.426s, 2405.74/s) LR: 1.303e-01 Data: 0.032 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.430 (1.430) Loss: 4.741 ( 4.741) Acc@1: 23.926 ( 23.926) Acc@5: 41.699 ( 41.699) +Test: [ 48/48] Time: 0.092 (0.333) Loss: 4.531 ( 4.730) Acc@1: 24.175 ( 23.346) Acc@5: 43.632 ( 40.774) +Train: 93 [ 0/312 ( 0%)] Loss: 4.48 (4.48) Time: 1.666s, 614.50/s (1.666s, 614.50/s) LR: 1.264e-01 Data: 1.212 (1.212) +Train: 93 [ 50/312 ( 16%)] Loss: 4.51 (4.47) Time: 0.423s, 2419.47/s (0.446s, 2296.16/s) LR: 1.264e-01 Data: 0.028 (0.052) +Train: 93 [ 100/312 ( 32%)] Loss: 4.51 (4.51) Time: 0.424s, 2414.63/s (0.434s, 2360.09/s) LR: 1.264e-01 Data: 0.028 (0.040) +Train: 93 [ 150/312 ( 48%)] Loss: 4.59 (4.53) Time: 0.419s, 2444.04/s (0.430s, 2382.08/s) LR: 1.264e-01 Data: 0.027 (0.036) +Train: 93 [ 200/312 ( 64%)] Loss: 4.68 (4.56) Time: 0.426s, 2402.21/s (0.428s, 2394.44/s) LR: 1.264e-01 Data: 0.028 (0.034) +Train: 93 [ 250/312 ( 80%)] Loss: 4.65 (4.58) Time: 0.420s, 2435.81/s (0.426s, 2402.01/s) LR: 1.264e-01 Data: 0.026 (0.033) +Train: 93 [ 300/312 ( 96%)] Loss: 4.78 (4.60) Time: 0.420s, 2440.01/s (0.425s, 2406.95/s) LR: 1.264e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.431 (1.431) Loss: 4.560 ( 4.560) Acc@1: 23.438 ( 23.438) Acc@5: 42.676 ( 42.676) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.452 ( 4.613) Acc@1: 25.825 ( 24.376) Acc@5: 45.637 ( 42.146) +Train: 94 [ 0/312 ( 0%)] Loss: 4.36 (4.36) Time: 1.710s, 598.79/s (1.710s, 598.79/s) LR: 1.225e-01 Data: 1.214 (1.214) +Train: 94 [ 50/312 ( 16%)] Loss: 4.52 (4.46) Time: 0.423s, 2421.89/s (0.446s, 2297.92/s) LR: 1.225e-01 Data: 0.028 (0.051) +Train: 94 [ 100/312 ( 32%)] Loss: 4.55 (4.49) Time: 0.420s, 2436.00/s (0.434s, 2361.56/s) LR: 1.225e-01 Data: 0.027 (0.040) +Train: 94 [ 150/312 ( 48%)] Loss: 4.57 (4.51) Time: 0.420s, 2440.71/s (0.429s, 2385.43/s) LR: 1.225e-01 Data: 0.027 (0.036) +Train: 94 [ 200/312 ( 64%)] Loss: 4.62 (4.53) Time: 0.419s, 2446.04/s (0.428s, 2395.22/s) LR: 1.225e-01 Data: 0.029 (0.034) +Train: 94 [ 250/312 ( 80%)] Loss: 4.71 (4.55) Time: 0.419s, 2445.24/s (0.426s, 2402.31/s) LR: 1.225e-01 Data: 0.028 (0.032) +Train: 94 [ 300/312 ( 96%)] Loss: 4.69 (4.57) Time: 0.421s, 2433.60/s (0.426s, 2405.80/s) LR: 1.225e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.434 (1.434) Loss: 4.702 ( 4.702) Acc@1: 25.098 ( 25.098) Acc@5: 41.992 ( 41.992) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.560 ( 4.723) Acc@1: 24.646 ( 23.432) Acc@5: 43.278 ( 40.810) +Train: 95 [ 0/312 ( 0%)] Loss: 4.51 (4.51) Time: 2.007s, 510.19/s (2.007s, 510.19/s) LR: 1.187e-01 Data: 1.082 (1.082) +Train: 95 [ 50/312 ( 16%)] Loss: 4.48 (4.43) Time: 0.416s, 2462.17/s (0.452s, 2267.96/s) LR: 1.187e-01 Data: 0.029 (0.048) +Train: 95 [ 100/312 ( 32%)] Loss: 4.48 (4.46) Time: 0.424s, 2414.94/s (0.437s, 2345.89/s) LR: 1.187e-01 Data: 0.027 (0.038) +Train: 95 [ 150/312 ( 48%)] Loss: 4.61 (4.48) Time: 0.423s, 2423.27/s (0.432s, 2371.64/s) LR: 1.187e-01 Data: 0.029 (0.034) +Train: 95 [ 200/312 ( 64%)] Loss: 4.64 (4.50) Time: 0.422s, 2427.89/s (0.429s, 2385.48/s) LR: 1.187e-01 Data: 0.028 (0.032) +Train: 95 [ 250/312 ( 80%)] Loss: 4.47 (4.52) Time: 0.422s, 2424.52/s (0.428s, 
2393.93/s) LR: 1.187e-01 Data: 0.025 (0.031) +Train: 95 [ 300/312 ( 96%)] Loss: 4.67 (4.54) Time: 0.423s, 2423.26/s (0.427s, 2400.08/s) LR: 1.187e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.447 (1.447) Loss: 4.753 ( 4.753) Acc@1: 24.512 ( 24.512) Acc@5: 40.234 ( 40.234) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 4.541 ( 4.760) Acc@1: 24.646 ( 23.070) Acc@5: 42.571 ( 40.330) +Train: 96 [ 0/312 ( 0%)] Loss: 4.37 (4.37) Time: 1.381s, 741.55/s (1.381s, 741.55/s) LR: 1.148e-01 Data: 1.001 (1.001) +Train: 96 [ 50/312 ( 16%)] Loss: 4.38 (4.40) Time: 0.418s, 2447.91/s (0.439s, 2330.56/s) LR: 1.148e-01 Data: 0.027 (0.047) +Train: 96 [ 100/312 ( 32%)] Loss: 4.41 (4.43) Time: 0.421s, 2429.55/s (0.431s, 2377.80/s) LR: 1.148e-01 Data: 0.027 (0.037) +Train: 96 [ 150/312 ( 48%)] Loss: 4.49 (4.46) Time: 0.425s, 2406.99/s (0.428s, 2394.13/s) LR: 1.148e-01 Data: 0.032 (0.034) +Train: 96 [ 200/312 ( 64%)] Loss: 4.53 (4.48) Time: 0.426s, 2405.53/s (0.426s, 2403.26/s) LR: 1.148e-01 Data: 0.027 (0.032) +Train: 96 [ 250/312 ( 80%)] Loss: 4.55 (4.50) Time: 0.423s, 2419.30/s (0.425s, 2408.09/s) LR: 1.148e-01 Data: 0.031 (0.032) +Train: 96 [ 300/312 ( 96%)] Loss: 4.56 (4.51) Time: 0.421s, 2430.32/s (0.425s, 2411.79/s) LR: 1.148e-01 Data: 0.026 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.441 (1.441) Loss: 4.568 ( 4.568) Acc@1: 25.488 ( 25.488) Acc@5: 43.359 ( 43.359) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 4.421 ( 4.601) Acc@1: 25.708 ( 24.594) Acc@5: 45.165 ( 42.514) +Train: 97 [ 0/312 ( 0%)] Loss: 4.32 (4.32) Time: 1.596s, 641.80/s (1.596s, 641.80/s) LR: 1.111e-01 Data: 1.215 (1.215) +Train: 97 [ 50/312 ( 16%)] Loss: 4.45 (4.39) Time: 0.418s, 2449.84/s (0.443s, 2310.37/s) LR: 1.111e-01 Data: 0.023 (0.051) +Train: 97 [ 100/312 ( 32%)] Loss: 4.43 (4.41) Time: 0.417s, 2455.91/s (0.432s, 2369.34/s) LR: 1.111e-01 Data: 0.027 (0.039) +Train: 97 [ 150/312 ( 48%)] Loss: 4.43 (4.43) Time: 0.424s, 2412.60/s (0.428s, 2391.37/s) LR: 1.111e-01 Data: 0.031 (0.035) +Train: 97 [ 200/312 ( 64%)] Loss: 4.55 (4.45) Time: 0.420s, 2435.95/s (0.427s, 2400.54/s) LR: 1.111e-01 Data: 0.027 (0.033) +Train: 97 [ 250/312 ( 80%)] Loss: 4.59 (4.47) Time: 0.424s, 2415.77/s (0.426s, 2405.96/s) LR: 1.111e-01 Data: 0.027 (0.032) +Train: 97 [ 300/312 ( 96%)] Loss: 4.52 (4.48) Time: 0.422s, 2425.40/s (0.425s, 2409.87/s) LR: 1.111e-01 Data: 0.025 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.427 (1.427) Loss: 4.720 ( 4.720) Acc@1: 24.121 ( 24.121) Acc@5: 41.309 ( 41.309) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 4.633 ( 4.758) Acc@1: 22.052 ( 23.302) Acc@5: 40.094 ( 40.476) +Train: 98 [ 0/312 ( 0%)] Loss: 4.42 (4.42) Time: 1.607s, 637.21/s (1.607s, 637.21/s) LR: 1.073e-01 Data: 1.227 (1.227) +Train: 98 [ 50/312 ( 16%)] Loss: 4.42 (4.36) Time: 0.420s, 2440.33/s (0.441s, 2322.18/s) LR: 1.073e-01 Data: 0.025 (0.051) +Train: 98 [ 100/312 ( 32%)] Loss: 4.52 (4.38) Time: 0.422s, 2428.87/s (0.431s, 2375.28/s) LR: 1.073e-01 Data: 0.027 (0.039) +Train: 98 [ 150/312 ( 48%)] Loss: 4.40 (4.40) Time: 0.421s, 2433.01/s (0.427s, 2395.33/s) LR: 1.073e-01 Data: 0.024 (0.035) +Train: 98 [ 200/312 ( 64%)] Loss: 4.51 (4.42) Time: 0.425s, 2410.90/s (0.426s, 2404.20/s) LR: 1.073e-01 Data: 0.029 (0.033) +Train: 98 [ 250/312 ( 80%)] Loss: 4.65 (4.44) Time: 0.423s, 2419.71/s (0.425s, 2408.73/s) LR: 1.073e-01 Data: 0.028 (0.032) +Train: 98 [ 300/312 ( 96%)] Loss: 4.60 (4.46) Time: 0.421s, 2433.79/s (0.424s, 2412.25/s) LR: 1.073e-01 Data: 0.027 (0.031) 
+Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.433 (1.433) Loss: 4.692 ( 4.692) Acc@1: 23.242 ( 23.242) Acc@5: 40.527 ( 40.527) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.546 ( 4.706) Acc@1: 24.646 ( 23.782) Acc@5: 41.627 ( 40.996) +Train: 99 [ 0/312 ( 0%)] Loss: 4.28 (4.28) Time: 1.753s, 584.06/s (1.753s, 584.06/s) LR: 1.036e-01 Data: 1.160 (1.160) +Train: 99 [ 50/312 ( 16%)] Loss: 4.42 (4.34) Time: 0.423s, 2421.43/s (0.447s, 2290.10/s) LR: 1.036e-01 Data: 0.029 (0.049) +Train: 99 [ 100/312 ( 32%)] Loss: 4.37 (4.36) Time: 0.421s, 2430.14/s (0.434s, 2358.28/s) LR: 1.036e-01 Data: 0.028 (0.038) +Train: 99 [ 150/312 ( 48%)] Loss: 4.48 (4.38) Time: 0.423s, 2421.24/s (0.430s, 2381.28/s) LR: 1.036e-01 Data: 0.027 (0.035) +Train: 99 [ 200/312 ( 64%)] Loss: 4.51 (4.40) Time: 0.421s, 2434.33/s (0.428s, 2393.59/s) LR: 1.036e-01 Data: 0.026 (0.033) +Train: 99 [ 250/312 ( 80%)] Loss: 4.51 (4.42) Time: 0.419s, 2445.45/s (0.427s, 2400.39/s) LR: 1.036e-01 Data: 0.028 (0.032) +Train: 99 [ 300/312 ( 96%)] Loss: 4.47 (4.43) Time: 0.422s, 2427.85/s (0.426s, 2405.56/s) LR: 1.036e-01 Data: 0.029 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.456 (1.456) Loss: 4.666 ( 4.666) Acc@1: 24.023 ( 24.023) Acc@5: 41.602 ( 41.602) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 4.534 ( 4.747) Acc@1: 23.703 ( 23.358) Acc@5: 42.453 ( 40.620) +Train: 100 [ 0/312 ( 0%)] Loss: 4.24 (4.24) Time: 1.813s, 564.81/s (1.813s, 564.81/s) LR: 1.000e-01 Data: 1.432 (1.432) +Train: 100 [ 50/312 ( 16%)] Loss: 4.37 (4.32) Time: 0.421s, 2435.17/s (0.447s, 2290.44/s) LR: 1.000e-01 Data: 0.026 (0.055) +Train: 100 [ 100/312 ( 32%)] Loss: 4.39 (4.34) Time: 0.422s, 2425.03/s (0.434s, 2361.43/s) LR: 1.000e-01 Data: 0.027 (0.041) +Train: 100 [ 150/312 ( 48%)] Loss: 4.28 (4.36) Time: 0.419s, 2442.02/s (0.430s, 2383.34/s) LR: 1.000e-01 Data: 0.026 (0.037) +Train: 100 [ 200/312 ( 64%)] Loss: 4.40 (4.38) Time: 0.422s, 2429.28/s (0.428s, 2394.36/s) LR: 1.000e-01 Data: 0.028 (0.034) +Train: 100 [ 250/312 ( 80%)] Loss: 4.50 (4.39) Time: 0.417s, 2454.13/s (0.426s, 2401.79/s) LR: 1.000e-01 Data: 0.027 (0.033) +Train: 100 [ 300/312 ( 96%)] Loss: 4.47 (4.41) Time: 0.420s, 2440.50/s (0.425s, 2406.69/s) LR: 1.000e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.429 (1.429) Loss: 4.697 ( 4.697) Acc@1: 23.047 ( 23.047) Acc@5: 40.332 ( 40.332) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 4.535 ( 4.760) Acc@1: 24.764 ( 23.154) Acc@5: 41.038 ( 40.426) +Train: 101 [ 0/312 ( 0%)] Loss: 4.24 (4.24) Time: 1.612s, 635.22/s (1.612s, 635.22/s) LR: 9.639e-02 Data: 1.231 (1.231) +Train: 101 [ 50/312 ( 16%)] Loss: 4.20 (4.28) Time: 0.421s, 2429.88/s (0.444s, 2306.59/s) LR: 9.639e-02 Data: 0.028 (0.052) +Train: 101 [ 100/312 ( 32%)] Loss: 4.36 (4.30) Time: 0.418s, 2447.10/s (0.433s, 2367.21/s) LR: 9.639e-02 Data: 0.027 (0.040) +Train: 101 [ 150/312 ( 48%)] Loss: 4.43 (4.33) Time: 0.421s, 2431.57/s (0.429s, 2388.99/s) LR: 9.639e-02 Data: 0.028 (0.036) +Train: 101 [ 200/312 ( 64%)] Loss: 4.47 (4.35) Time: 0.421s, 2429.91/s (0.427s, 2397.88/s) LR: 9.639e-02 Data: 0.027 (0.034) +Train: 101 [ 250/312 ( 80%)] Loss: 4.42 (4.37) Time: 0.423s, 2423.22/s (0.426s, 2403.97/s) LR: 9.639e-02 Data: 0.027 (0.033) +Train: 101 [ 300/312 ( 96%)] Loss: 4.50 (4.38) Time: 0.423s, 2421.58/s (0.425s, 2407.92/s) LR: 9.639e-02 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.443 (1.443) Loss: 4.951 ( 4.951) Acc@1: 22.168 ( 22.168) Acc@5: 37.695 ( 37.695) +Test: [ 
48/48] Time: 0.091 (0.332) Loss: 4.764 ( 4.975) Acc@1: 22.524 ( 21.510) Acc@5: 39.741 ( 37.892) +Train: 102 [ 0/312 ( 0%)] Loss: 4.30 (4.30) Time: 1.605s, 638.20/s (1.605s, 638.20/s) LR: 9.283e-02 Data: 1.224 (1.224) +Train: 102 [ 50/312 ( 16%)] Loss: 4.33 (4.27) Time: 0.417s, 2453.16/s (0.443s, 2309.87/s) LR: 9.283e-02 Data: 0.026 (0.051) +Train: 102 [ 100/312 ( 32%)] Loss: 4.27 (4.28) Time: 0.430s, 2383.93/s (0.433s, 2367.04/s) LR: 9.283e-02 Data: 0.026 (0.040) +Train: 102 [ 150/312 ( 48%)] Loss: 4.35 (4.30) Time: 0.418s, 2452.03/s (0.429s, 2388.53/s) LR: 9.283e-02 Data: 0.026 (0.036) +Train: 102 [ 200/312 ( 64%)] Loss: 4.38 (4.32) Time: 0.416s, 2459.80/s (0.427s, 2398.66/s) LR: 9.283e-02 Data: 0.028 (0.033) +Train: 102 [ 250/312 ( 80%)] Loss: 4.42 (4.34) Time: 0.421s, 2433.33/s (0.426s, 2403.98/s) LR: 9.283e-02 Data: 0.027 (0.032) +Train: 102 [ 300/312 ( 96%)] Loss: 4.46 (4.35) Time: 0.420s, 2435.52/s (0.425s, 2408.78/s) LR: 9.283e-02 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.465 (1.465) Loss: 4.814 ( 4.814) Acc@1: 22.656 ( 22.656) Acc@5: 39.258 ( 39.258) +Test: [ 48/48] Time: 0.091 (0.334) Loss: 4.654 ( 4.820) Acc@1: 23.349 ( 22.978) Acc@5: 41.038 ( 39.842) +Train: 103 [ 0/312 ( 0%)] Loss: 4.18 (4.18) Time: 1.995s, 513.35/s (1.995s, 513.35/s) LR: 8.932e-02 Data: 1.615 (1.615) +Train: 103 [ 50/312 ( 16%)] Loss: 4.24 (4.23) Time: 0.417s, 2458.51/s (0.452s, 2266.76/s) LR: 8.932e-02 Data: 0.027 (0.059) +Train: 103 [ 100/312 ( 32%)] Loss: 4.31 (4.26) Time: 0.421s, 2431.57/s (0.436s, 2348.48/s) LR: 8.932e-02 Data: 0.028 (0.044) +Train: 103 [ 150/312 ( 48%)] Loss: 4.35 (4.28) Time: 0.418s, 2449.76/s (0.431s, 2376.36/s) LR: 8.932e-02 Data: 0.027 (0.038) +Train: 103 [ 200/312 ( 64%)] Loss: 4.31 (4.30) Time: 0.422s, 2428.30/s (0.429s, 2389.22/s) LR: 8.932e-02 Data: 0.028 (0.035) +Train: 103 [ 250/312 ( 80%)] Loss: 4.40 (4.31) Time: 0.419s, 2444.74/s (0.427s, 2396.81/s) LR: 8.932e-02 Data: 0.027 (0.034) +Train: 103 [ 300/312 ( 96%)] Loss: 4.47 (4.33) Time: 0.418s, 2447.18/s (0.426s, 2401.88/s) LR: 8.932e-02 Data: 0.027 (0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.430 (1.430) Loss: 4.833 ( 4.833) Acc@1: 21.094 ( 21.094) Acc@5: 40.039 ( 40.039) +Test: [ 48/48] Time: 0.092 (0.329) Loss: 4.583 ( 4.792) Acc@1: 24.292 ( 22.730) Acc@5: 41.038 ( 39.958) +Train: 104 [ 0/312 ( 0%)] Loss: 4.24 (4.24) Time: 1.924s, 532.32/s (1.924s, 532.32/s) LR: 8.586e-02 Data: 1.197 (1.197) +Train: 104 [ 50/312 ( 16%)] Loss: 4.28 (4.20) Time: 0.424s, 2415.60/s (0.449s, 2278.82/s) LR: 8.586e-02 Data: 0.027 (0.050) +Train: 104 [ 100/312 ( 32%)] Loss: 4.33 (4.23) Time: 0.420s, 2436.24/s (0.435s, 2353.26/s) LR: 8.586e-02 Data: 0.028 (0.039) +Train: 104 [ 150/312 ( 48%)] Loss: 4.37 (4.25) Time: 0.419s, 2445.74/s (0.430s, 2379.16/s) LR: 8.586e-02 Data: 0.027 (0.035) +Train: 104 [ 200/312 ( 64%)] Loss: 4.45 (4.26) Time: 0.420s, 2439.40/s (0.428s, 2392.86/s) LR: 8.586e-02 Data: 0.026 (0.033) +Train: 104 [ 250/312 ( 80%)] Loss: 4.28 (4.28) Time: 0.423s, 2420.97/s (0.427s, 2400.45/s) LR: 8.586e-02 Data: 0.028 (0.032) +Train: 104 [ 300/312 ( 96%)] Loss: 4.38 (4.30) Time: 0.419s, 2442.89/s (0.426s, 2405.21/s) LR: 8.586e-02 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.431 (1.431) Loss: 4.821 ( 4.821) Acc@1: 22.070 ( 22.070) Acc@5: 38.379 ( 38.379) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 4.704 ( 4.844) Acc@1: 23.467 ( 22.576) Acc@5: 39.976 ( 39.310) +Train: 105 [ 0/312 ( 0%)] Loss: 4.19 (4.19) Time: 1.608s, 
636.99/s (1.608s, 636.99/s) LR: 8.244e-02 Data: 1.228 (1.228) +Train: 105 [ 50/312 ( 16%)] Loss: 4.31 (4.17) Time: 0.422s, 2424.96/s (0.441s, 2322.54/s) LR: 8.244e-02 Data: 0.028 (0.051) +Train: 105 [ 100/312 ( 32%)] Loss: 4.28 (4.20) Time: 0.419s, 2443.01/s (0.431s, 2375.58/s) LR: 8.244e-02 Data: 0.027 (0.040) +Train: 105 [ 150/312 ( 48%)] Loss: 4.29 (4.22) Time: 0.419s, 2442.54/s (0.428s, 2392.50/s) LR: 8.244e-02 Data: 0.028 (0.036) +Train: 105 [ 200/312 ( 64%)] Loss: 4.39 (4.24) Time: 0.422s, 2424.30/s (0.427s, 2400.45/s) LR: 8.244e-02 Data: 0.026 (0.034) +Train: 105 [ 250/312 ( 80%)] Loss: 4.26 (4.26) Time: 0.419s, 2443.82/s (0.426s, 2406.50/s) LR: 8.244e-02 Data: 0.028 (0.032) +Train: 105 [ 300/312 ( 96%)] Loss: 4.26 (4.27) Time: 0.423s, 2421.66/s (0.425s, 2409.53/s) LR: 8.244e-02 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.457 (1.457) Loss: 4.991 ( 4.991) Acc@1: 21.875 ( 21.875) Acc@5: 37.598 ( 37.598) +Test: [ 48/48] Time: 0.092 (0.329) Loss: 4.808 ( 5.003) Acc@1: 18.986 ( 20.984) Acc@5: 38.679 ( 37.254) +Train: 106 [ 0/312 ( 0%)] Loss: 4.14 (4.14) Time: 1.707s, 599.85/s (1.707s, 599.85/s) LR: 7.908e-02 Data: 1.280 (1.280) +Train: 106 [ 50/312 ( 16%)] Loss: 4.15 (4.16) Time: 0.423s, 2420.81/s (0.446s, 2296.76/s) LR: 7.908e-02 Data: 0.032 (0.053) +Train: 106 [ 100/312 ( 32%)] Loss: 4.14 (4.18) Time: 0.422s, 2428.05/s (0.434s, 2360.50/s) LR: 7.908e-02 Data: 0.026 (0.041) +Train: 106 [ 150/312 ( 48%)] Loss: 4.27 (4.20) Time: 0.423s, 2420.03/s (0.430s, 2384.13/s) LR: 7.908e-02 Data: 0.026 (0.036) +Train: 106 [ 200/312 ( 64%)] Loss: 4.25 (4.22) Time: 0.423s, 2421.86/s (0.427s, 2395.42/s) LR: 7.908e-02 Data: 0.027 (0.034) +Train: 106 [ 250/312 ( 80%)] Loss: 4.29 (4.23) Time: 0.421s, 2429.83/s (0.426s, 2401.00/s) LR: 7.908e-02 Data: 0.028 (0.033) +Train: 106 [ 300/312 ( 96%)] Loss: 4.41 (4.25) Time: 0.419s, 2446.55/s (0.426s, 2405.34/s) LR: 7.908e-02 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.448 (1.448) Loss: 4.796 ( 4.796) Acc@1: 21.875 ( 21.875) Acc@5: 40.527 ( 40.527) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 4.631 ( 4.821) Acc@1: 23.231 ( 22.416) Acc@5: 42.335 ( 39.362) +Train: 107 [ 0/312 ( 0%)] Loss: 4.17 (4.17) Time: 1.719s, 595.85/s (1.719s, 595.85/s) LR: 7.577e-02 Data: 1.339 (1.339) +Train: 107 [ 50/312 ( 16%)] Loss: 4.14 (4.13) Time: 0.418s, 2448.05/s (0.445s, 2303.02/s) LR: 7.577e-02 Data: 0.028 (0.053) +Train: 107 [ 100/312 ( 32%)] Loss: 4.16 (4.15) Time: 0.425s, 2409.98/s (0.433s, 2363.61/s) LR: 7.577e-02 Data: 0.027 (0.040) +Train: 107 [ 150/312 ( 48%)] Loss: 4.10 (4.17) Time: 0.419s, 2442.91/s (0.429s, 2386.17/s) LR: 7.577e-02 Data: 0.027 (0.036) +Train: 107 [ 200/312 ( 64%)] Loss: 4.28 (4.19) Time: 0.419s, 2443.43/s (0.427s, 2396.76/s) LR: 7.577e-02 Data: 0.028 (0.034) +Train: 107 [ 250/312 ( 80%)] Loss: 4.31 (4.21) Time: 0.422s, 2424.91/s (0.426s, 2403.38/s) LR: 7.577e-02 Data: 0.027 (0.033) +Train: 107 [ 300/312 ( 96%)] Loss: 4.27 (4.22) Time: 0.426s, 2404.45/s (0.425s, 2407.24/s) LR: 7.577e-02 Data: 0.029 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.446 (1.446) Loss: 4.843 ( 4.843) Acc@1: 22.070 ( 22.070) Acc@5: 39.062 ( 39.062) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 4.681 ( 4.848) Acc@1: 21.816 ( 22.296) Acc@5: 41.038 ( 39.232) +Train: 108 [ 0/312 ( 0%)] Loss: 3.99 (3.99) Time: 1.720s, 595.41/s (1.720s, 595.41/s) LR: 7.252e-02 Data: 1.340 (1.340) +Train: 108 [ 50/312 ( 16%)] Loss: 4.09 (4.10) Time: 0.422s, 2429.05/s (0.445s, 2301.13/s) LR: 
7.252e-02 Data: 0.028 (0.053) +Train: 108 [ 100/312 ( 32%)] Loss: 4.23 (4.13) Time: 0.426s, 2403.59/s (0.433s, 2362.64/s) LR: 7.252e-02 Data: 0.034 (0.041) +Train: 108 [ 150/312 ( 48%)] Loss: 4.09 (4.14) Time: 0.419s, 2443.68/s (0.429s, 2384.63/s) LR: 7.252e-02 Data: 0.027 (0.036) +Train: 108 [ 200/312 ( 64%)] Loss: 4.20 (4.16) Time: 0.420s, 2440.15/s (0.427s, 2395.32/s) LR: 7.252e-02 Data: 0.030 (0.034) +Train: 108 [ 250/312 ( 80%)] Loss: 4.34 (4.18) Time: 0.425s, 2412.25/s (0.426s, 2402.16/s) LR: 7.252e-02 Data: 0.027 (0.033) +Train: 108 [ 300/312 ( 96%)] Loss: 4.26 (4.19) Time: 0.420s, 2440.63/s (0.426s, 2406.13/s) LR: 7.252e-02 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.428 (1.428) Loss: 5.005 ( 5.005) Acc@1: 21.680 ( 21.680) Acc@5: 36.035 ( 36.035) +Test: [ 48/48] Time: 0.091 (0.332) Loss: 4.714 ( 4.990) Acc@1: 22.642 ( 21.334) Acc@5: 39.858 ( 37.752) +Train: 109 [ 0/312 ( 0%)] Loss: 4.04 (4.04) Time: 1.837s, 557.56/s (1.837s, 557.56/s) LR: 6.932e-02 Data: 1.457 (1.457) +Train: 109 [ 50/312 ( 16%)] Loss: 4.07 (4.08) Time: 0.420s, 2436.33/s (0.448s, 2287.10/s) LR: 6.932e-02 Data: 0.028 (0.055) +Train: 109 [ 100/312 ( 32%)] Loss: 4.22 (4.10) Time: 0.424s, 2413.54/s (0.435s, 2356.16/s) LR: 6.932e-02 Data: 0.030 (0.041) +Train: 109 [ 150/312 ( 48%)] Loss: 4.13 (4.12) Time: 0.421s, 2434.14/s (0.430s, 2378.84/s) LR: 6.932e-02 Data: 0.026 (0.037) +Train: 109 [ 200/312 ( 64%)] Loss: 4.24 (4.14) Time: 0.418s, 2448.62/s (0.428s, 2391.19/s) LR: 6.932e-02 Data: 0.028 (0.034) +Train: 109 [ 250/312 ( 80%)] Loss: 4.22 (4.15) Time: 0.421s, 2430.48/s (0.427s, 2400.05/s) LR: 6.932e-02 Data: 0.026 (0.033) +Train: 109 [ 300/312 ( 96%)] Loss: 4.17 (4.17) Time: 0.422s, 2426.72/s (0.426s, 2404.82/s) LR: 6.932e-02 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.461 (1.461) Loss: 5.013 ( 5.013) Acc@1: 21.582 ( 21.582) Acc@5: 36.133 ( 36.133) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 4.804 ( 5.017) Acc@1: 23.113 ( 21.038) Acc@5: 40.448 ( 37.578) +Train: 110 [ 0/312 ( 0%)] Loss: 4.04 (4.04) Time: 1.737s, 589.50/s (1.737s, 589.50/s) LR: 6.617e-02 Data: 1.358 (1.358) +Train: 110 [ 50/312 ( 16%)] Loss: 4.13 (4.05) Time: 0.423s, 2418.88/s (0.446s, 2294.66/s) LR: 6.617e-02 Data: 0.027 (0.053) +Train: 110 [ 100/312 ( 32%)] Loss: 4.02 (4.07) Time: 0.424s, 2412.38/s (0.434s, 2360.50/s) LR: 6.617e-02 Data: 0.029 (0.041) +Train: 110 [ 150/312 ( 48%)] Loss: 4.12 (4.09) Time: 0.421s, 2429.77/s (0.430s, 2383.30/s) LR: 6.617e-02 Data: 0.027 (0.036) +Train: 110 [ 200/312 ( 64%)] Loss: 4.14 (4.11) Time: 0.421s, 2429.47/s (0.428s, 2393.48/s) LR: 6.617e-02 Data: 0.029 (0.034) +Train: 110 [ 250/312 ( 80%)] Loss: 4.31 (4.12) Time: 0.422s, 2427.84/s (0.427s, 2400.09/s) LR: 6.617e-02 Data: 0.029 (0.033) +Train: 110 [ 300/312 ( 96%)] Loss: 4.27 (4.14) Time: 0.424s, 2413.35/s (0.426s, 2404.56/s) LR: 6.617e-02 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.443 (1.443) Loss: 5.081 ( 5.081) Acc@1: 20.898 ( 20.898) Acc@5: 37.598 ( 37.598) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 4.851 ( 5.050) Acc@1: 19.811 ( 20.712) Acc@5: 39.387 ( 37.196) +Train: 111 [ 0/312 ( 0%)] Loss: 3.97 (3.97) Time: 1.644s, 623.05/s (1.644s, 623.05/s) LR: 6.309e-02 Data: 1.263 (1.263) +Train: 111 [ 50/312 ( 16%)] Loss: 4.03 (4.04) Time: 0.417s, 2456.26/s (0.444s, 2304.22/s) LR: 6.309e-02 Data: 0.030 (0.052) +Train: 111 [ 100/312 ( 32%)] Loss: 4.01 (4.06) Time: 0.424s, 2413.55/s (0.433s, 2363.65/s) LR: 6.309e-02 Data: 0.026 (0.040) 
+Train: 111 [ 150/312 ( 48%)] Loss: 4.10 (4.08) Time: 0.421s, 2433.42/s (0.429s, 2384.29/s) LR: 6.309e-02 Data: 0.028 (0.036) +Train: 111 [ 200/312 ( 64%)] Loss: 4.18 (4.09) Time: 0.421s, 2432.93/s (0.427s, 2396.03/s) LR: 6.309e-02 Data: 0.027 (0.034) +Train: 111 [ 250/312 ( 80%)] Loss: 4.21 (4.11) Time: 0.422s, 2427.39/s (0.426s, 2402.59/s) LR: 6.309e-02 Data: 0.028 (0.032) +Train: 111 [ 300/312 ( 96%)] Loss: 4.26 (4.12) Time: 0.428s, 2392.07/s (0.425s, 2407.91/s) LR: 6.309e-02 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.456 (1.456) Loss: 4.965 ( 4.965) Acc@1: 22.559 ( 22.559) Acc@5: 37.988 ( 37.988) +Test: [ 48/48] Time: 0.092 (0.329) Loss: 4.800 ( 4.974) Acc@1: 20.991 ( 21.336) Acc@5: 38.797 ( 38.018) +Train: 112 [ 0/312 ( 0%)] Loss: 4.07 (4.07) Time: 1.634s, 626.54/s (1.634s, 626.54/s) LR: 6.007e-02 Data: 1.253 (1.253) +Train: 112 [ 50/312 ( 16%)] Loss: 4.04 (4.01) Time: 0.418s, 2451.59/s (0.443s, 2312.01/s) LR: 6.007e-02 Data: 0.027 (0.051) +Train: 112 [ 100/312 ( 32%)] Loss: 3.99 (4.02) Time: 0.419s, 2445.80/s (0.432s, 2370.50/s) LR: 6.007e-02 Data: 0.026 (0.039) +Train: 112 [ 150/312 ( 48%)] Loss: 4.05 (4.04) Time: 0.421s, 2431.48/s (0.428s, 2389.84/s) LR: 6.007e-02 Data: 0.027 (0.035) +Train: 112 [ 200/312 ( 64%)] Loss: 4.16 (4.06) Time: 0.424s, 2417.31/s (0.427s, 2400.25/s) LR: 6.007e-02 Data: 0.027 (0.033) +Train: 112 [ 250/312 ( 80%)] Loss: 4.15 (4.07) Time: 0.421s, 2430.17/s (0.426s, 2405.61/s) LR: 6.007e-02 Data: 0.027 (0.032) +Train: 112 [ 300/312 ( 96%)] Loss: 4.06 (4.09) Time: 0.421s, 2433.86/s (0.425s, 2409.18/s) LR: 6.007e-02 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.448 (1.448) Loss: 4.979 ( 4.979) Acc@1: 21.582 ( 21.582) Acc@5: 38.477 ( 38.477) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 4.746 ( 5.011) Acc@1: 21.226 ( 21.222) Acc@5: 38.915 ( 37.800) +Train: 113 [ 0/312 ( 0%)] Loss: 3.92 (3.92) Time: 2.061s, 496.95/s (2.061s, 496.95/s) LR: 5.711e-02 Data: 1.680 (1.680) +Train: 113 [ 50/312 ( 16%)] Loss: 4.09 (3.99) Time: 0.422s, 2424.57/s (0.450s, 2276.63/s) LR: 5.711e-02 Data: 0.029 (0.060) +Train: 113 [ 100/312 ( 32%)] Loss: 4.06 (4.01) Time: 0.421s, 2434.70/s (0.435s, 2353.93/s) LR: 5.711e-02 Data: 0.024 (0.043) +Train: 113 [ 150/312 ( 48%)] Loss: 3.99 (4.02) Time: 0.417s, 2453.15/s (0.430s, 2379.25/s) LR: 5.711e-02 Data: 0.028 (0.038) +Train: 113 [ 200/312 ( 64%)] Loss: 4.07 (4.04) Time: 0.426s, 2404.72/s (0.428s, 2391.89/s) LR: 5.711e-02 Data: 0.026 (0.035) +Train: 113 [ 250/312 ( 80%)] Loss: 4.15 (4.05) Time: 0.421s, 2430.99/s (0.427s, 2398.83/s) LR: 5.711e-02 Data: 0.027 (0.034) +Train: 113 [ 300/312 ( 96%)] Loss: 4.13 (4.07) Time: 0.418s, 2449.82/s (0.426s, 2404.47/s) LR: 5.711e-02 Data: 0.029 (0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.456 (1.456) Loss: 4.901 ( 4.901) Acc@1: 22.461 ( 22.461) Acc@5: 38.672 ( 38.672) +Test: [ 48/48] Time: 0.092 (0.332) Loss: 4.690 ( 4.945) Acc@1: 24.292 ( 22.046) Acc@5: 40.448 ( 38.554) +Train: 114 [ 0/312 ( 0%)] Loss: 3.92 (3.92) Time: 1.601s, 639.60/s (1.601s, 639.60/s) LR: 5.421e-02 Data: 1.096 (1.096) +Train: 114 [ 50/312 ( 16%)] Loss: 4.02 (3.95) Time: 0.420s, 2436.12/s (0.444s, 2307.10/s) LR: 5.421e-02 Data: 0.028 (0.049) +Train: 114 [ 100/312 ( 32%)] Loss: 3.95 (3.97) Time: 0.426s, 2403.30/s (0.433s, 2366.96/s) LR: 5.421e-02 Data: 0.030 (0.038) +Train: 114 [ 150/312 ( 48%)] Loss: 4.10 (3.99) Time: 0.421s, 2435.18/s (0.429s, 2388.27/s) LR: 5.421e-02 Data: 0.028 (0.034) +Train: 114 [ 200/312 ( 64%)] 
Loss: 4.10 (4.01) Time: 0.422s, 2425.77/s (0.427s, 2398.38/s) LR: 5.421e-02 Data: 0.025 (0.033) +Train: 114 [ 250/312 ( 80%)] Loss: 4.02 (4.02) Time: 0.420s, 2436.70/s (0.426s, 2404.43/s) LR: 5.421e-02 Data: 0.027 (0.032) +Train: 114 [ 300/312 ( 96%)] Loss: 4.20 (4.03) Time: 0.423s, 2419.97/s (0.425s, 2409.06/s) LR: 5.421e-02 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.477 (1.477) Loss: 5.036 ( 5.036) Acc@1: 21.777 ( 21.777) Acc@5: 37.598 ( 37.598) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 4.729 ( 5.037) Acc@1: 22.524 ( 21.168) Acc@5: 38.443 ( 37.498) +Train: 115 [ 0/312 ( 0%)] Loss: 3.94 (3.94) Time: 1.787s, 573.07/s (1.787s, 573.07/s) LR: 5.137e-02 Data: 1.407 (1.407) +Train: 115 [ 50/312 ( 16%)] Loss: 4.02 (3.94) Time: 0.425s, 2410.08/s (0.447s, 2290.74/s) LR: 5.137e-02 Data: 0.027 (0.054) +Train: 115 [ 100/312 ( 32%)] Loss: 3.99 (3.95) Time: 0.425s, 2407.06/s (0.434s, 2357.47/s) LR: 5.137e-02 Data: 0.027 (0.041) +Train: 115 [ 150/312 ( 48%)] Loss: 4.02 (3.97) Time: 0.422s, 2429.22/s (0.430s, 2380.17/s) LR: 5.137e-02 Data: 0.027 (0.036) +Train: 115 [ 200/312 ( 64%)] Loss: 4.12 (3.99) Time: 0.422s, 2429.11/s (0.428s, 2389.85/s) LR: 5.137e-02 Data: 0.027 (0.034) +Train: 115 [ 250/312 ( 80%)] Loss: 4.11 (4.00) Time: 0.424s, 2415.17/s (0.427s, 2397.25/s) LR: 5.137e-02 Data: 0.027 (0.033) +Train: 115 [ 300/312 ( 96%)] Loss: 4.11 (4.01) Time: 0.418s, 2448.96/s (0.426s, 2402.51/s) LR: 5.137e-02 Data: 0.029 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.423 (1.423) Loss: 4.979 ( 4.979) Acc@1: 20.410 ( 20.410) Acc@5: 38.086 ( 38.086) +Test: [ 48/48] Time: 0.092 (0.328) Loss: 4.769 ( 4.992) Acc@1: 21.698 ( 21.404) Acc@5: 39.387 ( 37.750) +Train: 116 [ 0/312 ( 0%)] Loss: 3.85 (3.85) Time: 1.555s, 658.43/s (1.555s, 658.43/s) LR: 4.860e-02 Data: 1.086 (1.086) +Train: 116 [ 50/312 ( 16%)] Loss: 4.00 (3.91) Time: 0.417s, 2454.12/s (0.453s, 2259.92/s) LR: 4.860e-02 Data: 0.027 (0.047) +Train: 116 [ 100/312 ( 32%)] Loss: 4.02 (3.94) Time: 0.423s, 2423.39/s (0.437s, 2342.16/s) LR: 4.860e-02 Data: 0.027 (0.037) +Train: 116 [ 150/312 ( 48%)] Loss: 3.96 (3.95) Time: 0.424s, 2413.57/s (0.432s, 2371.47/s) LR: 4.860e-02 Data: 0.027 (0.034) +Train: 116 [ 200/312 ( 64%)] Loss: 4.04 (3.96) Time: 0.419s, 2446.63/s (0.429s, 2385.84/s) LR: 4.860e-02 Data: 0.028 (0.033) +Train: 116 [ 250/312 ( 80%)] Loss: 4.08 (3.97) Time: 0.421s, 2434.17/s (0.428s, 2394.68/s) LR: 4.860e-02 Data: 0.027 (0.032) +Train: 116 [ 300/312 ( 96%)] Loss: 4.02 (3.98) Time: 0.418s, 2451.50/s (0.426s, 2401.31/s) LR: 4.860e-02 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.438 (1.438) Loss: 5.068 ( 5.068) Acc@1: 21.094 ( 21.094) Acc@5: 38.379 ( 38.379) +Test: [ 48/48] Time: 0.091 (0.329) Loss: 4.723 ( 4.994) Acc@1: 21.934 ( 21.476) Acc@5: 39.151 ( 37.718) +Train: 117 [ 0/312 ( 0%)] Loss: 3.88 (3.88) Time: 1.663s, 615.87/s (1.663s, 615.87/s) LR: 4.590e-02 Data: 1.283 (1.283) +Train: 117 [ 50/312 ( 16%)] Loss: 3.76 (3.89) Time: 0.417s, 2456.15/s (0.454s, 2256.93/s) LR: 4.590e-02 Data: 0.028 (0.062) +Train: 117 [ 100/312 ( 32%)] Loss: 3.89 (3.90) Time: 0.419s, 2441.00/s (0.437s, 2341.17/s) LR: 4.590e-02 Data: 0.027 (0.045) +Train: 117 [ 150/312 ( 48%)] Loss: 4.12 (3.93) Time: 0.422s, 2426.77/s (0.432s, 2369.84/s) LR: 4.590e-02 Data: 0.028 (0.039) +Train: 117 [ 200/312 ( 64%)] Loss: 3.94 (3.94) Time: 0.423s, 2423.34/s (0.429s, 2386.02/s) LR: 4.590e-02 Data: 0.026 (0.036) +Train: 117 [ 250/312 ( 80%)] Loss: 3.98 (3.95) Time: 0.423s, 
2419.71/s (0.427s, 2395.98/s) LR: 4.590e-02 Data: 0.026 (0.034) +Train: 117 [ 300/312 ( 96%)] Loss: 4.04 (3.96) Time: 0.422s, 2425.89/s (0.426s, 2402.10/s) LR: 4.590e-02 Data: 0.027 (0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.448 (1.448) Loss: 5.125 ( 5.125) Acc@1: 19.727 ( 19.727) Acc@5: 37.598 ( 37.598) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 4.862 ( 5.100) Acc@1: 20.873 ( 20.652) Acc@5: 38.797 ( 36.634) +Train: 118 [ 0/312 ( 0%)] Loss: 3.92 (3.92) Time: 1.794s, 570.65/s (1.794s, 570.65/s) LR: 4.326e-02 Data: 1.415 (1.415) +Train: 118 [ 50/312 ( 16%)] Loss: 3.78 (3.87) Time: 0.428s, 2391.13/s (0.447s, 2291.58/s) LR: 4.326e-02 Data: 0.028 (0.054) +Train: 118 [ 100/312 ( 32%)] Loss: 3.93 (3.88) Time: 0.422s, 2426.23/s (0.434s, 2359.82/s) LR: 4.326e-02 Data: 0.028 (0.041) +Train: 118 [ 150/312 ( 48%)] Loss: 3.97 (3.90) Time: 0.419s, 2445.66/s (0.430s, 2384.14/s) LR: 4.326e-02 Data: 0.027 (0.036) +Train: 118 [ 200/312 ( 64%)] Loss: 4.01 (3.91) Time: 0.418s, 2450.48/s (0.427s, 2396.32/s) LR: 4.326e-02 Data: 0.029 (0.034) +Train: 118 [ 250/312 ( 80%)] Loss: 3.91 (3.93) Time: 0.421s, 2433.81/s (0.426s, 2403.28/s) LR: 4.326e-02 Data: 0.026 (0.033) +Train: 118 [ 300/312 ( 96%)] Loss: 3.87 (3.94) Time: 0.426s, 2401.07/s (0.425s, 2407.77/s) LR: 4.326e-02 Data: 0.029 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.434 (1.434) Loss: 5.133 ( 5.133) Acc@1: 19.238 ( 19.238) Acc@5: 37.402 ( 37.402) +Test: [ 48/48] Time: 0.092 (0.332) Loss: 4.829 ( 5.105) Acc@1: 22.877 ( 20.376) Acc@5: 38.561 ( 36.450) +Train: 119 [ 0/312 ( 0%)] Loss: 3.81 (3.81) Time: 1.551s, 660.05/s (1.551s, 660.05/s) LR: 4.069e-02 Data: 1.172 (1.172) +Train: 119 [ 50/312 ( 16%)] Loss: 3.86 (3.85) Time: 0.418s, 2447.96/s (0.442s, 2317.62/s) LR: 4.069e-02 Data: 0.028 (0.052) +Train: 119 [ 100/312 ( 32%)] Loss: 3.80 (3.86) Time: 0.423s, 2423.28/s (0.431s, 2373.24/s) LR: 4.069e-02 Data: 0.029 (0.040) +Train: 119 [ 150/312 ( 48%)] Loss: 3.80 (3.88) Time: 0.419s, 2444.30/s (0.428s, 2390.35/s) LR: 4.069e-02 Data: 0.025 (0.036) +Train: 119 [ 200/312 ( 64%)] Loss: 3.90 (3.89) Time: 0.421s, 2433.05/s (0.427s, 2400.80/s) LR: 4.069e-02 Data: 0.030 (0.034) +Train: 119 [ 250/312 ( 80%)] Loss: 4.01 (3.90) Time: 0.423s, 2418.37/s (0.426s, 2405.85/s) LR: 4.069e-02 Data: 0.027 (0.033) +Train: 119 [ 300/312 ( 96%)] Loss: 4.08 (3.91) Time: 0.422s, 2424.99/s (0.425s, 2409.29/s) LR: 4.069e-02 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.444 (1.444) Loss: 5.147 ( 5.147) Acc@1: 20.020 ( 20.020) Acc@5: 36.719 ( 36.719) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 4.886 ( 5.125) Acc@1: 19.693 ( 20.242) Acc@5: 36.910 ( 36.392) +Train: 120 [ 0/312 ( 0%)] Loss: 3.79 (3.79) Time: 1.633s, 627.13/s (1.633s, 627.13/s) LR: 3.820e-02 Data: 1.253 (1.253) +Train: 120 [ 50/312 ( 16%)] Loss: 3.90 (3.84) Time: 0.424s, 2412.73/s (0.444s, 2304.25/s) LR: 3.820e-02 Data: 0.031 (0.053) +Train: 120 [ 100/312 ( 32%)] Loss: 3.85 (3.85) Time: 0.422s, 2427.04/s (0.433s, 2365.13/s) LR: 3.820e-02 Data: 0.027 (0.040) +Train: 120 [ 150/312 ( 48%)] Loss: 3.92 (3.86) Time: 0.422s, 2425.78/s (0.429s, 2386.43/s) LR: 3.820e-02 Data: 0.027 (0.036) +Train: 120 [ 200/312 ( 64%)] Loss: 3.97 (3.87) Time: 0.420s, 2436.47/s (0.427s, 2397.25/s) LR: 3.820e-02 Data: 0.026 (0.034) +Train: 120 [ 250/312 ( 80%)] Loss: 3.87 (3.88) Time: 0.418s, 2448.54/s (0.426s, 2403.82/s) LR: 3.820e-02 Data: 0.027 (0.032) +Train: 120 [ 300/312 ( 96%)] Loss: 4.04 (3.89) Time: 0.421s, 2431.61/s (0.425s, 2407.95/s) 
LR: 3.820e-02 Data: 0.029 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.458 (1.458) Loss: 5.076 ( 5.076) Acc@1: 20.117 ( 20.117) Acc@5: 36.816 ( 36.816) +Test: [ 48/48] Time: 0.092 (0.329) Loss: 4.824 ( 5.097) Acc@1: 20.755 ( 20.818) Acc@5: 39.033 ( 36.940) +Train: 121 [ 0/312 ( 0%)] Loss: 3.71 (3.71) Time: 1.802s, 568.35/s (1.802s, 568.35/s) LR: 3.577e-02 Data: 1.421 (1.421) +Train: 121 [ 50/312 ( 16%)] Loss: 3.81 (3.80) Time: 0.423s, 2421.35/s (0.447s, 2292.22/s) LR: 3.577e-02 Data: 0.029 (0.055) +Train: 121 [ 100/312 ( 32%)] Loss: 3.93 (3.82) Time: 0.422s, 2426.96/s (0.434s, 2356.78/s) LR: 3.577e-02 Data: 0.027 (0.041) +Train: 121 [ 150/312 ( 48%)] Loss: 3.90 (3.83) Time: 0.419s, 2441.21/s (0.430s, 2380.45/s) LR: 3.577e-02 Data: 0.026 (0.037) +Train: 121 [ 200/312 ( 64%)] Loss: 3.91 (3.84) Time: 0.419s, 2445.67/s (0.428s, 2393.12/s) LR: 3.577e-02 Data: 0.027 (0.034) +Train: 121 [ 250/312 ( 80%)] Loss: 3.85 (3.85) Time: 0.420s, 2439.51/s (0.427s, 2399.49/s) LR: 3.577e-02 Data: 0.026 (0.033) +Train: 121 [ 300/312 ( 96%)] Loss: 3.91 (3.86) Time: 0.424s, 2414.63/s (0.426s, 2404.70/s) LR: 3.577e-02 Data: 0.029 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.461 (1.461) Loss: 5.016 ( 5.016) Acc@1: 20.605 ( 20.605) Acc@5: 38.770 ( 38.770) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 4.773 ( 5.035) Acc@1: 22.170 ( 20.994) Acc@5: 39.033 ( 37.150) +Train: 122 [ 0/312 ( 0%)] Loss: 3.72 (3.72) Time: 1.592s, 643.07/s (1.592s, 643.07/s) LR: 3.342e-02 Data: 1.176 (1.176) +Train: 122 [ 50/312 ( 16%)] Loss: 3.69 (3.80) Time: 0.422s, 2427.33/s (0.443s, 2312.29/s) LR: 3.342e-02 Data: 0.027 (0.050) +Train: 122 [ 100/312 ( 32%)] Loss: 3.88 (3.81) Time: 0.422s, 2427.56/s (0.432s, 2368.66/s) LR: 3.342e-02 Data: 0.029 (0.039) +Train: 122 [ 150/312 ( 48%)] Loss: 3.75 (3.82) Time: 0.419s, 2443.44/s (0.429s, 2389.28/s) LR: 3.342e-02 Data: 0.028 (0.035) +Train: 122 [ 200/312 ( 64%)] Loss: 3.77 (3.82) Time: 0.422s, 2426.21/s (0.427s, 2398.04/s) LR: 3.342e-02 Data: 0.029 (0.033) +Train: 122 [ 250/312 ( 80%)] Loss: 3.89 (3.83) Time: 0.430s, 2381.45/s (0.426s, 2403.50/s) LR: 3.342e-02 Data: 0.029 (0.032) +Train: 122 [ 300/312 ( 96%)] Loss: 3.92 (3.84) Time: 0.424s, 2415.85/s (0.425s, 2407.24/s) LR: 3.342e-02 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.452 (1.452) Loss: 5.057 ( 5.057) Acc@1: 21.582 ( 21.582) Acc@5: 37.695 ( 37.695) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 4.856 ( 5.066) Acc@1: 20.755 ( 21.142) Acc@5: 39.269 ( 37.254) +Train: 123 [ 0/312 ( 0%)] Loss: 3.77 (3.77) Time: 1.686s, 607.53/s (1.686s, 607.53/s) LR: 3.113e-02 Data: 1.306 (1.306) +Train: 123 [ 50/312 ( 16%)] Loss: 3.77 (3.78) Time: 0.421s, 2430.96/s (0.445s, 2300.77/s) LR: 3.113e-02 Data: 0.028 (0.053) +Train: 123 [ 100/312 ( 32%)] Loss: 3.69 (3.78) Time: 0.423s, 2421.47/s (0.433s, 2363.75/s) LR: 3.113e-02 Data: 0.029 (0.040) +Train: 123 [ 150/312 ( 48%)] Loss: 3.81 (3.79) Time: 0.422s, 2428.80/s (0.429s, 2386.08/s) LR: 3.113e-02 Data: 0.028 (0.036) +Train: 123 [ 200/312 ( 64%)] Loss: 3.90 (3.80) Time: 0.420s, 2436.97/s (0.427s, 2395.71/s) LR: 3.113e-02 Data: 0.027 (0.034) +Train: 123 [ 250/312 ( 80%)] Loss: 3.89 (3.81) Time: 0.420s, 2437.96/s (0.426s, 2401.02/s) LR: 3.113e-02 Data: 0.027 (0.033) +Train: 123 [ 300/312 ( 96%)] Loss: 3.87 (3.82) Time: 0.420s, 2440.70/s (0.426s, 2405.01/s) LR: 3.113e-02 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.431 (1.431) Loss: 5.175 ( 5.175) Acc@1: 20.312 ( 
20.312) Acc@5: 36.523 ( 36.523) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 4.879 ( 5.184) Acc@1: 21.934 ( 19.766) Acc@5: 38.443 ( 35.866) +Train: 124 [ 0/312 ( 0%)] Loss: 3.70 (3.70) Time: 2.143s, 477.91/s (2.143s, 477.91/s) LR: 2.893e-02 Data: 1.762 (1.762) +Train: 124 [ 50/312 ( 16%)] Loss: 3.78 (3.74) Time: 0.418s, 2447.18/s (0.454s, 2255.20/s) LR: 2.893e-02 Data: 0.028 (0.061) +Train: 124 [ 100/312 ( 32%)] Loss: 3.84 (3.77) Time: 0.422s, 2424.03/s (0.438s, 2338.13/s) LR: 2.893e-02 Data: 0.027 (0.044) +Train: 124 [ 150/312 ( 48%)] Loss: 3.83 (3.78) Time: 0.425s, 2410.73/s (0.433s, 2367.20/s) LR: 2.893e-02 Data: 0.027 (0.039) +Train: 124 [ 200/312 ( 64%)] Loss: 3.83 (3.78) Time: 0.427s, 2399.06/s (0.430s, 2381.45/s) LR: 2.893e-02 Data: 0.026 (0.036) +Train: 124 [ 250/312 ( 80%)] Loss: 3.81 (3.79) Time: 0.417s, 2457.52/s (0.428s, 2390.73/s) LR: 2.893e-02 Data: 0.028 (0.034) +Train: 124 [ 300/312 ( 96%)] Loss: 3.79 (3.80) Time: 0.420s, 2435.63/s (0.427s, 2396.62/s) LR: 2.893e-02 Data: 0.028 (0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.448 (1.448) Loss: 5.034 ( 5.034) Acc@1: 20.703 ( 20.703) Acc@5: 38.184 ( 38.184) +Test: [ 48/48] Time: 0.091 (0.332) Loss: 4.791 ( 5.045) Acc@1: 21.226 ( 21.172) Acc@5: 39.387 ( 37.382) +Train: 125 [ 0/312 ( 0%)] Loss: 3.66 (3.66) Time: 1.714s, 597.53/s (1.714s, 597.53/s) LR: 2.679e-02 Data: 1.332 (1.332) +Train: 125 [ 50/312 ( 16%)] Loss: 3.72 (3.73) Time: 0.428s, 2394.26/s (0.447s, 2290.76/s) LR: 2.679e-02 Data: 0.033 (0.053) +Train: 125 [ 100/312 ( 32%)] Loss: 3.73 (3.75) Time: 0.419s, 2443.05/s (0.434s, 2358.13/s) LR: 2.679e-02 Data: 0.026 (0.040) +Train: 125 [ 150/312 ( 48%)] Loss: 3.77 (3.76) Time: 0.421s, 2430.01/s (0.430s, 2381.00/s) LR: 2.679e-02 Data: 0.028 (0.036) +Train: 125 [ 200/312 ( 64%)] Loss: 3.83 (3.76) Time: 0.417s, 2456.47/s (0.428s, 2393.64/s) LR: 2.679e-02 Data: 0.027 (0.034) +Train: 125 [ 250/312 ( 80%)] Loss: 3.82 (3.77) Time: 0.423s, 2423.31/s (0.427s, 2400.68/s) LR: 2.679e-02 Data: 0.027 (0.033) +Train: 125 [ 300/312 ( 96%)] Loss: 3.85 (3.77) Time: 0.422s, 2424.50/s (0.426s, 2404.58/s) LR: 2.679e-02 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.508 (1.508) Loss: 5.149 ( 5.149) Acc@1: 20.117 ( 20.117) Acc@5: 37.012 ( 37.012) +Test: [ 48/48] Time: 0.092 (0.332) Loss: 4.899 ( 5.152) Acc@1: 20.637 ( 20.188) Acc@5: 36.321 ( 36.288) +Train: 126 [ 0/312 ( 0%)] Loss: 3.79 (3.79) Time: 1.793s, 571.13/s (1.793s, 571.13/s) LR: 2.474e-02 Data: 1.413 (1.413) +Train: 126 [ 50/312 ( 16%)] Loss: 3.70 (3.72) Time: 0.419s, 2442.06/s (0.446s, 2296.74/s) LR: 2.474e-02 Data: 0.027 (0.055) +Train: 126 [ 100/312 ( 32%)] Loss: 3.62 (3.73) Time: 0.417s, 2458.38/s (0.434s, 2361.77/s) LR: 2.474e-02 Data: 0.027 (0.041) +Train: 126 [ 150/312 ( 48%)] Loss: 3.75 (3.74) Time: 0.420s, 2435.51/s (0.430s, 2383.17/s) LR: 2.474e-02 Data: 0.028 (0.037) +Train: 126 [ 200/312 ( 64%)] Loss: 3.75 (3.74) Time: 0.423s, 2423.00/s (0.428s, 2393.70/s) LR: 2.474e-02 Data: 0.028 (0.035) +Train: 126 [ 250/312 ( 80%)] Loss: 3.68 (3.75) Time: 0.417s, 2458.00/s (0.427s, 2400.64/s) LR: 2.474e-02 Data: 0.027 (0.033) +Train: 126 [ 300/312 ( 96%)] Loss: 3.87 (3.75) Time: 0.423s, 2422.69/s (0.426s, 2405.96/s) LR: 2.474e-02 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.457 (1.457) Loss: 5.209 ( 5.209) Acc@1: 19.434 ( 19.434) Acc@5: 37.305 ( 37.305) +Test: [ 48/48] Time: 0.092 (0.333) Loss: 4.933 ( 5.215) Acc@1: 19.811 ( 19.902) Acc@5: 38.443 ( 35.718) +Train: 127 [ 0/312 ( 
0%)] Loss: 3.82 (3.82) Time: 1.514s, 676.48/s (1.514s, 676.48/s) LR: 2.276e-02 Data: 1.120 (1.120) +Train: 127 [ 50/312 ( 16%)] Loss: 3.66 (3.70) Time: 0.423s, 2419.42/s (0.442s, 2317.29/s) LR: 2.276e-02 Data: 0.024 (0.048) +Train: 127 [ 100/312 ( 32%)] Loss: 3.85 (3.71) Time: 0.423s, 2423.39/s (0.432s, 2369.96/s) LR: 2.276e-02 Data: 0.027 (0.038) +Train: 127 [ 150/312 ( 48%)] Loss: 3.77 (3.71) Time: 0.430s, 2379.57/s (0.428s, 2389.87/s) LR: 2.276e-02 Data: 0.027 (0.034) +Train: 127 [ 200/312 ( 64%)] Loss: 3.84 (3.72) Time: 0.419s, 2445.28/s (0.427s, 2399.30/s) LR: 2.276e-02 Data: 0.027 (0.033) +Train: 127 [ 250/312 ( 80%)] Loss: 3.67 (3.72) Time: 0.417s, 2456.31/s (0.426s, 2406.22/s) LR: 2.276e-02 Data: 0.027 (0.032) +Train: 127 [ 300/312 ( 96%)] Loss: 3.79 (3.73) Time: 0.423s, 2420.39/s (0.425s, 2410.91/s) LR: 2.276e-02 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.447 (1.447) Loss: 5.145 ( 5.145) Acc@1: 20.996 ( 20.996) Acc@5: 35.449 ( 35.449) +Test: [ 48/48] Time: 0.091 (0.332) Loss: 4.873 ( 5.142) Acc@1: 21.344 ( 20.404) Acc@5: 38.325 ( 36.270) +Train: 128 [ 0/312 ( 0%)] Loss: 3.71 (3.71) Time: 1.527s, 670.53/s (1.527s, 670.53/s) LR: 2.086e-02 Data: 1.148 (1.148) +Train: 128 [ 50/312 ( 16%)] Loss: 3.70 (3.68) Time: 0.420s, 2440.74/s (0.442s, 2316.86/s) LR: 2.086e-02 Data: 0.029 (0.050) +Train: 128 [ 100/312 ( 32%)] Loss: 3.71 (3.69) Time: 0.419s, 2442.12/s (0.432s, 2371.17/s) LR: 2.086e-02 Data: 0.028 (0.039) +Train: 128 [ 150/312 ( 48%)] Loss: 3.82 (3.70) Time: 0.423s, 2421.65/s (0.428s, 2390.39/s) LR: 2.086e-02 Data: 0.028 (0.035) +Train: 128 [ 200/312 ( 64%)] Loss: 3.68 (3.70) Time: 0.419s, 2445.24/s (0.427s, 2399.36/s) LR: 2.086e-02 Data: 0.029 (0.033) +Train: 128 [ 250/312 ( 80%)] Loss: 3.73 (3.71) Time: 0.425s, 2411.87/s (0.426s, 2405.73/s) LR: 2.086e-02 Data: 0.028 (0.032) +Train: 128 [ 300/312 ( 96%)] Loss: 3.90 (3.72) Time: 0.424s, 2417.71/s (0.425s, 2409.34/s) LR: 2.086e-02 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.424 (1.424) Loss: 5.180 ( 5.180) Acc@1: 19.238 ( 19.238) Acc@5: 35.449 ( 35.449) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 4.860 ( 5.137) Acc@1: 20.991 ( 20.198) Acc@5: 38.679 ( 36.190) +Train: 129 [ 0/312 ( 0%)] Loss: 3.67 (3.67) Time: 1.559s, 656.97/s (1.559s, 656.97/s) LR: 1.903e-02 Data: 1.177 (1.177) +Train: 129 [ 50/312 ( 16%)] Loss: 3.69 (3.67) Time: 0.426s, 2403.05/s (0.442s, 2318.67/s) LR: 1.903e-02 Data: 0.028 (0.050) +Train: 129 [ 100/312 ( 32%)] Loss: 3.63 (3.67) Time: 0.419s, 2446.12/s (0.432s, 2371.16/s) LR: 1.903e-02 Data: 0.025 (0.038) +Train: 129 [ 150/312 ( 48%)] Loss: 3.61 (3.68) Time: 0.426s, 2403.04/s (0.429s, 2387.23/s) LR: 1.903e-02 Data: 0.026 (0.035) +Train: 129 [ 200/312 ( 64%)] Loss: 3.75 (3.69) Time: 0.417s, 2456.22/s (0.427s, 2397.02/s) LR: 1.903e-02 Data: 0.026 (0.033) +Train: 129 [ 250/312 ( 80%)] Loss: 3.68 (3.69) Time: 0.423s, 2418.85/s (0.426s, 2403.84/s) LR: 1.903e-02 Data: 0.026 (0.032) +Train: 129 [ 300/312 ( 96%)] Loss: 3.85 (3.69) Time: 0.420s, 2438.03/s (0.425s, 2407.73/s) LR: 1.903e-02 Data: 0.029 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.429 (1.429) Loss: 5.257 ( 5.257) Acc@1: 18.555 ( 18.555) Acc@5: 35.156 ( 35.156) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 4.928 ( 5.215) Acc@1: 20.283 ( 20.020) Acc@5: 37.736 ( 35.746) +Train: 130 [ 0/312 ( 0%)] Loss: 3.51 (3.51) Time: 1.862s, 550.02/s (1.862s, 550.02/s) LR: 1.729e-02 Data: 1.481 (1.481) +Train: 130 [ 50/312 ( 16%)] Loss: 3.78 (3.65) Time: 
0.422s, 2424.22/s (0.453s, 2259.70/s) LR: 1.729e-02 Data: 0.034 (0.063) +Train: 130 [ 100/312 ( 32%)] Loss: 3.77 (3.66) Time: 0.421s, 2432.26/s (0.437s, 2341.39/s) LR: 1.729e-02 Data: 0.028 (0.045) +Train: 130 [ 150/312 ( 48%)] Loss: 3.73 (3.66) Time: 0.423s, 2421.94/s (0.432s, 2369.90/s) LR: 1.729e-02 Data: 0.028 (0.040) +Train: 130 [ 200/312 ( 64%)] Loss: 3.66 (3.66) Time: 0.425s, 2408.78/s (0.429s, 2384.62/s) LR: 1.729e-02 Data: 0.027 (0.037) +Train: 130 [ 250/312 ( 80%)] Loss: 3.66 (3.67) Time: 0.421s, 2432.11/s (0.428s, 2393.50/s) LR: 1.729e-02 Data: 0.027 (0.035) +Train: 130 [ 300/312 ( 96%)] Loss: 3.70 (3.68) Time: 0.422s, 2427.75/s (0.427s, 2400.23/s) LR: 1.729e-02 Data: 0.028 (0.034) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.436 (1.436) Loss: 5.216 ( 5.216) Acc@1: 19.727 ( 19.727) Acc@5: 35.254 ( 35.254) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.912 ( 5.155) Acc@1: 19.693 ( 20.214) Acc@5: 37.382 ( 36.124) +Train: 131 [ 0/312 ( 0%)] Loss: 3.71 (3.71) Time: 1.565s, 654.46/s (1.565s, 654.46/s) LR: 1.563e-02 Data: 1.183 (1.183) +Train: 131 [ 50/312 ( 16%)] Loss: 3.58 (3.64) Time: 0.417s, 2456.17/s (0.442s, 2314.63/s) LR: 1.563e-02 Data: 0.030 (0.050) +Train: 131 [ 100/312 ( 32%)] Loss: 3.58 (3.64) Time: 0.423s, 2421.03/s (0.432s, 2370.04/s) LR: 1.563e-02 Data: 0.027 (0.039) +Train: 131 [ 150/312 ( 48%)] Loss: 3.66 (3.65) Time: 0.425s, 2408.35/s (0.429s, 2388.65/s) LR: 1.563e-02 Data: 0.026 (0.035) +Train: 131 [ 200/312 ( 64%)] Loss: 3.74 (3.65) Time: 0.426s, 2404.52/s (0.427s, 2396.67/s) LR: 1.563e-02 Data: 0.028 (0.033) +Train: 131 [ 250/312 ( 80%)] Loss: 3.67 (3.66) Time: 0.422s, 2427.32/s (0.426s, 2402.61/s) LR: 1.563e-02 Data: 0.028 (0.032) +Train: 131 [ 300/312 ( 96%)] Loss: 3.76 (3.66) Time: 0.426s, 2404.46/s (0.425s, 2407.08/s) LR: 1.563e-02 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.449 (1.449) Loss: 5.213 ( 5.213) Acc@1: 19.043 ( 19.043) Acc@5: 35.352 ( 35.352) +Test: [ 48/48] Time: 0.091 (0.329) Loss: 4.899 ( 5.185) Acc@1: 20.283 ( 20.136) Acc@5: 39.033 ( 35.992) +Train: 132 [ 0/312 ( 0%)] Loss: 3.53 (3.53) Time: 1.803s, 568.07/s (1.803s, 568.07/s) LR: 1.404e-02 Data: 1.423 (1.423) +Train: 132 [ 50/312 ( 16%)] Loss: 3.65 (3.61) Time: 0.428s, 2391.79/s (0.445s, 2301.83/s) LR: 1.404e-02 Data: 0.028 (0.055) +Train: 132 [ 100/312 ( 32%)] Loss: 3.76 (3.63) Time: 0.422s, 2428.01/s (0.433s, 2363.69/s) LR: 1.404e-02 Data: 0.027 (0.041) +Train: 132 [ 150/312 ( 48%)] Loss: 3.61 (3.63) Time: 0.422s, 2428.68/s (0.429s, 2386.39/s) LR: 1.404e-02 Data: 0.028 (0.037) +Train: 132 [ 200/312 ( 64%)] Loss: 3.66 (3.64) Time: 0.426s, 2402.58/s (0.427s, 2395.51/s) LR: 1.404e-02 Data: 0.033 (0.034) +Train: 132 [ 250/312 ( 80%)] Loss: 3.61 (3.64) Time: 0.420s, 2439.12/s (0.426s, 2402.55/s) LR: 1.404e-02 Data: 0.027 (0.033) +Train: 132 [ 300/312 ( 96%)] Loss: 3.68 (3.65) Time: 0.416s, 2460.26/s (0.425s, 2407.09/s) LR: 1.404e-02 Data: 0.025 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.455 (1.455) Loss: 5.166 ( 5.166) Acc@1: 20.508 ( 20.508) Acc@5: 37.793 ( 37.793) +Test: [ 48/48] Time: 0.092 (0.334) Loss: 4.902 ( 5.165) Acc@1: 20.755 ( 20.224) Acc@5: 38.443 ( 36.130) +Train: 133 [ 0/312 ( 0%)] Loss: 3.58 (3.58) Time: 1.613s, 634.94/s (1.613s, 634.94/s) LR: 1.254e-02 Data: 1.170 (1.170) +Train: 133 [ 50/312 ( 16%)] Loss: 3.59 (3.60) Time: 0.418s, 2448.06/s (0.443s, 2311.24/s) LR: 1.254e-02 Data: 0.028 (0.050) +Train: 133 [ 100/312 ( 32%)] Loss: 3.52 (3.60) Time: 0.423s, 2422.88/s (0.432s, 
2368.67/s) LR: 1.254e-02 Data: 0.026 (0.039) +Train: 133 [ 150/312 ( 48%)] Loss: 3.68 (3.61) Time: 0.420s, 2435.34/s (0.429s, 2388.08/s) LR: 1.254e-02 Data: 0.027 (0.035) +Train: 133 [ 200/312 ( 64%)] Loss: 3.61 (3.62) Time: 0.420s, 2438.91/s (0.427s, 2398.47/s) LR: 1.254e-02 Data: 0.026 (0.033) +Train: 133 [ 250/312 ( 80%)] Loss: 3.59 (3.62) Time: 0.424s, 2417.56/s (0.426s, 2404.21/s) LR: 1.254e-02 Data: 0.028 (0.032) +Train: 133 [ 300/312 ( 96%)] Loss: 3.66 (3.63) Time: 0.421s, 2431.82/s (0.425s, 2407.79/s) LR: 1.254e-02 Data: 0.030 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.431 (1.431) Loss: 5.227 ( 5.227) Acc@1: 19.238 ( 19.238) Acc@5: 36.133 ( 36.133) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 4.928 ( 5.194) Acc@1: 20.165 ( 20.004) Acc@5: 38.915 ( 36.016) +Train: 134 [ 0/312 ( 0%)] Loss: 3.50 (3.50) Time: 1.548s, 661.39/s (1.548s, 661.39/s) LR: 1.112e-02 Data: 1.171 (1.171) +Train: 134 [ 50/312 ( 16%)] Loss: 3.71 (3.60) Time: 0.422s, 2425.60/s (0.442s, 2318.24/s) LR: 1.112e-02 Data: 0.026 (0.050) +Train: 134 [ 100/312 ( 32%)] Loss: 3.56 (3.61) Time: 0.425s, 2406.91/s (0.432s, 2372.24/s) LR: 1.112e-02 Data: 0.033 (0.039) +Train: 134 [ 150/312 ( 48%)] Loss: 3.59 (3.61) Time: 0.420s, 2438.63/s (0.428s, 2391.10/s) LR: 1.112e-02 Data: 0.028 (0.035) +Train: 134 [ 200/312 ( 64%)] Loss: 3.70 (3.61) Time: 0.420s, 2440.00/s (0.426s, 2401.05/s) LR: 1.112e-02 Data: 0.028 (0.033) +Train: 134 [ 250/312 ( 80%)] Loss: 3.67 (3.62) Time: 0.423s, 2422.95/s (0.425s, 2406.77/s) LR: 1.112e-02 Data: 0.028 (0.032) +Train: 134 [ 300/312 ( 96%)] Loss: 3.61 (3.62) Time: 0.416s, 2459.43/s (0.425s, 2410.94/s) LR: 1.112e-02 Data: 0.026 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.438 (1.438) Loss: 5.181 ( 5.181) Acc@1: 18.750 ( 18.750) Acc@5: 36.426 ( 36.426) +Test: [ 48/48] Time: 0.091 (0.331) Loss: 4.878 ( 5.165) Acc@1: 20.755 ( 20.172) Acc@5: 38.325 ( 36.218) +Train: 135 [ 0/312 ( 0%)] Loss: 3.50 (3.50) Time: 1.699s, 602.75/s (1.699s, 602.75/s) LR: 9.789e-03 Data: 1.318 (1.318) +Train: 135 [ 50/312 ( 16%)] Loss: 3.56 (3.60) Time: 0.424s, 2413.53/s (0.445s, 2299.63/s) LR: 9.789e-03 Data: 0.027 (0.053) +Train: 135 [ 100/312 ( 32%)] Loss: 3.69 (3.60) Time: 0.418s, 2450.21/s (0.434s, 2361.43/s) LR: 9.789e-03 Data: 0.027 (0.041) +Train: 135 [ 150/312 ( 48%)] Loss: 3.58 (3.60) Time: 0.422s, 2427.91/s (0.429s, 2384.22/s) LR: 9.789e-03 Data: 0.028 (0.036) +Train: 135 [ 200/312 ( 64%)] Loss: 3.49 (3.61) Time: 0.421s, 2433.15/s (0.427s, 2395.80/s) LR: 9.789e-03 Data: 0.027 (0.034) +Train: 135 [ 250/312 ( 80%)] Loss: 3.66 (3.61) Time: 0.421s, 2431.65/s (0.426s, 2403.27/s) LR: 9.789e-03 Data: 0.028 (0.033) +Train: 135 [ 300/312 ( 96%)] Loss: 3.64 (3.61) Time: 0.420s, 2438.75/s (0.425s, 2407.79/s) LR: 9.789e-03 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.444 (1.444) Loss: 5.145 ( 5.145) Acc@1: 20.312 ( 20.312) Acc@5: 36.133 ( 36.133) +Test: [ 48/48] Time: 0.092 (0.332) Loss: 4.893 ( 5.150) Acc@1: 20.755 ( 20.432) Acc@5: 38.208 ( 36.224) +Train: 136 [ 0/312 ( 0%)] Loss: 3.56 (3.56) Time: 1.536s, 666.70/s (1.536s, 666.70/s) LR: 8.536e-03 Data: 1.126 (1.126) +Train: 136 [ 50/312 ( 16%)] Loss: 3.63 (3.57) Time: 0.422s, 2426.28/s (0.441s, 2321.62/s) LR: 8.536e-03 Data: 0.028 (0.049) +Train: 136 [ 100/312 ( 32%)] Loss: 3.63 (3.57) Time: 0.420s, 2437.13/s (0.431s, 2375.90/s) LR: 8.536e-03 Data: 0.029 (0.038) +Train: 136 [ 150/312 ( 48%)] Loss: 3.66 (3.58) Time: 0.421s, 2432.55/s (0.428s, 2393.82/s) LR: 8.536e-03 Data: 
0.026 (0.035) +Train: 136 [ 200/312 ( 64%)] Loss: 3.62 (3.58) Time: 0.417s, 2453.26/s (0.426s, 2403.53/s) LR: 8.536e-03 Data: 0.028 (0.033) +Train: 136 [ 250/312 ( 80%)] Loss: 3.57 (3.59) Time: 0.425s, 2408.19/s (0.425s, 2409.23/s) LR: 8.536e-03 Data: 0.027 (0.032) +Train: 136 [ 300/312 ( 96%)] Loss: 3.66 (3.59) Time: 0.423s, 2419.34/s (0.424s, 2413.00/s) LR: 8.536e-03 Data: 0.026 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.463 (1.463) Loss: 5.206 ( 5.206) Acc@1: 19.434 ( 19.434) Acc@5: 36.230 ( 36.230) +Test: [ 48/48] Time: 0.091 (0.329) Loss: 4.907 ( 5.183) Acc@1: 20.283 ( 20.040) Acc@5: 38.915 ( 36.068) +Train: 137 [ 0/312 ( 0%)] Loss: 3.57 (3.57) Time: 1.651s, 620.35/s (1.651s, 620.35/s) LR: 7.367e-03 Data: 1.170 (1.170) +Train: 137 [ 50/312 ( 16%)] Loss: 3.59 (3.57) Time: 0.420s, 2436.60/s (0.444s, 2308.84/s) LR: 7.367e-03 Data: 0.028 (0.050) +Train: 137 [ 100/312 ( 32%)] Loss: 3.56 (3.57) Time: 0.425s, 2406.60/s (0.433s, 2367.28/s) LR: 7.367e-03 Data: 0.027 (0.039) +Train: 137 [ 150/312 ( 48%)] Loss: 3.50 (3.57) Time: 0.421s, 2433.30/s (0.429s, 2386.83/s) LR: 7.367e-03 Data: 0.027 (0.035) +Train: 137 [ 200/312 ( 64%)] Loss: 3.68 (3.58) Time: 0.421s, 2433.37/s (0.427s, 2397.66/s) LR: 7.367e-03 Data: 0.029 (0.033) +Train: 137 [ 250/312 ( 80%)] Loss: 3.61 (3.58) Time: 0.421s, 2430.00/s (0.426s, 2403.14/s) LR: 7.367e-03 Data: 0.027 (0.032) +Train: 137 [ 300/312 ( 96%)] Loss: 3.58 (3.58) Time: 0.429s, 2389.54/s (0.426s, 2406.47/s) LR: 7.367e-03 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.432 (1.432) Loss: 5.211 ( 5.211) Acc@1: 19.727 ( 19.727) Acc@5: 35.645 ( 35.645) +Test: [ 48/48] Time: 0.092 (0.329) Loss: 4.943 ( 5.192) Acc@1: 19.811 ( 19.936) Acc@5: 37.146 ( 35.868) +Train: 138 [ 0/312 ( 0%)] Loss: 3.58 (3.58) Time: 1.670s, 613.30/s (1.670s, 613.30/s) LR: 6.283e-03 Data: 1.290 (1.290) +Train: 138 [ 50/312 ( 16%)] Loss: 3.62 (3.55) Time: 0.423s, 2419.34/s (0.443s, 2309.80/s) LR: 6.283e-03 Data: 0.025 (0.053) +Train: 138 [ 100/312 ( 32%)] Loss: 3.45 (3.55) Time: 0.419s, 2442.24/s (0.432s, 2368.67/s) LR: 6.283e-03 Data: 0.027 (0.041) +Train: 138 [ 150/312 ( 48%)] Loss: 3.56 (3.56) Time: 0.424s, 2415.26/s (0.429s, 2388.19/s) LR: 6.283e-03 Data: 0.024 (0.036) +Train: 138 [ 200/312 ( 64%)] Loss: 3.48 (3.57) Time: 0.424s, 2415.85/s (0.427s, 2398.53/s) LR: 6.283e-03 Data: 0.028 (0.034) +Train: 138 [ 250/312 ( 80%)] Loss: 3.54 (3.57) Time: 0.421s, 2429.99/s (0.426s, 2404.18/s) LR: 6.283e-03 Data: 0.029 (0.033) +Train: 138 [ 300/312 ( 96%)] Loss: 3.64 (3.57) Time: 0.420s, 2435.42/s (0.425s, 2408.64/s) LR: 6.283e-03 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.449 (1.449) Loss: 5.174 ( 5.174) Acc@1: 19.238 ( 19.238) Acc@5: 36.523 ( 36.523) +Test: [ 48/48] Time: 0.091 (0.332) Loss: 4.903 ( 5.164) Acc@1: 20.047 ( 20.136) Acc@5: 38.208 ( 36.190) +Train: 139 [ 0/312 ( 0%)] Loss: 3.51 (3.51) Time: 1.647s, 621.87/s (1.647s, 621.87/s) LR: 5.284e-03 Data: 1.267 (1.267) +Train: 139 [ 50/312 ( 16%)] Loss: 3.63 (3.55) Time: 0.423s, 2422.18/s (0.447s, 2291.84/s) LR: 5.284e-03 Data: 0.027 (0.055) +Train: 139 [ 100/312 ( 32%)] Loss: 3.58 (3.55) Time: 0.423s, 2421.11/s (0.434s, 2357.14/s) LR: 5.284e-03 Data: 0.030 (0.041) +Train: 139 [ 150/312 ( 48%)] Loss: 3.46 (3.55) Time: 0.424s, 2412.34/s (0.430s, 2380.93/s) LR: 5.284e-03 Data: 0.027 (0.037) +Train: 139 [ 200/312 ( 64%)] Loss: 3.54 (3.56) Time: 0.426s, 2404.73/s (0.428s, 2392.64/s) LR: 5.284e-03 Data: 0.023 (0.034) +Train: 139 [ 
250/312 ( 80%)] Loss: 3.69 (3.56) Time: 0.418s, 2451.79/s (0.427s, 2399.10/s) LR: 5.284e-03 Data: 0.025 (0.033) +Train: 139 [ 300/312 ( 96%)] Loss: 3.43 (3.56) Time: 0.425s, 2409.14/s (0.426s, 2404.52/s) LR: 5.284e-03 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.447 (1.447) Loss: 5.221 ( 5.221) Acc@1: 19.141 ( 19.141) Acc@5: 35.254 ( 35.254) +Test: [ 48/48] Time: 0.091 (0.330) Loss: 4.938 ( 5.200) Acc@1: 19.458 ( 19.880) Acc@5: 36.675 ( 35.760) +Train: 140 [ 0/312 ( 0%)] Loss: 3.46 (3.46) Time: 1.643s, 623.21/s (1.643s, 623.21/s) LR: 4.370e-03 Data: 1.263 (1.263) +Train: 140 [ 50/312 ( 16%)] Loss: 3.59 (3.54) Time: 0.422s, 2425.15/s (0.444s, 2307.09/s) LR: 4.370e-03 Data: 0.028 (0.052) +Train: 140 [ 100/312 ( 32%)] Loss: 3.52 (3.54) Time: 0.421s, 2434.96/s (0.432s, 2368.28/s) LR: 4.370e-03 Data: 0.028 (0.040) +Train: 140 [ 150/312 ( 48%)] Loss: 3.50 (3.55) Time: 0.418s, 2446.96/s (0.429s, 2389.67/s) LR: 4.370e-03 Data: 0.027 (0.036) +Train: 140 [ 200/312 ( 64%)] Loss: 3.54 (3.55) Time: 0.419s, 2443.37/s (0.427s, 2397.68/s) LR: 4.370e-03 Data: 0.028 (0.034) +Train: 140 [ 250/312 ( 80%)] Loss: 3.54 (3.55) Time: 0.422s, 2425.87/s (0.426s, 2404.61/s) LR: 4.370e-03 Data: 0.028 (0.033) +Train: 140 [ 300/312 ( 96%)] Loss: 3.53 (3.55) Time: 0.425s, 2410.01/s (0.425s, 2408.43/s) LR: 4.370e-03 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.442 (1.442) Loss: 5.180 ( 5.180) Acc@1: 20.020 ( 20.020) Acc@5: 35.547 ( 35.547) +Test: [ 48/48] Time: 0.092 (0.332) Loss: 4.921 ( 5.161) Acc@1: 20.283 ( 20.282) Acc@5: 37.854 ( 36.246) +Train: 141 [ 0/312 ( 0%)] Loss: 3.45 (3.45) Time: 1.598s, 640.81/s (1.598s, 640.81/s) LR: 3.543e-03 Data: 1.218 (1.218) +Train: 141 [ 50/312 ( 16%)] Loss: 3.56 (3.52) Time: 0.424s, 2414.30/s (0.443s, 2309.16/s) LR: 3.543e-03 Data: 0.025 (0.051) +Train: 141 [ 100/312 ( 32%)] Loss: 3.52 (3.53) Time: 0.422s, 2426.98/s (0.433s, 2367.20/s) LR: 3.543e-03 Data: 0.026 (0.039) +Train: 141 [ 150/312 ( 48%)] Loss: 3.49 (3.53) Time: 0.424s, 2412.58/s (0.429s, 2387.51/s) LR: 3.543e-03 Data: 0.029 (0.035) +Train: 141 [ 200/312 ( 64%)] Loss: 3.52 (3.54) Time: 0.420s, 2437.74/s (0.427s, 2397.95/s) LR: 3.543e-03 Data: 0.028 (0.033) +Train: 141 [ 250/312 ( 80%)] Loss: 3.47 (3.54) Time: 0.421s, 2432.08/s (0.426s, 2403.13/s) LR: 3.543e-03 Data: 0.029 (0.032) +Train: 141 [ 300/312 ( 96%)] Loss: 3.57 (3.54) Time: 0.421s, 2429.77/s (0.425s, 2408.22/s) LR: 3.543e-03 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.439 (1.439) Loss: 5.205 ( 5.205) Acc@1: 20.020 ( 20.020) Acc@5: 35.938 ( 35.938) +Test: [ 48/48] Time: 0.091 (0.329) Loss: 4.950 ( 5.198) Acc@1: 19.458 ( 19.894) Acc@5: 38.325 ( 35.892) +Train: 142 [ 0/312 ( 0%)] Loss: 3.58 (3.58) Time: 1.661s, 616.47/s (1.661s, 616.47/s) LR: 2.801e-03 Data: 1.281 (1.281) +Train: 142 [ 50/312 ( 16%)] Loss: 3.41 (3.51) Time: 0.422s, 2425.87/s (0.445s, 2298.70/s) LR: 2.801e-03 Data: 0.028 (0.052) +Train: 142 [ 100/312 ( 32%)] Loss: 3.63 (3.53) Time: 0.419s, 2444.36/s (0.433s, 2363.41/s) LR: 2.801e-03 Data: 0.028 (0.040) +Train: 142 [ 150/312 ( 48%)] Loss: 3.53 (3.53) Time: 0.425s, 2410.72/s (0.429s, 2384.96/s) LR: 2.801e-03 Data: 0.029 (0.036) +Train: 142 [ 200/312 ( 64%)] Loss: 3.54 (3.54) Time: 0.423s, 2422.25/s (0.427s, 2397.19/s) LR: 2.801e-03 Data: 0.028 (0.034) +Train: 142 [ 250/312 ( 80%)] Loss: 3.51 (3.54) Time: 0.421s, 2433.66/s (0.426s, 2404.20/s) LR: 2.801e-03 Data: 0.026 (0.032) +Train: 142 [ 300/312 ( 96%)] Loss: 3.64 
(3.54) Time: 0.421s, 2434.79/s (0.425s, 2408.10/s) LR: 2.801e-03 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.469 (1.469) Loss: 5.174 ( 5.174) Acc@1: 19.238 ( 19.238) Acc@5: 37.109 ( 37.109) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.905 ( 5.150) Acc@1: 20.165 ( 20.260) Acc@5: 38.443 ( 36.396) +Train: 143 [ 0/312 ( 0%)] Loss: 3.57 (3.57) Time: 1.770s, 578.48/s (1.770s, 578.48/s) LR: 2.146e-03 Data: 1.389 (1.389) +Train: 143 [ 50/312 ( 16%)] Loss: 3.55 (3.54) Time: 0.418s, 2452.16/s (0.445s, 2299.44/s) LR: 2.146e-03 Data: 0.028 (0.054) +Train: 143 [ 100/312 ( 32%)] Loss: 3.52 (3.53) Time: 0.421s, 2430.62/s (0.434s, 2357.87/s) LR: 2.146e-03 Data: 0.026 (0.041) +Train: 143 [ 150/312 ( 48%)] Loss: 3.55 (3.53) Time: 0.420s, 2437.57/s (0.430s, 2381.04/s) LR: 2.146e-03 Data: 0.026 (0.036) +Train: 143 [ 200/312 ( 64%)] Loss: 3.50 (3.53) Time: 0.423s, 2420.28/s (0.428s, 2393.44/s) LR: 2.146e-03 Data: 0.026 (0.034) +Train: 143 [ 250/312 ( 80%)] Loss: 3.52 (3.53) Time: 0.420s, 2438.92/s (0.426s, 2401.09/s) LR: 2.146e-03 Data: 0.027 (0.033) +Train: 143 [ 300/312 ( 96%)] Loss: 3.49 (3.53) Time: 0.430s, 2381.55/s (0.426s, 2405.37/s) LR: 2.146e-03 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.428 (1.428) Loss: 5.218 ( 5.218) Acc@1: 18.945 ( 18.945) Acc@5: 35.645 ( 35.645) +Test: [ 48/48] Time: 0.092 (0.329) Loss: 4.922 ( 5.188) Acc@1: 20.637 ( 19.920) Acc@5: 37.736 ( 36.028) +Train: 144 [ 0/312 ( 0%)] Loss: 3.45 (3.45) Time: 1.639s, 624.61/s (1.639s, 624.61/s) LR: 1.577e-03 Data: 1.258 (1.258) +Train: 144 [ 50/312 ( 16%)] Loss: 3.47 (3.53) Time: 0.419s, 2442.83/s (0.442s, 2315.05/s) LR: 1.577e-03 Data: 0.027 (0.051) +Train: 144 [ 100/312 ( 32%)] Loss: 3.46 (3.52) Time: 0.420s, 2437.51/s (0.431s, 2373.65/s) LR: 1.577e-03 Data: 0.026 (0.040) +Train: 144 [ 150/312 ( 48%)] Loss: 3.57 (3.52) Time: 0.420s, 2436.77/s (0.428s, 2392.33/s) LR: 1.577e-03 Data: 0.027 (0.036) +Train: 144 [ 200/312 ( 64%)] Loss: 3.57 (3.52) Time: 0.421s, 2432.54/s (0.427s, 2400.38/s) LR: 1.577e-03 Data: 0.029 (0.034) +Train: 144 [ 250/312 ( 80%)] Loss: 3.64 (3.53) Time: 0.420s, 2438.16/s (0.426s, 2405.69/s) LR: 1.577e-03 Data: 0.029 (0.033) +Train: 144 [ 300/312 ( 96%)] Loss: 3.54 (3.53) Time: 0.419s, 2441.19/s (0.425s, 2409.76/s) LR: 1.577e-03 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.459 (1.459) Loss: 5.220 ( 5.220) Acc@1: 19.336 ( 19.336) Acc@5: 35.352 ( 35.352) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 4.921 ( 5.187) Acc@1: 19.811 ( 19.932) Acc@5: 37.736 ( 35.948) +Train: 145 [ 0/312 ( 0%)] Loss: 3.46 (3.46) Time: 1.574s, 650.48/s (1.574s, 650.48/s) LR: 1.096e-03 Data: 1.194 (1.194) +Train: 145 [ 50/312 ( 16%)] Loss: 3.54 (3.52) Time: 0.423s, 2419.88/s (0.443s, 2314.05/s) LR: 1.096e-03 Data: 0.026 (0.050) +Train: 145 [ 100/312 ( 32%)] Loss: 3.63 (3.52) Time: 0.425s, 2408.34/s (0.432s, 2370.09/s) LR: 1.096e-03 Data: 0.028 (0.039) +Train: 145 [ 150/312 ( 48%)] Loss: 3.57 (3.52) Time: 0.423s, 2419.07/s (0.429s, 2388.05/s) LR: 1.096e-03 Data: 0.027 (0.035) +Train: 145 [ 200/312 ( 64%)] Loss: 3.56 (3.52) Time: 0.420s, 2439.51/s (0.427s, 2398.61/s) LR: 1.096e-03 Data: 0.027 (0.033) +Train: 145 [ 250/312 ( 80%)] Loss: 3.41 (3.52) Time: 0.422s, 2426.41/s (0.426s, 2404.32/s) LR: 1.096e-03 Data: 0.027 (0.032) +Train: 145 [ 300/312 ( 96%)] Loss: 3.61 (3.52) Time: 0.417s, 2453.94/s (0.425s, 2408.13/s) LR: 1.096e-03 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.447 
(1.447) Loss: 5.237 ( 5.237) Acc@1: 19.336 ( 19.336) Acc@5: 35.352 ( 35.352) +Test: [ 48/48] Time: 0.092 (0.330) Loss: 4.935 ( 5.202) Acc@1: 20.165 ( 19.904) Acc@5: 37.736 ( 35.876) +Train: 146 [ 0/312 ( 0%)] Loss: 3.44 (3.44) Time: 1.607s, 637.06/s (1.607s, 637.06/s) LR: 7.014e-04 Data: 1.227 (1.227) +Train: 146 [ 50/312 ( 16%)] Loss: 3.49 (3.51) Time: 0.415s, 2465.73/s (0.441s, 2321.67/s) LR: 7.014e-04 Data: 0.024 (0.051) +Train: 146 [ 100/312 ( 32%)] Loss: 3.57 (3.52) Time: 0.421s, 2434.85/s (0.431s, 2374.63/s) LR: 7.014e-04 Data: 0.027 (0.039) +Train: 146 [ 150/312 ( 48%)] Loss: 3.46 (3.52) Time: 0.421s, 2429.54/s (0.428s, 2393.51/s) LR: 7.014e-04 Data: 0.028 (0.035) +Train: 146 [ 200/312 ( 64%)] Loss: 3.53 (3.52) Time: 0.419s, 2444.69/s (0.426s, 2404.00/s) LR: 7.014e-04 Data: 0.027 (0.033) +Train: 146 [ 250/312 ( 80%)] Loss: 3.56 (3.52) Time: 0.423s, 2419.44/s (0.425s, 2409.45/s) LR: 7.014e-04 Data: 0.028 (0.032) +Train: 146 [ 300/312 ( 96%)] Loss: 3.49 (3.52) Time: 0.428s, 2393.72/s (0.424s, 2412.84/s) LR: 7.014e-04 Data: 0.037 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.451 (1.451) Loss: 5.230 ( 5.230) Acc@1: 19.238 ( 19.238) Acc@5: 35.254 ( 35.254) +Test: [ 48/48] Time: 0.092 (0.331) Loss: 4.937 ( 5.203) Acc@1: 19.929 ( 19.890) Acc@5: 37.972 ( 35.818) +Train: 147 [ 0/312 ( 0%)] Loss: 3.53 (3.53) Time: 1.559s, 656.93/s (1.559s, 656.93/s) LR: 3.947e-04 Data: 1.052 (1.052) +Train: 147 [ 50/312 ( 16%)] Loss: 3.44 (3.52) Time: 0.423s, 2422.97/s (0.443s, 2311.44/s) LR: 3.947e-04 Data: 0.029 (0.048) +Train: 147 [ 100/312 ( 32%)] Loss: 3.44 (3.51) Time: 0.427s, 2395.33/s (0.432s, 2369.70/s) LR: 3.947e-04 Data: 0.028 (0.038) +Train: 147 [ 150/312 ( 48%)] Loss: 3.56 (3.52) Time: 0.423s, 2418.38/s (0.429s, 2388.89/s) LR: 3.947e-04 Data: 0.027 (0.034) +Train: 147 [ 200/312 ( 64%)] Loss: 3.58 (3.52) Time: 0.424s, 2417.45/s (0.427s, 2396.93/s) LR: 3.947e-04 Data: 0.029 (0.033) +Train: 147 [ 250/312 ( 80%)] Loss: 3.37 (3.52) Time: 0.422s, 2425.17/s (0.426s, 2402.66/s) LR: 3.947e-04 Data: 0.028 (0.032) +Train: 147 [ 300/312 ( 96%)] Loss: 3.40 (3.52) Time: 0.427s, 2395.37/s (0.425s, 2407.22/s) LR: 3.947e-04 Data: 0.026 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.438 (1.438) Loss: 5.215 ( 5.215) Acc@1: 19.434 ( 19.434) Acc@5: 35.938 ( 35.938) +Test: [ 48/48] Time: 0.092 (0.334) Loss: 4.917 ( 5.187) Acc@1: 20.047 ( 20.090) Acc@5: 38.090 ( 36.018) +Train: 148 [ 0/312 ( 0%)] Loss: 3.47 (3.47) Time: 1.683s, 608.60/s (1.683s, 608.60/s) LR: 1.754e-04 Data: 1.304 (1.304) +Train: 148 [ 50/312 ( 16%)] Loss: 3.41 (3.51) Time: 0.418s, 2450.17/s (0.445s, 2300.89/s) LR: 1.754e-04 Data: 0.024 (0.052) +Train: 148 [ 100/312 ( 32%)] Loss: 3.62 (3.51) Time: 0.425s, 2408.73/s (0.433s, 2362.19/s) LR: 1.754e-04 Data: 0.028 (0.040) +Train: 148 [ 150/312 ( 48%)] Loss: 3.44 (3.51) Time: 0.422s, 2427.50/s (0.430s, 2383.67/s) LR: 1.754e-04 Data: 0.033 (0.036) +Train: 148 [ 200/312 ( 64%)] Loss: 3.50 (3.51) Time: 0.420s, 2437.12/s (0.428s, 2393.78/s) LR: 1.754e-04 Data: 0.027 (0.034) +Train: 148 [ 250/312 ( 80%)] Loss: 3.44 (3.51) Time: 0.422s, 2428.85/s (0.427s, 2399.42/s) LR: 1.754e-04 Data: 0.028 (0.033) +Train: 148 [ 300/312 ( 96%)] Loss: 3.42 (3.51) Time: 0.421s, 2434.33/s (0.426s, 2404.13/s) LR: 1.754e-04 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.446 (1.446) Loss: 5.213 ( 5.213) Acc@1: 19.824 ( 19.824) Acc@5: 35.742 ( 35.742) +Test: [ 48/48] Time: 0.092 (0.332) Loss: 4.922 ( 5.187) Acc@1: 20.047 ( 20.016) 
Acc@5: 38.561 ( 35.972) +Train: 149 [ 0/312 ( 0%)] Loss: 3.45 (3.45) Time: 1.754s, 583.66/s (1.754s, 583.66/s) LR: 4.386e-05 Data: 1.169 (1.169) +Train: 149 [ 50/312 ( 16%)] Loss: 3.40 (3.50) Time: 0.423s, 2422.27/s (0.446s, 2297.22/s) LR: 4.386e-05 Data: 0.028 (0.049) +Train: 149 [ 100/312 ( 32%)] Loss: 3.43 (3.50) Time: 0.425s, 2407.39/s (0.434s, 2361.70/s) LR: 4.386e-05 Data: 0.029 (0.038) +Train: 149 [ 150/312 ( 48%)] Loss: 3.49 (3.51) Time: 0.424s, 2415.17/s (0.430s, 2384.00/s) LR: 4.386e-05 Data: 0.028 (0.035) +Train: 149 [ 200/312 ( 64%)] Loss: 3.46 (3.51) Time: 0.420s, 2435.78/s (0.428s, 2393.32/s) LR: 4.386e-05 Data: 0.027 (0.033) +Train: 149 [ 250/312 ( 80%)] Loss: 3.54 (3.51) Time: 0.423s, 2422.30/s (0.427s, 2400.60/s) LR: 4.386e-05 Data: 0.028 (0.032) +Train: 149 [ 300/312 ( 96%)] Loss: 3.47 (3.51) Time: 0.423s, 2421.28/s (0.426s, 2404.86/s) LR: 4.386e-05 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.441 (1.441) Loss: 5.208 ( 5.208) Acc@1: 18.945 ( 18.945) Acc@5: 35.938 ( 35.938) +Test: [ 48/48] Time: 0.092 (0.329) Loss: 4.930 ( 5.191) Acc@1: 20.165 ( 19.972) Acc@5: 38.208 ( 36.094) +*** Best metric: 30.15600001953125 (epoch 59) +--result +[ + { + "epoch": 61, + "train": { + "loss": 5.384984970092773 + }, + "validation": { + "loss": 4.142473132171631, + "top1": 29.25200003051758, + "top5": 50.66400000366211 + } + }, + { + "epoch": 57, + "train": { + "loss": 5.473479747772217 + }, + "validation": { + "loss": 4.173910147705078, + "top1": 29.294000010375978, + "top5": 51.52399995849609 + } + }, + { + "epoch": 50, + "train": { + "loss": 5.626305103302002 + }, + "validation": { + "loss": 4.127080610122681, + "top1": 29.295999973144532, + "top5": 51.73399998535156 + } + }, + { + "epoch": 56, + "train": { + "loss": 5.494744300842285 + }, + "validation": { + "loss": 4.162322541198731, + "top1": 29.330000010375976, + "top5": 50.95200004150391 + } + }, + { + "epoch": 54, + "train": { + "loss": 5.538029670715332 + }, + "validation": { + "loss": 4.130886701507569, + "top1": 29.330000024414062, + "top5": 51.35399999023438 + } + }, + { + "epoch": 47, + "train": { + "loss": 5.691268444061279 + }, + "validation": { + "loss": 4.099962968597412, + "top1": 29.39800007080078, + "top5": 52.71400003417969 + } + }, + { + "epoch": 41, + "train": { + "loss": 5.821564674377441 + }, + "validation": { + "loss": 4.1039053334045414, + "top1": 29.65000002319336, + "top5": 53.75599997314453 + } + }, + { + "epoch": 45, + "train": { + "loss": 5.736339569091797 + }, + "validation": { + "loss": 4.124713208160401, + "top1": 29.761999970703126, + "top5": 52.88199995117188 + } + }, + { + "epoch": 46, + "train": { + "loss": 5.714443206787109 + }, + "validation": { + "loss": 4.118756468887329, + "top1": 29.864000028076173, + "top5": 53.24200000488281 + } + }, + { + "epoch": 59, + "train": { + "loss": 5.426791667938232 + }, + "validation": { + "loss": 4.113894800415039, + "top1": 30.15600001953125, + "top5": 52.042000047607424 + } + } +] diff --git a/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/files/requirements.txt b/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d388cd950a4a88b1d9f37efc5c83fee7de76a1be --- /dev/null +++ b/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/files/requirements.txt @@ -0,0 +1,108 @@ +GitPython==3.1.44 +MarkupSafe==2.1.5 +PyYAML==6.0.2 +aiofiles==23.2.1 +aiohappyeyeballs==2.4.6 +aiohttp==3.11.12 +aiosignal==1.3.2 
+annotated-types==0.7.0 +anyio==4.8.0 +async-timeout==5.0.1 +attrs==25.1.0 +certifi==2025.1.31 +charset-normalizer==3.4.1 +click==8.1.8 +contourpy==1.3.0 +cycler==0.12.1 +datasets==3.3.2 +dill==0.3.8 +docker-pycreds==0.4.0 +eval_type_backport==0.2.2 +exceptiongroup==1.2.2 +fastapi==0.115.8 +ffmpy==0.5.0 +filelock==3.17.0 +fonttools==4.56.0 +frozenlist==1.5.0 +fsspec==2024.12.0 +gitdb==4.0.12 +gradio==4.44.1 +gradio_client==1.3.0 +h11==0.14.0 +httpcore==1.0.7 +httpx==0.28.1 +huggingface-hub==0.29.1 +idna==3.10 +importlib_metadata==8.6.1 +importlib_resources==6.5.2 +Jinja2==3.1.5 +kiwisolver==1.4.7 +markdown-it-py==3.0.0 +matplotlib==3.9.4 +mdurl==0.1.2 +multidict==6.1.0 +multiprocess==0.70.16 +numpy==2.0.2 +orjson==3.10.15 +packaging==24.2 +pandas==2.2.3 +pillow==10.4.0 +platformdirs==4.3.6 +propcache==0.3.0 +protobuf==5.29.3 +psutil==7.0.0 +pyarrow==19.0.1 +pydantic==2.10.6 +pydantic_core==2.27.2 +pydub==0.25.1 +Pygments==2.19.1 +pyparsing==3.2.1 +python-dateutil==2.9.0.post0 +python-multipart==0.0.20 +pytz==2025.1 +requests==2.32.3 +rich==13.9.4 +ruff==0.9.7 +semantic-version==2.10.0 +sentry-sdk==2.22.0 +setproctitle==1.3.4 +shellingham==1.5.4 +six==1.17.0 +smmap==5.0.2 +sniffio==1.3.1 +starlette==0.45.3 +tomlkit==0.12.0 +tqdm==4.67.1 +typer==0.15.1 +typing_extensions==4.12.2 +tzdata==2025.1 +urllib3==2.3.0 +uvicorn==0.34.0 +wandb==0.19.7 +websockets==12.0 +xxhash==3.5.0 +yarl==1.18.3 +zipp==3.21.0 +mpmath==1.3.0 +networkx==3.2.1 +nvidia-cublas-cu12==12.4.5.8 +nvidia-cuda-cupti-cu12==12.4.127 +nvidia-cuda-nvrtc-cu12==12.4.127 +nvidia-cuda-runtime-cu12==12.4.127 +nvidia-cudnn-cu12==9.1.0.70 +nvidia-cufft-cu12==11.2.1.3 +nvidia-curand-cu12==10.3.5.147 +nvidia-cusolver-cu12==11.6.1.9 +nvidia-cusparse-cu12==12.3.1.170 +nvidia-cusparselt-cu12==0.6.2 +nvidia-nccl-cu12==2.21.5 +nvidia-nvjitlink-cu12==12.4.127 +nvidia-nvtx-cu12==12.4.127 +safetensors==0.5.2 +sympy==1.13.1 +torch==2.6.0 +torchvision==0.21.0 +triton==3.2.0 +pip==23.0.1 +setuptools==58.1.0 +wheel==0.45.1 diff --git a/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/files/wandb-metadata.json b/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..e3c4e9b2ad56d87073a9c1e39ea0fae84afd8e3c --- /dev/null +++ b/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/files/wandb-metadata.json @@ -0,0 +1,88 @@ +{ + "os": "Linux-5.10.233-224.894.amzn2.x86_64-x86_64-with-glibc2.36", + "python": "CPython 3.9.21", + "startedAt": "2025-02-22T01:50:06.786561Z", + "args": [ + "--dataset", + "hfds/datacomp/imagenet-1k-random-60.0-frac-1over4", + "--log-wandb", + "--wandb-project", + "ImageNetTraining60.0-frac-1over4", + "--experiment", + "ImageNetTraining60.0-frac-1over4", + "--model", + "seresnet34", + "--sched", + "cosine", + "--epochs", + "150", + "--warmup-epochs", + "5", + "--lr", + "0.4", + "--reprob", + "0.5", + "--remode", + "pixel", + "--batch-size", + "256", + "--amp", + "-j", + "4" + ], + "program": "/app/pytorch-image-models/train.py", + "codePath": "train.py", + "git": { + "remote": "https://github.com/huggingface/pytorch-image-models.git", + "commit": "e76ea5474db7c0f3cc00424efc6a4e52fb4e945e" + }, + "email": "meg@huggingface.co", + "root": "/app/pytorch-image-models", + "host": "r-datacomp-imagenettraining60-0-frac-1over4-igwikvtl-334fb-fbvt", + "executable": "/usr/local/bin/python3.9", + "codePathLocal": "train.py", + "cpu_count": 24, + "cpu_count_logical": 48, + "gpu": "NVIDIA L4", + "gpu_count": 4, + "disk": { + "/": { + 
"total": "3757625933824", + "used": "129192120320" + } + }, + "memory": { + "total": "195171028992" + }, + "cpu": { + "count": 24, + "countLogical": 48 + }, + "gpu_nvidia": [ + { + "name": "NVIDIA L4", + "memoryTotal": "24152899584", + "cudaCores": 7424, + "architecture": "Ada" + }, + { + "name": "NVIDIA L4", + "memoryTotal": "24152899584", + "cudaCores": 7424, + "architecture": "Ada" + }, + { + "name": "NVIDIA L4", + "memoryTotal": "24152899584", + "cudaCores": 7424, + "architecture": "Ada" + }, + { + "name": "NVIDIA L4", + "memoryTotal": "24152899584", + "cudaCores": 7424, + "architecture": "Ada" + } + ], + "cudaVersion": "12.4" +} \ No newline at end of file diff --git a/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/logs/debug-core.log b/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/logs/debug-core.log new file mode 100644 index 0000000000000000000000000000000000000000..8ee515e5918b7509d2531faa1903f9b558437e73 --- /dev/null +++ b/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/logs/debug-core.log @@ -0,0 +1,7 @@ +{"time":"2025-02-22T01:18:34.559633243Z","level":"INFO","msg":"main: starting server","port-filename":"/tmp/tmpjeq79eze/port-1.txt","pid":1,"log-level":0,"disable-analytics":false,"shutdown-on-parent-exit":false} +{"time":"2025-02-22T01:18:34.560714878Z","level":"INFO","msg":"Will exit if parent process dies.","ppid":1} +{"time":"2025-02-22T01:18:34.560706598Z","level":"INFO","msg":"server is running","addr":{"IP":"127.0.0.1","Port":40337,"Zone":""}} +{"time":"2025-02-22T01:18:34.75069457Z","level":"INFO","msg":"connection: ManageConnectionData: new connection created","id":"127.0.0.1:35730"} +{"time":"2025-02-22T01:50:06.580750913Z","level":"INFO","msg":"connection: ManageConnectionData: new connection created","id":"127.0.0.1:36036"} +{"time":"2025-02-22T01:50:06.787476512Z","level":"INFO","msg":"handleInformInit: received","streamId":"3pv3zoe0","id":"127.0.0.1:36036"} +{"time":"2025-02-22T01:50:06.892644726Z","level":"INFO","msg":"handleInformInit: stream started","streamId":"3pv3zoe0","id":"127.0.0.1:36036"} diff --git a/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/logs/debug-internal.log b/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..22a7ee2db73423355269b65089213c10dd539b34 --- /dev/null +++ b/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/logs/debug-internal.log @@ -0,0 +1,9 @@ +{"time":"2025-02-22T01:50:06.787729747Z","level":"INFO","msg":"stream: starting","core version":"0.19.7","symlink path":"/app/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/logs/debug-core.log"} +{"time":"2025-02-22T01:50:06.892597095Z","level":"INFO","msg":"created new stream","id":"3pv3zoe0"} +{"time":"2025-02-22T01:50:06.892638216Z","level":"INFO","msg":"stream: started","id":"3pv3zoe0"} +{"time":"2025-02-22T01:50:06.892712377Z","level":"INFO","msg":"sender: started","stream_id":"3pv3zoe0"} +{"time":"2025-02-22T01:50:06.892704957Z","level":"INFO","msg":"writer: Do: started","stream_id":"3pv3zoe0"} +{"time":"2025-02-22T01:50:06.892806289Z","level":"INFO","msg":"handler: started","stream_id":"3pv3zoe0"} +{"time":"2025-02-22T01:50:07.093461314Z","level":"INFO","msg":"Starting system monitor"} +{"time":"2025-02-22T08:01:37.094049977Z","level":"ERROR","msg":"monitor: cpu: error sampling metrics: open /proc/156/stat: no such file or directory\nopen /proc/156/status: no such file or directory"} 
+{"time":"2025-02-22T08:01:37.094183951Z","level":"ERROR","msg":"monitor: memory: error sampling metrics: open /proc/156/statm: no such file or directory"} diff --git a/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/logs/debug.log b/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..f0aa447db772ada739b722a5a5e5f83e42d4d30c --- /dev/null +++ b/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/logs/debug.log @@ -0,0 +1,22 @@ +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_setup.py:_flush():67] Current SDK version is 0.19.7 +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_setup.py:_flush():67] Configure stats pid to 156 +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_setup.py:_flush():67] Loading settings from /home/user/.config/wandb/settings +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_setup.py:_flush():67] Loading settings from /app/pytorch-image-models/wandb/settings +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_setup.py:_flush():67] Loading settings from environment variables +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_init.py:setup_run_log_directory():647] Logging user logs to /app/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/logs/debug.log +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_init.py:setup_run_log_directory():648] Logging internal logs to /app/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/logs/debug-internal.log +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_init.py:init():761] calling init triggers +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_init.py:init():766] wandb.init called with sweep_config: {} +config: {'data': None, 'data_dir': None, 'dataset': 'hfds/datacomp/imagenet-1k-random-60.0-frac-1over4', 'train_split': 'train', 'val_split': 'validation', 'train_num_samples': None, 'val_num_samples': None, 'dataset_download': False, 'class_map': '', 'input_img_mode': None, 'input_key': None, 'target_key': None, 'dataset_trust_remote_code': False, 'model': 'seresnet34', 'pretrained': False, 'pretrained_path': None, 'initial_checkpoint': '', 'resume': '', 'no_resume_opt': False, 'num_classes': 1000, 'gp': None, 'img_size': None, 'in_chans': None, 'input_size': None, 'crop_pct': None, 'mean': None, 'std': None, 'interpolation': '', 'batch_size': 256, 'validation_batch_size': None, 'channels_last': False, 'fuser': '', 'grad_accum_steps': 1, 'grad_checkpointing': False, 'fast_norm': False, 'model_kwargs': {}, 'head_init_scale': None, 'head_init_bias': None, 'torchcompile_mode': None, 'torchscript': False, 'torchcompile': None, 'device': 'cuda:0', 'amp': True, 'amp_dtype': 'float16', 'amp_impl': 'native', 'model_dtype': None, 'no_ddp_bb': False, 'synchronize_step': False, 'local_rank': 0, 'device_modules': None, 'opt': 'sgd', 'opt_eps': None, 'opt_betas': None, 'momentum': 0.9, 'weight_decay': 2e-05, 'clip_grad': None, 'clip_mode': 'norm', 'layer_decay': None, 'opt_kwargs': {}, 'sched': 'cosine', 'sched_on_updates': False, 'lr': 0.4, 'lr_base': 0.1, 'lr_base_size': 256, 'lr_base_scale': '', 'lr_noise': None, 'lr_noise_pct': 0.67, 'lr_noise_std': 1.0, 'lr_cycle_mul': 1.0, 'lr_cycle_decay': 0.5, 'lr_cycle_limit': 1, 'lr_k_decay': 1.0, 'warmup_lr': 1e-05, 'min_lr': 0, 'epochs': 150, 'epoch_repeats': 0.0, 'start_epoch': None, 'decay_milestones': [90, 180, 270], 'decay_epochs': 90, 'warmup_epochs': 5, 'warmup_prefix': False, 'cooldown_epochs': 0, 'patience_epochs': 10, 'decay_rate': 0.1, 'no_aug': False, 
'train_crop_mode': None, 'scale': [0.08, 1.0], 'ratio': [0.75, 1.3333333333333333], 'hflip': 0.5, 'vflip': 0.0, 'color_jitter': 0.4, 'color_jitter_prob': None, 'grayscale_prob': None, 'gaussian_blur_prob': None, 'aa': None, 'aug_repeats': 0, 'aug_splits': 0, 'jsd_loss': False, 'bce_loss': False, 'bce_sum': False, 'bce_target_thresh': None, 'bce_pos_weight': None, 'reprob': 0.5, 'remode': 'pixel', 'recount': 1, 'resplit': False, 'mixup': 0.0, 'cutmix': 0.0, 'cutmix_minmax': None, 'mixup_prob': 1.0, 'mixup_switch_prob': 0.5, 'mixup_mode': 'batch', 'mixup_off_epoch': 0, 'smoothing': 0.1, 'train_interpolation': 'random', 'drop': 0.0, 'drop_connect': None, 'drop_path': None, 'drop_block': None, 'bn_momentum': None, 'bn_eps': None, 'sync_bn': False, 'dist_bn': 'reduce', 'split_bn': False, 'model_ema': False, 'model_ema_force_cpu': False, 'model_ema_decay': 0.9998, 'model_ema_warmup': False, 'seed': 42, 'worker_seeding': 'all', 'log_interval': 50, 'recovery_interval': 0, 'checkpoint_hist': 10, 'workers': 4, 'save_images': False, 'pin_mem': False, 'no_prefetcher': False, 'output': '', 'experiment': 'ImageNetTraining60.0-frac-1over4', 'eval_metric': 'top1', 'tta': 0, 'use_multi_epochs_loader': False, 'log_wandb': True, 'wandb_project': 'ImageNetTraining60.0-frac-1over4', 'wandb_tags': [], 'wandb_resume_id': '', 'prefetcher': True, 'distributed': True, 'world_size': 4, 'rank': 0, '_wandb': {}} +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_init.py:init():784] starting backend +2025-02-22 01:50:06,763 INFO MainThread:156 [wandb_init.py:init():788] sending inform_init request +2025-02-22 01:50:06,785 INFO MainThread:156 [backend.py:_multiprocessing_setup():97] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2025-02-22 01:50:06,786 INFO MainThread:156 [wandb_init.py:init():803] backend started and connected +2025-02-22 01:50:06,790 INFO MainThread:156 [wandb_init.py:init():896] updated telemetry +2025-02-22 01:50:06,814 INFO MainThread:156 [wandb_init.py:init():920] communicating run to backend with 90.0 second timeout +2025-02-22 01:50:07,090 INFO MainThread:156 [wandb_init.py:init():995] starting run threads in backend +2025-02-22 01:50:07,170 INFO MainThread:156 [wandb_run.py:_console_start():2377] atexit reg +2025-02-22 01:50:07,171 INFO MainThread:156 [wandb_run.py:_redirect():2227] redirect: wrap_raw +2025-02-22 01:50:07,171 INFO MainThread:156 [wandb_run.py:_redirect():2292] Wrapping output streams. +2025-02-22 01:50:07,171 INFO MainThread:156 [wandb_run.py:_redirect():2317] Redirects installed. +2025-02-22 01:50:07,173 INFO MainThread:156 [wandb_init.py:init():1037] run started, returning control to user process diff --git a/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/run-3pv3zoe0.wandb b/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/run-3pv3zoe0.wandb new file mode 100644 index 0000000000000000000000000000000000000000..9c60a8d793bb0a8e21e74be3ac61e618485776c4 --- /dev/null +++ b/pytorch-image-models/wandb/run-20250222_015006-3pv3zoe0/run-3pv3zoe0.wandb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97c109e9e62906456d50b5d5e52c895a489e8340d673cf50aa0fdde20ef5e4bf +size 5046272
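The block printed after `--result` in the training log above is timm's end-of-run summary: the retained checkpoints (checkpoint_hist: 10 in the recorded config) listed in ascending order of the configured eval_metric ('top1'), so the last entry (epoch 59, top1 30.156) matches the "*** Best metric" line. Below is a minimal sketch of reading that summary back programmatically, assuming the JSON printed after `--result` has been captured to a file; the path name is hypothetical, not something this commit creates.

```python
import json

# Hypothetical path: the JSON list printed after "--result" above, saved to a file
# (timm only prints it to stdout; capturing it is an assumption of this sketch).
RESULTS_PATH = "ImageNetTraining60.0-frac-1over4-result.json"

with open(RESULTS_PATH) as f:
    # Each entry looks like {"epoch": ..., "train": {"loss": ...},
    #                        "validation": {"loss": ..., "top1": ..., "top5": ...}}
    results = json.load(f)

# Pick the checkpoint with the highest validation top-1, mirroring eval_metric='top1'
# in the run config recorded in the wandb debug.log above.
best = max(results, key=lambda r: r["validation"]["top1"])
print(f"best epoch: {best['epoch']}  "
      f"top1: {best['validation']['top1']:.3f}  "
      f"top5: {best['validation']['top5']:.3f}")
```

Run against the summary shown above, this would report epoch 59 with top1 ≈ 30.156, consistent with the checkpoint files (checkpoint-41 through checkpoint-59 era) added by this commit.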