diff --git "a/exp/log/log-train-2023-03-25-21-42-34-5" "b/exp/log/log-train-2023-03-25-21-42-34-5" new file mode 100644--- /dev/null +++ "b/exp/log/log-train-2023-03-25-21-42-34-5" @@ -0,0 +1,23072 @@ +2023-03-25 21:42:34,499 INFO [finetune.py:1046] (5/7) Training started +2023-03-25 21:42:34,499 INFO [finetune.py:1056] (5/7) Device: cuda:5 +2023-03-25 21:42:34,502 INFO [finetune.py:1065] (5/7) {'frame_shift_ms': 10.0, 'allowed_excess_duration_ratio': 0.1, 'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '62e404dd3f3a811d73e424199b3408e309c06e1a', 'k2-git-date': 'Mon Jan 30 02:26:16 2023', 'lhotse-version': '1.12.0.dev+git.3ccfeb7.clean', 'torch-version': '1.13.0', 'torch-cuda-available': True, 'torch-cuda-version': '11.7', 'python-version': '3.8', 'icefall-git-branch': 'master', 'icefall-git-sha1': 'd74822d-dirty', 'icefall-git-date': 'Tue Mar 21 21:35:32 2023', 'icefall-path': '/home/lishaojie/icefall', 'k2-path': '/home/lishaojie/.conda/envs/env_lishaojie/lib/python3.8/site-packages/k2/__init__.py', 'lhotse-path': '/home/lishaojie/.conda/envs/env_lishaojie/lib/python3.8/site-packages/lhotse/__init__.py', 'hostname': 'cnc533', 'IP address': '127.0.1.1'}, 'world_size': 7, 'master_port': 18181, 'tensorboard': True, 'num_epochs': 30, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_streaming/exp1'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'base_lr': 0.004, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,4,3,2,4', 'feedforward_dims': '1024,1024,2048,2048,1024', 'nhead': '8,8,8,8,8', 'encoder_dims': '384,384,384,384,384', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '256,256,256,256,256', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'do_finetune': True, 'init_modules': 'encoder', 'finetune_ckpt': '/home/lishaojie/icefall/egs/commonvoice/ASR/pruned_transducer_stateless7_streaming/exp/english_pretrain/epoch-30.pt', 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 200, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500} +2023-03-25 21:42:34,502 INFO [finetune.py:1067] (5/7) About to create model +2023-03-25 21:42:34,864 INFO [zipformer.py:405] (5/7) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8. 
+2023-03-25 21:42:34,873 INFO [finetune.py:1071] (5/7) Number of model parameters: 70369391
+2023-03-25 21:42:34,873 INFO [finetune.py:626] (5/7) Loading checkpoint from /home/lishaojie/icefall/egs/commonvoice/ASR/pruned_transducer_stateless7_streaming/exp/english_pretrain/epoch-30.pt
+2023-03-25 21:42:35,486 INFO [finetune.py:647] (5/7) Loading parameters starting with prefix encoder
+2023-03-25 21:42:36,940 INFO [finetune.py:1093] (5/7) Using DDP
+2023-03-25 21:42:37,684 INFO [commonvoice_fr.py:392] (5/7) About to get train cuts
+2023-03-25 21:42:37,686 INFO [commonvoice_fr.py:218] (5/7) Enable MUSAN
+2023-03-25 21:42:37,686 INFO [commonvoice_fr.py:219] (5/7) About to get Musan cuts
+2023-03-25 21:42:39,680 INFO [commonvoice_fr.py:243] (5/7) Enable SpecAugment
+2023-03-25 21:42:39,680 INFO [commonvoice_fr.py:244] (5/7) Time warp factor: 80
+2023-03-25 21:42:39,681 INFO [commonvoice_fr.py:254] (5/7) Num frame mask: 10
+2023-03-25 21:42:39,681 INFO [commonvoice_fr.py:267] (5/7) About to create train dataset
+2023-03-25 21:42:39,681 INFO [commonvoice_fr.py:294] (5/7) Using DynamicBucketingSampler.
+2023-03-25 21:42:42,389 INFO [commonvoice_fr.py:309] (5/7) About to create train dataloader
+2023-03-25 21:42:42,389 INFO [commonvoice_fr.py:399] (5/7) About to get dev cuts
+2023-03-25 21:42:42,390 INFO [commonvoice_fr.py:340] (5/7) About to create dev dataset
+2023-03-25 21:42:42,798 INFO [commonvoice_fr.py:357] (5/7) About to create dev dataloader
+2023-03-25 21:42:42,798 INFO [finetune.py:1289] (5/7) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2023-03-25 21:46:46,136 INFO [finetune.py:1317] (5/7) Maximum memory allocated so far is 4747MB
+2023-03-25 21:46:46,828 INFO [finetune.py:1317] (5/7) Maximum memory allocated so far is 5383MB
+2023-03-25 21:46:48,914 INFO [finetune.py:1317] (5/7) Maximum memory allocated so far is 5383MB
+2023-03-25 21:46:49,575 INFO [finetune.py:1317] (5/7) Maximum memory allocated so far is 5383MB
+2023-03-25 21:46:50,266 INFO [finetune.py:1317] (5/7) Maximum memory allocated so far is 5383MB
+2023-03-25 21:46:50,962 INFO [finetune.py:1317] (5/7) Maximum memory allocated so far is 5383MB
+2023-03-25 21:46:59,847 INFO [finetune.py:976] (5/7) Epoch 1, batch 0, loss[loss=7.491, simple_loss=6.805, pruned_loss=6.845, over 4693.00 frames. ], tot_loss[loss=7.491, simple_loss=6.805, pruned_loss=6.845, over 4693.00 frames. ], batch size: 59, lr: 2.00e-03, grad_scale: 2.0
+2023-03-25 21:46:59,847 INFO [finetune.py:1001] (5/7) Computing validation loss
+2023-03-25 21:47:14,951 INFO [finetune.py:1010] (5/7) Epoch 1, validation: loss=7.294, simple_loss=6.606, pruned_loss=6.863, over 2265189.00 frames.
+2023-03-25 21:47:14,952 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 5383MB
+2023-03-25 21:47:19,873 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=5.0, num_to_drop=2, layers_to_drop={0, 3}
+2023-03-25 21:47:30,300 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=23.0, num_to_drop=1, layers_to_drop={0}
+2023-03-25 21:48:00,208 INFO [finetune.py:976] (5/7) Epoch 1, batch 50, loss[loss=2.576, simple_loss=2.44, pruned_loss=1.366, over 4816.00 frames. ], tot_loss[loss=4.308, simple_loss=3.871, pruned_loss=4.195, over 215543.37 frames. ], batch size: 30, lr: 2.20e-03, grad_scale: 0.000244140625
+2023-03-25 21:48:33,009 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83.0, num_to_drop=1, layers_to_drop={0}
+2023-03-25 21:48:53,452 WARNING [finetune.py:966] (5/7) Grad scale is small: 0.000244140625
+2023-03-25 21:48:53,452 INFO [finetune.py:976] (5/7) Epoch 1, batch 100, loss[loss=2.19, simple_loss=2.071, pruned_loss=1.179, over 4766.00 frames. ], tot_loss[loss=3.411, simple_loss=3.141, pruned_loss=2.625, over 379425.85 frames. ], batch size: 28, lr: 2.40e-03, grad_scale: 0.00048828125
+2023-03-25 21:49:13,200 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 7.539e+02 2.791e+03 6.484e+03 1.700e+04 1.722e+07, threshold=1.297e+04, percent-clipped=0.0
+2023-03-25 21:49:28,879 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=144.0, num_to_drop=2, layers_to_drop={0, 3}
+2023-03-25 21:49:37,441 INFO [finetune.py:976] (5/7) Epoch 1, batch 150, loss[loss=1.651, simple_loss=1.493, pruned_loss=1.275, over 4902.00 frames. ], tot_loss[loss=2.829, simple_loss=2.614, pruned_loss=2.057, over 508191.46 frames. ], batch size: 36, lr: 2.60e-03, grad_scale: 0.00048828125
+2023-03-25 21:49:46,452 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0
+2023-03-25 21:50:01,305 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=2.04 vs. limit=2.0
+2023-03-25 21:50:02,327 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.30 vs. limit=2.0
+2023-03-25 21:50:15,717 WARNING [finetune.py:966] (5/7) Grad scale is small: 0.00048828125
+2023-03-25 21:50:15,717 INFO [finetune.py:976] (5/7) Epoch 1, batch 200, loss[loss=1.336, simple_loss=1.157, pruned_loss=1.246, over 4760.00 frames. ], tot_loss[loss=2.342, simple_loss=2.143, pruned_loss=1.769, over 606731.32 frames. ], batch size: 27, lr: 2.80e-03, grad_scale: 0.0009765625
+2023-03-25 21:50:29,331 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 2.018e+02 7.406e+02 1.293e+03 3.197e+03 6.754e+04, threshold=2.586e+03, percent-clipped=12.0
+2023-03-25 21:50:54,581 INFO [finetune.py:976] (5/7) Epoch 1, batch 250, loss[loss=1.447, simple_loss=1.233, pruned_loss=1.367, over 4905.00 frames. ], tot_loss[loss=2.034, simple_loss=1.839, pruned_loss=1.609, over 683544.80 frames. ], batch size: 37, lr: 3.00e-03, grad_scale: 0.0009765625
+2023-03-25 21:51:43,806 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=296.0, num_to_drop=1, layers_to_drop={0}
+2023-03-25 21:51:45,811 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=300.0, num_to_drop=2, layers_to_drop={0, 1}
+2023-03-25 21:51:46,259 WARNING [finetune.py:966] (5/7) Grad scale is small: 0.0009765625
+2023-03-25 21:51:46,260 INFO [finetune.py:976] (5/7) Epoch 1, batch 300, loss[loss=1.403, simple_loss=1.181, pruned_loss=1.32, over 4811.00 frames. ], tot_loss[loss=1.828, simple_loss=1.632, pruned_loss=1.501, over 742873.41 frames. ], batch size: 40, lr: 3.20e-03, grad_scale: 0.001953125
+2023-03-25 21:51:58,214 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=2.35 vs. limit=2.0
+2023-03-25 21:51:58,580 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 2.075e+01 5.781e+01 1.827e+02 5.788e+02 1.230e+04, threshold=3.655e+02, percent-clipped=4.0
+2023-03-25 21:52:16,754 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.97 vs. limit=2.0
+2023-03-25 21:52:39,148 INFO [finetune.py:976] (5/7) Epoch 1, batch 350, loss[loss=1.153, simple_loss=0.9552, pruned_loss=1.093, over 4726.00 frames. ], tot_loss[loss=1.683, simple_loss=1.482, pruned_loss=1.424, over 790749.07 frames. ], batch size: 23, lr: 3.40e-03, grad_scale: 0.001953125
+2023-03-25 21:52:47,101 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=357.0, num_to_drop=2, layers_to_drop={0, 1}
+2023-03-25 21:53:14,582 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=387.0, num_to_drop=1, layers_to_drop={0}
+2023-03-25 21:53:27,705 WARNING [finetune.py:966] (5/7) Grad scale is small: 0.001953125
+2023-03-25 21:53:27,706 INFO [finetune.py:976] (5/7) Epoch 1, batch 400, loss[loss=1.237, simple_loss=1.011, pruned_loss=1.167, over 4895.00 frames. ], tot_loss[loss=1.569, simple_loss=1.364, pruned_loss=1.359, over 827758.06 frames. ], batch size: 43, lr: 3.60e-03, grad_scale: 0.00390625
+2023-03-25 21:53:39,885 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.702e+01 2.277e+01 3.517e+01 1.113e+02 1.032e+03, threshold=7.035e+01, percent-clipped=3.0
+2023-03-25 21:53:50,378 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.79 vs. limit=2.0
+2023-03-25 21:53:51,259 WARNING [optim.py:389] (5/7) Scaling gradients by 0.06621765345335007, model_norm_threshold=70.34587860107422
+2023-03-25 21:53:51,345 INFO [optim.py:451] (5/7) Parameter Dominanting tot_sumsq module.encoder.encoder_embed.conv.0.weight with proportion 0.67, where dominant_sumsq=(grad_sumsq*orig_rms_sq)=7.539e+05, grad_sumsq = 2.933e+06, orig_rms_sq=2.571e-01
+2023-03-25 21:54:00,692 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=439.0, num_to_drop=2, layers_to_drop={0, 3}
+2023-03-25 21:54:05,304 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=448.0, num_to_drop=2, layers_to_drop={0, 1}
+2023-03-25 21:54:05,918 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.72 vs. limit=2.0
+2023-03-25 21:54:06,748 INFO [finetune.py:976] (5/7) Epoch 1, batch 450, loss[loss=1.134, simple_loss=0.9128, pruned_loss=1.069, over 4770.00 frames. ], tot_loss[loss=1.469, simple_loss=1.26, pruned_loss=1.294, over 857105.58 frames. ], batch size: 28, lr: 3.80e-03, grad_scale: 0.00390625
+2023-03-25 21:54:43,456 WARNING [finetune.py:966] (5/7) Grad scale is small: 0.00390625
+2023-03-25 21:54:43,457 INFO [finetune.py:976] (5/7) Epoch 1, batch 500, loss[loss=1.03, simple_loss=0.8173, pruned_loss=0.9688, over 4747.00 frames. ], tot_loss[loss=1.373, simple_loss=1.163, pruned_loss=1.222, over 878998.80 frames. ], batch size: 27, lr: 4.00e-03, grad_scale: 0.0078125
+2023-03-25 21:54:57,599 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.430e+01 1.676e+01 1.950e+01 4.114e+01 1.062e+03, threshold=3.899e+01, percent-clipped=11.0
+2023-03-25 21:54:58,772 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=6.43 vs. limit=2.0
+2023-03-25 21:55:21,547 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4026, 2.3377, 1.4578, 2.0717, 2.2371, 1.8204, 2.1374, 2.8171],
+       device='cuda:5'), covar=tensor([0.0573, 0.0610, 0.0798, 0.0831, 0.0463, 0.0329, 0.0391, 0.0334],
+       device='cuda:5'), in_proj_covar=tensor([0.0306, 0.0306, 0.0261, 0.0354, 0.0287, 0.0239, 0.0303, 0.0228],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-25 21:55:28,674 INFO [finetune.py:976] (5/7) Epoch 1, batch 550, loss[loss=0.9857, simple_loss=0.78, pruned_loss=0.9, over 4910.00 frames. ], tot_loss[loss=1.289, simple_loss=1.08, pruned_loss=1.154, over 896175.14 frames. ], batch size: 37, lr: 4.00e-03, grad_scale: 0.0078125
+2023-03-25 21:55:39,560 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=562.0, num_to_drop=1, layers_to_drop={0}
+2023-03-25 21:55:42,642 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=568.0, num_to_drop=1, layers_to_drop={0}
+2023-03-25 21:55:47,796 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.62 vs. limit=2.0
+2023-03-25 21:55:52,409 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5191, 3.2390, 3.2945, 1.5378, 2.8973, 3.3140, 3.1455, 2.6127],
+       device='cuda:5'), covar=tensor([0.0089, 0.0045, 0.0026, 0.0073, 0.0029, 0.0029, 0.0026, 0.0065],
+       device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0176, 0.0133, 0.0143, 0.0148, 0.0145, 0.0168, 0.0183],
+       device='cuda:5'), out_proj_covar=tensor([1.1290e-04, 1.3152e-04, 9.7269e-05, 1.0440e-04, 1.0801e-04, 1.0857e-04,
+       1.2641e-04, 1.3710e-04], device='cuda:5')
+2023-03-25 21:56:03,095 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=590.0, num_to_drop=1, layers_to_drop={1}
+2023-03-25 21:56:05,210 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0570, 2.5609, 1.5666, 1.8702, 2.0325, 2.5953, 2.0126, 2.7208],
+       device='cuda:5'), covar=tensor([0.0286, 0.0292, 0.0319, 0.0448, 0.0239, 0.0183, 0.0262, 0.0117],
+       device='cuda:5'), in_proj_covar=tensor([0.0306, 0.0306, 0.0261, 0.0354, 0.0287, 0.0239, 0.0303, 0.0228],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-25 21:56:09,551 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=600.0, num_to_drop=2, layers_to_drop={1, 2}
+2023-03-25 21:56:09,982 WARNING [finetune.py:966] (5/7) Grad scale is small: 0.0078125
+2023-03-25 21:56:09,982 INFO [finetune.py:976] (5/7) Epoch 1, batch 600, loss[loss=1.024, simple_loss=0.8008, pruned_loss=0.9267, over 4909.00 frames. ], tot_loss[loss=1.225, simple_loss=1.015, pruned_loss=1.099, over 910216.43 frames. ], batch size: 35, lr: 4.00e-03, grad_scale: 0.015625
+2023-03-25 21:56:22,615 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.472e+01 1.758e+01 2.024e+01 2.271e+01 8.528e+01, threshold=4.048e+01, percent-clipped=5.0
+2023-03-25 21:56:32,945 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=623.0, num_to_drop=2, layers_to_drop={1, 2}
+2023-03-25 21:56:36,133 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=629.0, num_to_drop=2, layers_to_drop={2, 3}
+2023-03-25 21:56:45,823 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=11.22 vs. limit=5.0
+2023-03-25 21:56:54,359 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=648.0, num_to_drop=1, layers_to_drop={0}
+2023-03-25 21:56:55,843 INFO [finetune.py:976] (5/7) Epoch 1, batch 650, loss[loss=1.079, simple_loss=0.8427, pruned_loss=0.9502, over 4863.00 frames. ], tot_loss[loss=1.183, simple_loss=0.9678, pruned_loss=1.06, over 922043.23 frames. ], batch size: 34, lr: 4.00e-03, grad_scale: 0.015625
+2023-03-25 21:56:55,943 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=651.0, num_to_drop=2, layers_to_drop={0, 1}
+2023-03-25 21:56:56,433 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=652.0, num_to_drop=2, layers_to_drop={2, 3}
+2023-03-25 21:57:04,049 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.88 vs. limit=2.0
+2023-03-25 21:57:31,099 INFO [finetune.py:976] (5/7) Epoch 1, batch 700, loss[loss=1.046, simple_loss=0.8038, pruned_loss=0.9212, over 4786.00 frames. ], tot_loss[loss=1.147, simple_loss=0.9283, pruned_loss=1.024, over 929803.50 frames. ], batch size: 29, lr: 4.00e-03, grad_scale: 0.03125
+2023-03-25 21:57:38,289 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.810e+01 2.037e+01 2.232e+01 2.628e+01 5.516e+01, threshold=4.463e+01, percent-clipped=4.0
+2023-03-25 21:57:54,950 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.26 vs. limit=2.0
+2023-03-25 21:57:55,424 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=10.23 vs. limit=5.0
+2023-03-25 21:57:59,232 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=739.0, num_to_drop=2, layers_to_drop={0, 2}
+2023-03-25 21:58:01,229 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=743.0, num_to_drop=2, layers_to_drop={2, 3}
+2023-03-25 21:58:05,819 INFO [finetune.py:976] (5/7) Epoch 1, batch 750, loss[loss=1.039, simple_loss=0.7941, pruned_loss=0.8989, over 4907.00 frames. ], tot_loss[loss=1.115, simple_loss=0.893, pruned_loss=0.9908, over 934612.63 frames. ], batch size: 37, lr: 4.00e-03, grad_scale: 0.03125
+2023-03-25 21:58:07,983 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=3.87 vs. limit=2.0
+2023-03-25 21:58:29,192 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=787.0, num_to_drop=1, layers_to_drop={0}
+2023-03-25 21:58:31,433 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=16.59 vs. limit=5.0
+2023-03-25 21:58:35,100 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=10.45 vs. limit=5.0
+2023-03-25 21:58:36,509 INFO [finetune.py:976] (5/7) Epoch 1, batch 800, loss[loss=0.9891, simple_loss=0.7549, pruned_loss=0.8369, over 4810.00 frames. ], tot_loss[loss=1.089, simple_loss=0.8634, pruned_loss=0.96, over 939004.02 frames. ], batch size: 40, lr: 4.00e-03, grad_scale: 0.0625
+2023-03-25 21:58:45,188 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 2.059e+01 2.266e+01 2.508e+01 2.744e+01 4.199e+01, threshold=5.016e+01, percent-clipped=0.0
+2023-03-25 21:58:55,854 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=3.55 vs. limit=2.0
+2023-03-25 21:59:05,019 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.18 vs. limit=2.0
+2023-03-25 21:59:20,570 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=847.0, num_to_drop=1, layers_to_drop={1}
+2023-03-25 21:59:22,542 INFO [finetune.py:976] (5/7) Epoch 1, batch 850, loss[loss=0.8795, simple_loss=0.6648, pruned_loss=0.7374, over 4913.00 frames. ], tot_loss[loss=1.059, simple_loss=0.832, pruned_loss=0.9262, over 942639.10 frames. ], batch size: 43, lr: 4.00e-03, grad_scale: 0.0625
+2023-03-25 21:59:53,936 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.18 vs. limit=2.0
+2023-03-25 22:00:12,167 INFO [finetune.py:976] (5/7) Epoch 1, batch 900, loss[loss=0.9473, simple_loss=0.7008, pruned_loss=0.7999, over 4867.00 frames. ], tot_loss[loss=1.029, simple_loss=0.8008, pruned_loss=0.8929, over 945999.99 frames. ], batch size: 31, lr: 4.00e-03, grad_scale: 0.125
+2023-03-25 22:00:16,299 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=908.0, num_to_drop=2, layers_to_drop={0, 1}
+2023-03-25 22:00:25,568 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 2.101e+01 2.406e+01 2.575e+01 3.027e+01 5.726e+01, threshold=5.150e+01, percent-clipped=1.0
+2023-03-25 22:00:28,262 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=918.0, num_to_drop=2, layers_to_drop={0, 1}
+2023-03-25 22:00:32,444 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=924.0, num_to_drop=2, layers_to_drop={2, 3}
+2023-03-25 22:00:53,628 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=946.0, num_to_drop=2, layers_to_drop={0, 3}
+2023-03-25 22:00:56,166 INFO [finetune.py:976] (5/7) Epoch 1, batch 950, loss[loss=0.9134, simple_loss=0.6765, pruned_loss=0.7535, over 4723.00 frames. ], tot_loss[loss=1.007, simple_loss=0.7765, pruned_loss=0.8661, over 948636.69 frames. ], batch size: 23, lr: 4.00e-03, grad_scale: 0.125
+2023-03-25 22:00:56,742 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=952.0, num_to_drop=2, layers_to_drop={0, 2}
+2023-03-25 22:01:43,866 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=1000.0, num_to_drop=1, layers_to_drop={1}
+2023-03-25 22:01:44,323 INFO [finetune.py:976] (5/7) Epoch 1, batch 1000, loss[loss=0.9119, simple_loss=0.6664, pruned_loss=0.7491, over 4750.00 frames. ], tot_loss[loss=1.004, simple_loss=0.7667, pruned_loss=0.8547, over 948748.07 frames. ], batch size: 27, lr: 4.00e-03, grad_scale: 0.25
+2023-03-25 22:01:58,290 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 2.382e+01 2.890e+01 3.153e+01 3.664e+01 7.462e+01, threshold=6.306e+01, percent-clipped=2.0
+2023-03-25 22:02:21,757 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1043.0, num_to_drop=2, layers_to_drop={0, 1}
+2023-03-25 22:02:31,287 INFO [finetune.py:976] (5/7) Epoch 1, batch 1050, loss[loss=1.003, simple_loss=0.7309, pruned_loss=0.8093, over 4915.00 frames. ], tot_loss[loss=1.011, simple_loss=0.7648, pruned_loss=0.8508, over 950376.52 frames. ], batch size: 42, lr: 4.00e-03, grad_scale: 0.25
+2023-03-25 22:03:07,641 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=1091.0, num_to_drop=1, layers_to_drop={1}
+2023-03-25 22:03:17,830 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=33.45 vs. limit=5.0
+2023-03-25 22:03:18,240 INFO [finetune.py:976] (5/7) Epoch 1, batch 1100, loss[loss=1.034, simple_loss=0.7539, pruned_loss=0.8184, over 4795.00 frames. ], tot_loss[loss=1.009, simple_loss=0.7578, pruned_loss=0.8398, over 950417.20 frames. ], batch size: 51, lr: 4.00e-03, grad_scale: 0.5
+2023-03-25 22:03:30,774 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 2.698e+01 3.337e+01 3.640e+01 4.251e+01 7.174e+01, threshold=7.279e+01, percent-clipped=4.0
+2023-03-25 22:03:30,914 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=2.47 vs. limit=2.0
+2023-03-25 22:04:04,829 INFO [finetune.py:976] (5/7) Epoch 1, batch 1150, loss[loss=0.9744, simple_loss=0.7061, pruned_loss=0.763, over 4835.00 frames. ], tot_loss[loss=1.005, simple_loss=0.7498, pruned_loss=0.8264, over 951359.71 frames. ], batch size: 30, lr: 4.00e-03, grad_scale: 0.5
+2023-03-25 22:04:06,000 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1153.0, num_to_drop=1, layers_to_drop={1}
+2023-03-25 22:04:46,446 INFO [finetune.py:976] (5/7) Epoch 1, batch 1200, loss[loss=0.9261, simple_loss=0.6813, pruned_loss=0.7015, over 4789.00 frames. ], tot_loss[loss=0.9879, simple_loss=0.7335, pruned_loss=0.8012, over 951791.76 frames. ], batch size: 29, lr: 4.00e-03, grad_scale: 1.0
+2023-03-25 22:04:47,539 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1203.0, num_to_drop=2, layers_to_drop={1, 3}
+2023-03-25 22:04:59,186 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 3.248e+01 4.460e+01 5.563e+01 6.854e+01 1.013e+02, threshold=1.113e+02, percent-clipped=20.0
+2023-03-25 22:04:59,403 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1214.0, num_to_drop=2, layers_to_drop={0, 3}
+2023-03-25 22:05:01,662 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1218.0, num_to_drop=2, layers_to_drop={0, 2}
+2023-03-25 22:05:06,572 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1224.0, num_to_drop=1, layers_to_drop={0}
+2023-03-25 22:05:23,065 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=7.64 vs. limit=5.0
+2023-03-25 22:05:24,109 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1246.0, num_to_drop=1, layers_to_drop={2}
+2023-03-25 22:05:26,685 INFO [finetune.py:976] (5/7) Epoch 1, batch 1250, loss[loss=0.8595, simple_loss=0.6359, pruned_loss=0.6372, over 4756.00 frames. ], tot_loss[loss=0.9652, simple_loss=0.7153, pruned_loss=0.771, over 952177.33 frames. ], batch size: 28, lr: 4.00e-03, grad_scale: 1.0
+2023-03-25 22:05:32,335 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7841, 1.2898, 2.3492, 3.9011, 2.7465, 2.8160, 0.7638, 2.8857],
+       device='cuda:5'), covar=tensor([0.2496, 0.2996, 0.1797, 0.0785, 0.1452, 0.1890, 0.3054, 0.1217],
+       device='cuda:5'), in_proj_covar=tensor([0.0103, 0.0119, 0.0135, 0.0156, 0.0108, 0.0143, 0.0129, 0.0114],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003],
+       device='cuda:5')
+2023-03-25 22:05:46,032 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=1266.0, num_to_drop=1, layers_to_drop={1}
+2023-03-25 22:05:49,133 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=1272.0, num_to_drop=0, layers_to_drop=set()
+2023-03-25 22:06:08,481 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=1294.0, num_to_drop=0, layers_to_drop=set()
+2023-03-25 22:06:14,931 INFO [finetune.py:976] (5/7) Epoch 1, batch 1300, loss[loss=0.8525, simple_loss=0.6388, pruned_loss=0.6148, over 4714.00 frames. ], tot_loss[loss=0.9399, simple_loss=0.6969, pruned_loss=0.7384, over 952489.99 frames. ], batch size: 59, lr: 4.00e-03, grad_scale: 1.0
+2023-03-25 22:06:23,507 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 5.599e+01 8.403e+01 9.999e+01 1.262e+02 2.600e+02, threshold=2.000e+02, percent-clipped=40.0
+2023-03-25 22:06:57,097 INFO [finetune.py:976] (5/7) Epoch 1, batch 1350, loss[loss=0.8381, simple_loss=0.6289, pruned_loss=0.5955, over 4925.00 frames. ], tot_loss[loss=0.9168, simple_loss=0.6812, pruned_loss=0.7073, over 952222.85 frames. ], batch size: 38, lr: 4.00e-03, grad_scale: 1.0
+2023-03-25 22:07:06,864 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1945, 1.2122, 1.6352, 1.5114, 1.6081, 3.3070, 1.0610, 1.4468],
+       device='cuda:5'), covar=tensor([0.1577, 0.2806, 0.3123, 0.1935, 0.2247, 0.0560, 0.2950, 0.2954],
+       device='cuda:5'), in_proj_covar=tensor([0.0072, 0.0081, 0.0072, 0.0074, 0.0091, 0.0076, 0.0085, 0.0078],
+       device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004],
+       device='cuda:5')
+2023-03-25 22:07:20,917 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.41 vs. limit=5.0
+2023-03-25 22:07:48,498 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1285, 3.1831, 3.2231, 1.1371, 3.5015, 2.3734, 0.7616, 2.0658],
+       device='cuda:5'), covar=tensor([0.2516, 0.1078, 0.1267, 0.3611, 0.0834, 0.0963, 0.4219, 0.1313],
+       device='cuda:5'), in_proj_covar=tensor([0.0148, 0.0148, 0.0157, 0.0124, 0.0148, 0.0113, 0.0138, 0.0115],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-25 22:07:50,278 INFO [finetune.py:976] (5/7) Epoch 1, batch 1400, loss[loss=0.9182, simple_loss=0.7066, pruned_loss=0.6285, over 4890.00 frames. ], tot_loss[loss=0.9005, simple_loss=0.6722, pruned_loss=0.681, over 952242.96 frames. ], batch size: 43, lr: 4.00e-03, grad_scale: 1.0
+2023-03-25 22:07:58,271 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.277e+01 1.400e+02 1.610e+02 1.980e+02 2.974e+02, threshold=3.221e+02, percent-clipped=23.0
+2023-03-25 22:08:02,495 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.34 vs. limit=5.0
+2023-03-25 22:08:09,198 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1434.0, num_to_drop=1, layers_to_drop={1}
+2023-03-25 22:08:14,455 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8832, 1.5573, 1.4351, 1.4676, 2.1690, 2.2275, 1.7586, 1.2064],
+       device='cuda:5'), covar=tensor([0.0352, 0.0633, 0.0582, 0.0429, 0.0288, 0.0226, 0.0404, 0.1539],
+       device='cuda:5'), in_proj_covar=tensor([0.0126, 0.0133, 0.0130, 0.0118, 0.0107, 0.0128, 0.0134, 0.0167],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-25 22:08:20,139 INFO [finetune.py:976] (5/7) Epoch 1, batch 1450, loss[loss=0.7097, simple_loss=0.556, pruned_loss=0.4719, over 4865.00 frames. ], tot_loss[loss=0.8772, simple_loss=0.659, pruned_loss=0.6499, over 953837.36 frames. ], batch size: 31, lr: 4.00e-03, grad_scale: 1.0
+2023-03-25 22:08:44,750 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.74 vs. limit=5.0
+2023-03-25 22:08:47,669 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1495.0, num_to_drop=2, layers_to_drop={1, 3}
+2023-03-25 22:08:51,789 INFO [finetune.py:976] (5/7) Epoch 1, batch 1500, loss[loss=0.8198, simple_loss=0.631, pruned_loss=0.5488, over 4805.00 frames. ], tot_loss[loss=0.8533, simple_loss=0.646, pruned_loss=0.619, over 955123.61 frames. ], batch size: 51, lr: 4.00e-03, grad_scale: 1.0
+2023-03-25 22:08:52,967 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1503.0, num_to_drop=1, layers_to_drop={1}
+2023-03-25 22:09:02,831 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1509.0, num_to_drop=2, layers_to_drop={1, 3}
+2023-03-25 22:09:05,947 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.968e+01 1.844e+02 2.293e+02 2.711e+02 4.587e+02, threshold=4.586e+02, percent-clipped=13.0
+2023-03-25 22:09:42,482 INFO [finetune.py:976] (5/7) Epoch 1, batch 1550, loss[loss=0.6564, simple_loss=0.5262, pruned_loss=0.4188, over 4743.00 frames. ], tot_loss[loss=0.8211, simple_loss=0.6276, pruned_loss=0.5831, over 955110.56 frames. ], batch size: 27, lr: 4.00e-03, grad_scale: 1.0
+2023-03-25 22:09:42,539 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=1551.0, num_to_drop=0, layers_to_drop=set()
+2023-03-25 22:09:52,848 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1566.0, num_to_drop=1, layers_to_drop={1}
+2023-03-25 22:10:14,214 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.96 vs. limit=5.0
+2023-03-25 22:10:33,766 INFO [finetune.py:976] (5/7) Epoch 1, batch 1600, loss[loss=0.6367, simple_loss=0.4976, pruned_loss=0.4122, over 4728.00 frames. ], tot_loss[loss=0.7839, simple_loss=0.6052, pruned_loss=0.5454, over 954534.73 frames. ], batch size: 23, lr: 4.00e-03, grad_scale: 2.0
+2023-03-25 22:10:40,876 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1611.0, num_to_drop=1, layers_to_drop={0}
+2023-03-25 22:10:42,945 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.367e+02 1.965e+02 2.441e+02 2.819e+02 5.041e+02, threshold=4.882e+02, percent-clipped=1.0
+2023-03-25 22:10:55,952 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1627.0, num_to_drop=2, layers_to_drop={0, 2}
+2023-03-25 22:11:18,723 INFO [finetune.py:976] (5/7) Epoch 1, batch 1650, loss[loss=0.5522, simple_loss=0.4686, pruned_loss=0.3287, over 4763.00 frames. ], tot_loss[loss=0.7496, simple_loss=0.5848, pruned_loss=0.5111, over 954322.14 frames. ], batch size: 27, lr: 4.00e-03, grad_scale: 2.0
+2023-03-25 22:11:41,969 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1672.0, num_to_drop=2, layers_to_drop={0, 2}
+2023-03-25 22:11:48,810 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0
+2023-03-25 22:12:02,518 INFO [finetune.py:976] (5/7) Epoch 1, batch 1700, loss[loss=0.6819, simple_loss=0.5614, pruned_loss=0.4151, over 4865.00 frames. ], tot_loss[loss=0.7221, simple_loss=0.5691, pruned_loss=0.4826, over 954985.35 frames. ], batch size: 34, lr: 4.00e-03, grad_scale: 2.0
+2023-03-25 22:12:13,433 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2168, 3.6106, 3.6548, 4.0207, 3.8998, 3.7601, 4.3480, 1.5345],
+       device='cuda:5'), covar=tensor([0.1032, 0.1289, 0.1060, 0.1239, 0.1780, 0.1428, 0.0915, 0.5733],
+       device='cuda:5'), in_proj_covar=tensor([0.0365, 0.0240, 0.0255, 0.0289, 0.0343, 0.0282, 0.0304, 0.0299],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-25 22:12:14,553 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.227e+02 2.187e+02 2.736e+02 3.197e+02 8.210e+02, threshold=5.471e+02, percent-clipped=2.0
+2023-03-25 22:12:43,269 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0
+2023-03-25 22:12:53,678 INFO [finetune.py:976] (5/7) Epoch 1, batch 1750, loss[loss=0.6547, simple_loss=0.5558, pruned_loss=0.3854, over 4823.00 frames. ], tot_loss[loss=0.7026, simple_loss=0.5602, pruned_loss=0.4598, over 955905.21 frames. ], batch size: 38, lr: 4.00e-03, grad_scale: 2.0
+2023-03-25 22:13:29,988 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1790.0, num_to_drop=1, layers_to_drop={3}
+2023-03-25 22:13:31,437 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.11 vs. limit=5.0
+2023-03-25 22:13:36,052 INFO [finetune.py:976] (5/7) Epoch 1, batch 1800, loss[loss=0.5256, simple_loss=0.457, pruned_loss=0.3013, over 4760.00 frames. ], tot_loss[loss=0.6837, simple_loss=0.5513, pruned_loss=0.4388, over 954133.13 frames. ], batch size: 26, lr: 4.00e-03, grad_scale: 2.0
+2023-03-25 22:13:40,477 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1809.0, num_to_drop=1, layers_to_drop={2}
+2023-03-25 22:13:43,024 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.190e+02 2.215e+02 2.629e+02 3.291e+02 5.990e+02, threshold=5.258e+02, percent-clipped=1.0
+2023-03-25 22:13:59,215 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1838.0, num_to_drop=1, layers_to_drop={0}
+2023-03-25 22:14:06,690 INFO [finetune.py:976] (5/7) Epoch 1, batch 1850, loss[loss=0.6349, simple_loss=0.542, pruned_loss=0.3684, over 4810.00 frames. ], tot_loss[loss=0.6638, simple_loss=0.5411, pruned_loss=0.4184, over 951566.19 frames. ], batch size: 45, lr: 4.00e-03, grad_scale: 2.0
+2023-03-25 22:14:10,079 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=1857.0, num_to_drop=0, layers_to_drop=set()
+2023-03-25 22:14:10,106 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1857.0, num_to_drop=1, layers_to_drop={1}
+2023-03-25 22:14:17,817 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1870.0, num_to_drop=1, layers_to_drop={1}
+2023-03-25 22:14:51,506 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1899.0, num_to_drop=2, layers_to_drop={2, 3}
+2023-03-25 22:14:52,522 INFO [finetune.py:976] (5/7) Epoch 1, batch 1900, loss[loss=0.5457, simple_loss=0.475, pruned_loss=0.3103, over 4903.00 frames. ], tot_loss[loss=0.6447, simple_loss=0.5313, pruned_loss=0.3993, over 952851.77 frames. ], batch size: 46, lr: 4.00e-03, grad_scale: 2.0
+2023-03-25 22:15:03,947 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 2.208e+02 2.560e+02 3.227e+02 6.450e+02, threshold=5.121e+02, percent-clipped=1.0
+2023-03-25 22:15:11,572 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1918.0, num_to_drop=2, layers_to_drop={1, 3}
+2023-03-25 22:15:14,238 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1922.0, num_to_drop=1, layers_to_drop={1}
+2023-03-25 22:15:20,183 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1931.0, num_to_drop=2, layers_to_drop={0, 2}
+2023-03-25 22:15:22,900 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3950, 3.2408, 3.2449, 1.2240, 3.4051, 2.4766, 0.8294, 2.2418],
+       device='cuda:5'), covar=tensor([0.2328, 0.1222, 0.1622, 0.3381, 0.1087, 0.0857, 0.3677, 0.1310],
+       device='cuda:5'), in_proj_covar=tensor([0.0147, 0.0147, 0.0156, 0.0123, 0.0146, 0.0110, 0.0135, 0.0113],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-25 22:15:37,065 INFO [finetune.py:976] (5/7) Epoch 1, batch 1950, loss[loss=0.4271, simple_loss=0.3884, pruned_loss=0.2333, over 4029.00 frames. ], tot_loss[loss=0.6224, simple_loss=0.5183, pruned_loss=0.3793, over 952692.88 frames. ], batch size: 17, lr: 4.00e-03, grad_scale: 2.0
+2023-03-25 22:15:43,105 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0
+2023-03-25 22:15:46,012 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1967.0, num_to_drop=0, layers_to_drop=set()
+2023-03-25 22:16:01,778 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0
+2023-03-25 22:16:12,904 INFO [finetune.py:976] (5/7) Epoch 1, batch 2000, loss[loss=0.5931, simple_loss=0.5118, pruned_loss=0.3372, over 4763.00 frames. ], tot_loss[loss=0.6017, simple_loss=0.5053, pruned_loss=0.3617, over 950857.18 frames. ], batch size: 26, lr: 4.00e-03, grad_scale: 4.0
+2023-03-25 22:16:22,847 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 2.183e+02 2.758e+02 3.285e+02 7.843e+02, threshold=5.515e+02, percent-clipped=1.0
+2023-03-25 22:16:57,265 INFO [finetune.py:976] (5/7) Epoch 1, batch 2050, loss[loss=0.506, simple_loss=0.4612, pruned_loss=0.2754, over 4813.00 frames. ], tot_loss[loss=0.5759, simple_loss=0.49, pruned_loss=0.3407, over 951299.08 frames. ], batch size: 41, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:17:04,283 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.66 vs. limit=2.0
+2023-03-25 22:17:31,381 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2090.0, num_to_drop=2, layers_to_drop={1, 2}
+2023-03-25 22:17:41,738 INFO [finetune.py:976] (5/7) Epoch 1, batch 2100, loss[loss=0.4761, simple_loss=0.4316, pruned_loss=0.2603, over 4804.00 frames. ], tot_loss[loss=0.5603, simple_loss=0.4831, pruned_loss=0.3264, over 952801.49 frames. ], batch size: 25, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:17:55,063 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.336e+02 2.022e+02 2.484e+02 2.961e+02 6.695e+02, threshold=4.968e+02, percent-clipped=1.0
+2023-03-25 22:17:55,516 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.03 vs. limit=2.0
+2023-03-25 22:18:12,539 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=2138.0, num_to_drop=0, layers_to_drop=set()
+2023-03-25 22:18:20,493 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3420, 1.8782, 1.4341, 1.8513, 1.8964, 3.3973, 1.5379, 1.7743],
+       device='cuda:5'), covar=tensor([0.1238, 0.1589, 0.1339, 0.1226, 0.1600, 0.0208, 0.1601, 0.2111],
+       device='cuda:5'), in_proj_covar=tensor([0.0068, 0.0074, 0.0067, 0.0070, 0.0086, 0.0071, 0.0080, 0.0074],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0004],
+       device='cuda:5')
+2023-03-25 22:18:29,608 INFO [finetune.py:976] (5/7) Epoch 1, batch 2150, loss[loss=0.4651, simple_loss=0.425, pruned_loss=0.2526, over 4826.00 frames. ], tot_loss[loss=0.5529, simple_loss=0.4819, pruned_loss=0.3179, over 954114.87 frames. ], batch size: 25, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:19:03,496 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2194.0, num_to_drop=0, layers_to_drop=set()
+2023-03-25 22:19:08,499 INFO [finetune.py:976] (5/7) Epoch 1, batch 2200, loss[loss=0.3988, simple_loss=0.3693, pruned_loss=0.2141, over 4739.00 frames. ], tot_loss[loss=0.5438, simple_loss=0.4786, pruned_loss=0.3091, over 953522.88 frames. ], batch size: 23, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:19:17,020 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2213.0, num_to_drop=0, layers_to_drop=set()
+2023-03-25 22:19:17,477 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.355e+02 2.819e+02 3.325e+02 5.172e+02, threshold=5.637e+02, percent-clipped=1.0
+2023-03-25 22:19:22,650 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2222.0, num_to_drop=2, layers_to_drop={0, 2}
+2023-03-25 22:19:28,138 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2226.0, num_to_drop=0, layers_to_drop=set()
+2023-03-25 22:19:45,212 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=10.42 vs. limit=5.0
+2023-03-25 22:19:57,054 INFO [finetune.py:976] (5/7) Epoch 1, batch 2250, loss[loss=0.503, simple_loss=0.4771, pruned_loss=0.2644, over 4805.00 frames. ], tot_loss[loss=0.536, simple_loss=0.476, pruned_loss=0.3016, over 953418.71 frames. ], batch size: 45, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:20:08,641 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.09 vs. limit=5.0
+2023-03-25 22:20:18,371 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2267.0, num_to_drop=1, layers_to_drop={1}
+2023-03-25 22:20:20,123 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=2270.0, num_to_drop=0, layers_to_drop=set()
+2023-03-25 22:20:42,981 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=7.29 vs. limit=5.0
+2023-03-25 22:21:00,799 INFO [finetune.py:976] (5/7) Epoch 1, batch 2300, loss[loss=0.402, simple_loss=0.3813, pruned_loss=0.2114, over 4757.00 frames. ], tot_loss[loss=0.522, simple_loss=0.469, pruned_loss=0.2904, over 953149.83 frames. ], batch size: 27, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:21:15,882 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.417e+02 2.050e+02 2.425e+02 2.921e+02 4.362e+02, threshold=4.850e+02, percent-clipped=0.0
+2023-03-25 22:21:21,995 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=2315.0, num_to_drop=0, layers_to_drop=set()
+2023-03-25 22:21:42,823 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2340.0, num_to_drop=1, layers_to_drop={0}
+2023-03-25 22:21:54,800 INFO [finetune.py:976] (5/7) Epoch 1, batch 2350, loss[loss=0.4721, simple_loss=0.4309, pruned_loss=0.2566, over 4816.00 frames. ], tot_loss[loss=0.5062, simple_loss=0.4591, pruned_loss=0.2788, over 954140.20 frames. ], batch size: 40, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:22:57,439 INFO [finetune.py:976] (5/7) Epoch 1, batch 2400, loss[loss=0.3757, simple_loss=0.3697, pruned_loss=0.1908, over 4753.00 frames. ], tot_loss[loss=0.4932, simple_loss=0.4503, pruned_loss=0.2698, over 953032.89 frames. ], batch size: 27, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:22:57,568 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2401.0, num_to_drop=2, layers_to_drop={2, 3}
+2023-03-25 22:23:05,863 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 1.953e+02 2.427e+02 2.971e+02 6.309e+02, threshold=4.853e+02, percent-clipped=1.0
+2023-03-25 22:23:32,012 INFO [finetune.py:976] (5/7) Epoch 1, batch 2450, loss[loss=0.4341, simple_loss=0.3958, pruned_loss=0.2362, over 3992.00 frames. ], tot_loss[loss=0.4808, simple_loss=0.4425, pruned_loss=0.2609, over 953995.73 frames. ], batch size: 17, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:24:21,689 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2494.0, num_to_drop=1, layers_to_drop={0}
+2023-03-25 22:24:25,633 INFO [finetune.py:976] (5/7) Epoch 1, batch 2500, loss[loss=0.4836, simple_loss=0.4614, pruned_loss=0.2529, over 4795.00 frames. ], tot_loss[loss=0.4765, simple_loss=0.4417, pruned_loss=0.2567, over 954522.28 frames. ], batch size: 41, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:24:34,906 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2513.0, num_to_drop=0, layers_to_drop=set()
+2023-03-25 22:24:35,368 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.353e+02 2.241e+02 2.593e+02 3.079e+02 4.323e+02, threshold=5.185e+02, percent-clipped=0.0
+2023-03-25 22:24:39,946 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1443, 1.4538, 0.9397, 1.9861, 2.1790, 1.7330, 1.4650, 2.0337],
+       device='cuda:5'), covar=tensor([0.1837, 0.2264, 0.2383, 0.1361, 0.2649, 0.1904, 0.1617, 0.2018],
+       device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0092, 0.0108, 0.0088, 0.0118, 0.0087, 0.0093, 0.0088],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003],
+       device='cuda:5')
+2023-03-25 22:24:44,982 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2526.0, num_to_drop=0, layers_to_drop=set()
+2023-03-25 22:24:54,544 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=2542.0, num_to_drop=0, layers_to_drop=set()
+2023-03-25 22:25:00,192 INFO [finetune.py:976] (5/7) Epoch 1, batch 2550, loss[loss=0.4555, simple_loss=0.4382, pruned_loss=0.2364, over 4762.00 frames. ], tot_loss[loss=0.4728, simple_loss=0.4423, pruned_loss=0.2525, over 955378.52 frames. ], batch size: 28, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:25:09,686 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=2561.0, num_to_drop=0, layers_to_drop=set()
+2023-03-25 22:25:18,674 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=2574.0, num_to_drop=0, layers_to_drop=set()
+2023-03-25 22:25:21,009 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1249, 0.8926, 0.7265, 0.8382, 0.8825, 0.8005, 0.8038, 1.4068],
+       device='cuda:5'), covar=tensor([13.5716, 18.6662, 14.2568, 24.6103, 13.4621, 9.8613, 20.1325, 5.1070],
+       device='cuda:5'), in_proj_covar=tensor([0.0230, 0.0228, 0.0203, 0.0263, 0.0221, 0.0189, 0.0229, 0.0169],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001],
+       device='cuda:5')
+2023-03-25 22:25:27,834 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=6.42 vs. limit=5.0
+2023-03-25 22:25:30,485 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.64 vs. limit=2.0
+2023-03-25 22:25:48,343 INFO [finetune.py:976] (5/7) Epoch 1, batch 2600, loss[loss=0.4667, simple_loss=0.4405, pruned_loss=0.2465, over 4728.00 frames. ], tot_loss[loss=0.4684, simple_loss=0.4407, pruned_loss=0.2487, over 956005.88 frames. ], batch size: 54, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:25:55,826 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.205e+02 2.587e+02 2.996e+02 4.228e+02, threshold=5.174e+02, percent-clipped=0.0
+2023-03-25 22:26:07,167 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3394, 1.3655, 1.3127, 1.2875, 0.7297, 2.1662, 0.5351, 1.2112],
+       device='cuda:5'), covar=tensor([0.3857, 0.2661, 0.2366, 0.2663, 0.2258, 0.0459, 0.2804, 0.1545],
+       device='cuda:5'), in_proj_covar=tensor([0.0115, 0.0099, 0.0107, 0.0104, 0.0095, 0.0084, 0.0081, 0.0080],
+       device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0004, 0.0004, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003],
+       device='cuda:5')
+2023-03-25 22:26:19,960 INFO [finetune.py:976] (5/7) Epoch 1, batch 2650, loss[loss=0.4839, simple_loss=0.4604, pruned_loss=0.2537, over 4822.00 frames. ], tot_loss[loss=0.464, simple_loss=0.4392, pruned_loss=0.2449, over 956762.09 frames. ], batch size: 25, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:26:48,633 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.92 vs. limit=5.0
+2023-03-25 22:26:54,475 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2685.0, num_to_drop=1, layers_to_drop={1}
+2023-03-25 22:27:06,868 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2696.0, num_to_drop=1, layers_to_drop={0}
+2023-03-25 22:27:15,138 INFO [finetune.py:976] (5/7) Epoch 1, batch 2700, loss[loss=0.4602, simple_loss=0.4444, pruned_loss=0.2379, over 4818.00 frames. ], tot_loss[loss=0.4558, simple_loss=0.4345, pruned_loss=0.2389, over 956193.81 frames. ], batch size: 39, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:27:28,255 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.346e+02 2.127e+02 2.493e+02 3.058e+02 5.200e+02, threshold=4.985e+02, percent-clipped=1.0
+2023-03-25 22:28:14,210 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0
+2023-03-25 22:28:14,551 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2746.0, num_to_drop=2, layers_to_drop={0, 2}
+2023-03-25 22:28:17,277 INFO [finetune.py:976] (5/7) Epoch 1, batch 2750, loss[loss=0.392, simple_loss=0.3803, pruned_loss=0.2019, over 4810.00 frames. ], tot_loss[loss=0.4446, simple_loss=0.4265, pruned_loss=0.2317, over 956065.82 frames. ], batch size: 25, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:28:31,572 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6448, 1.1701, 0.8657, 1.4921, 2.0300, 1.2403, 1.2619, 1.6816],
+       device='cuda:5'), covar=tensor([0.2349, 0.2835, 0.2430, 0.1674, 0.2832, 0.2559, 0.1927, 0.2568],
+       device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0092, 0.0109, 0.0089, 0.0120, 0.0088, 0.0094, 0.0089],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003],
+       device='cuda:5')
+2023-03-25 22:28:58,719 INFO [finetune.py:976] (5/7) Epoch 1, batch 2800, loss[loss=0.425, simple_loss=0.4094, pruned_loss=0.2203, over 4831.00 frames. ], tot_loss[loss=0.4333, simple_loss=0.418, pruned_loss=0.2245, over 956842.80 frames. ], batch size: 41, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:29:06,643 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 2.264e+02 2.537e+02 3.001e+02 5.007e+02, threshold=5.073e+02, percent-clipped=1.0
+2023-03-25 22:29:12,571 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2824.0, num_to_drop=1, layers_to_drop={0}
+2023-03-25 22:29:40,284 INFO [finetune.py:976] (5/7) Epoch 1, batch 2850, loss[loss=0.4367, simple_loss=0.416, pruned_loss=0.2287, over 4821.00 frames. ], tot_loss[loss=0.4258, simple_loss=0.4128, pruned_loss=0.2196, over 957684.71 frames. ], batch size: 38, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:29:59,392 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4952, 1.1837, 1.3558, 1.4212, 2.0997, 1.3489, 1.2311, 1.1331],
+       device='cuda:5'), covar=tensor([0.3832, 0.4190, 0.3266, 0.3388, 0.3085, 0.2551, 0.5256, 0.2998],
+       device='cuda:5'), in_proj_covar=tensor([0.0217, 0.0201, 0.0189, 0.0175, 0.0223, 0.0174, 0.0198, 0.0175],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-25 22:30:03,459 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2885.0, num_to_drop=2, layers_to_drop={0, 2}
+2023-03-25 22:30:15,599 INFO [finetune.py:976] (5/7) Epoch 1, batch 2900, loss[loss=0.3108, simple_loss=0.3305, pruned_loss=0.1455, over 4707.00 frames. ], tot_loss[loss=0.4248, simple_loss=0.4134, pruned_loss=0.2183, over 957077.28 frames. ], batch size: 23, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:30:23,164 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.426e+02 2.100e+02 2.461e+02 2.914e+02 6.574e+02, threshold=4.923e+02, percent-clipped=3.0
+2023-03-25 22:30:23,311 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6964, 1.4221, 1.9376, 1.2836, 1.5682, 1.7683, 1.5057, 2.1002],
+       device='cuda:5'), covar=tensor([0.2214, 0.2612, 0.1602, 0.2375, 0.1499, 0.2064, 0.2814, 0.1146],
+       device='cuda:5'), in_proj_covar=tensor([0.0184, 0.0194, 0.0191, 0.0179, 0.0160, 0.0201, 0.0201, 0.0177],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-25 22:30:34,100 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0
+2023-03-25 22:30:37,137 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2937.0, num_to_drop=1, layers_to_drop={1}
+2023-03-25 22:30:50,914 INFO [finetune.py:976] (5/7) Epoch 1, batch 2950, loss[loss=0.4938, simple_loss=0.4724, pruned_loss=0.2577, over 4871.00 frames. ], tot_loss[loss=0.4253, simple_loss=0.4159, pruned_loss=0.2175, over 954765.42 frames. ], batch size: 34, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:31:02,668 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9973, 2.0371, 2.2833, 1.0232, 2.4729, 2.2599, 1.6138, 2.2342],
+       device='cuda:5'), covar=tensor([0.0711, 0.1313, 0.1424, 0.2723, 0.1014, 0.1736, 0.2007, 0.1154],
+       device='cuda:5'), in_proj_covar=tensor([0.0144, 0.0159, 0.0173, 0.0159, 0.0175, 0.0173, 0.0183, 0.0170],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-25 22:31:30,035 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.2491, 1.1893, 1.1872, 0.6756, 1.0539, 1.5006, 1.3106, 1.2143],
+       device='cuda:5'), covar=tensor([0.0779, 0.0453, 0.0531, 0.0516, 0.0412, 0.0282, 0.0287, 0.0470],
+       device='cuda:5'), in_proj_covar=tensor([0.0125, 0.0148, 0.0115, 0.0125, 0.0125, 0.0118, 0.0143, 0.0150],
+       device='cuda:5'), out_proj_covar=tensor([9.4139e-05, 1.1049e-04, 8.4681e-05, 9.1708e-05, 9.0383e-05, 8.7444e-05,
+       1.0669e-04, 1.1175e-04], device='cuda:5')
+2023-03-25 22:31:31,750 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2996.0, num_to_drop=1, layers_to_drop={0}
+2023-03-25 22:31:33,418 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2998.0, num_to_drop=2, layers_to_drop={1, 2}
+2023-03-25 22:31:35,184 INFO [finetune.py:976] (5/7) Epoch 1, batch 3000, loss[loss=0.4171, simple_loss=0.4179, pruned_loss=0.2081, over 4841.00 frames. ], tot_loss[loss=0.4218, simple_loss=0.415, pruned_loss=0.2144, over 956446.89 frames. ], batch size: 49, lr: 4.00e-03, grad_scale: 8.0
+2023-03-25 22:31:35,184 INFO [finetune.py:1001] (5/7) Computing validation loss
+2023-03-25 22:31:46,738 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.5207, 0.3741, 0.4650, 0.4755, 0.5148, 0.3587, 0.3756, 0.4622],
+       device='cuda:5'), covar=tensor([ 76.7281, 104.5256, 72.1985, 126.8866, 77.3231, 50.5928, 103.9600,
+        24.2132], device='cuda:5'), in_proj_covar=tensor([0.0229, 0.0225, 0.0203, 0.0262, 0.0219, 0.0188, 0.0226, 0.0167],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001],
+       device='cuda:5')
+2023-03-25 22:31:56,383 INFO [finetune.py:1010] (5/7) Epoch 1, validation: loss=0.4228, simple_loss=0.4589, pruned_loss=0.1933, over 2265189.00 frames.
+2023-03-25 22:31:56,384 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 5460MB +2023-03-25 22:32:17,071 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 2.092e+02 2.490e+02 2.940e+02 5.162e+02, threshold=4.980e+02, percent-clipped=2.0 +2023-03-25 22:32:25,839 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3019.0, num_to_drop=1, layers_to_drop={1} +2023-03-25 22:32:38,092 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8430, 1.2948, 0.8265, 1.7220, 1.9741, 1.4849, 1.3557, 1.8645], + device='cuda:5'), covar=tensor([0.1533, 0.1938, 0.2216, 0.1153, 0.2359, 0.2130, 0.1347, 0.1619], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0092, 0.0110, 0.0089, 0.0120, 0.0090, 0.0094, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-25 22:32:39,248 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3041.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:32:40,986 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=3044.0, num_to_drop=1, layers_to_drop={0} +2023-03-25 22:32:42,779 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8787, 1.9074, 1.7650, 1.2971, 2.3309, 2.1594, 1.9518, 1.7820], + device='cuda:5'), covar=tensor([0.0799, 0.0659, 0.0859, 0.1106, 0.0389, 0.0710, 0.0829, 0.1190], + device='cuda:5'), in_proj_covar=tensor([0.0129, 0.0131, 0.0132, 0.0121, 0.0107, 0.0130, 0.0136, 0.0164], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 22:32:44,999 INFO [finetune.py:976] (5/7) Epoch 1, batch 3050, loss[loss=0.4123, simple_loss=0.4123, pruned_loss=0.2062, over 4882.00 frames. ], tot_loss[loss=0.4198, simple_loss=0.4145, pruned_loss=0.2126, over 957726.82 frames. ], batch size: 43, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:33:09,808 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3080.0, num_to_drop=2, layers_to_drop={0, 3} +2023-03-25 22:33:20,512 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=6.18 vs. limit=5.0 +2023-03-25 22:33:38,271 INFO [finetune.py:976] (5/7) Epoch 1, batch 3100, loss[loss=0.43, simple_loss=0.424, pruned_loss=0.2181, over 4885.00 frames. ], tot_loss[loss=0.4143, simple_loss=0.4105, pruned_loss=0.2091, over 956993.09 frames. ], batch size: 32, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:33:51,987 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.371e+02 2.009e+02 2.458e+02 3.052e+02 4.298e+02, threshold=4.916e+02, percent-clipped=0.0 +2023-03-25 22:34:39,799 INFO [finetune.py:976] (5/7) Epoch 1, batch 3150, loss[loss=0.3717, simple_loss=0.3865, pruned_loss=0.1785, over 4905.00 frames. ], tot_loss[loss=0.4067, simple_loss=0.4045, pruned_loss=0.2045, over 957531.66 frames. ], batch size: 43, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:35:16,504 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3180.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:35:39,620 INFO [finetune.py:976] (5/7) Epoch 1, batch 3200, loss[loss=0.3346, simple_loss=0.3571, pruned_loss=0.1561, over 4895.00 frames. ], tot_loss[loss=0.3991, simple_loss=0.3985, pruned_loss=0.1999, over 957273.17 frames. 
], batch size: 32, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:35:52,726 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.973e+02 2.320e+02 2.787e+02 5.091e+02, threshold=4.641e+02, percent-clipped=1.0 +2023-03-25 22:36:03,082 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4193, 1.0257, 1.0487, 0.9330, 1.3121, 1.6118, 1.4172, 0.9512], + device='cuda:5'), covar=tensor([0.0334, 0.0447, 0.0607, 0.0451, 0.0328, 0.0214, 0.0264, 0.0509], + device='cuda:5'), in_proj_covar=tensor([0.0088, 0.0117, 0.0138, 0.0115, 0.0107, 0.0105, 0.0090, 0.0116], + device='cuda:5'), out_proj_covar=tensor([6.8989e-05, 9.2281e-05, 1.1220e-04, 9.1340e-05, 8.5408e-05, 7.9014e-05, + 6.9736e-05, 9.1389e-05], device='cuda:5') +2023-03-25 22:36:29,137 INFO [finetune.py:976] (5/7) Epoch 1, batch 3250, loss[loss=0.3128, simple_loss=0.3336, pruned_loss=0.146, over 4786.00 frames. ], tot_loss[loss=0.3993, simple_loss=0.3988, pruned_loss=0.1999, over 955986.57 frames. ], batch size: 26, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:36:41,039 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4787, 1.4855, 0.7031, 2.1369, 2.5474, 1.7500, 1.8078, 2.1224], + device='cuda:5'), covar=tensor([0.1595, 0.2142, 0.2404, 0.1191, 0.2150, 0.1884, 0.1409, 0.1906], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0093, 0.0110, 0.0089, 0.0120, 0.0090, 0.0095, 0.0090], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-25 22:37:01,544 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.91 vs. limit=5.0 +2023-03-25 22:37:07,591 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7372, 1.9495, 1.6714, 1.2710, 2.2190, 2.0735, 1.8667, 1.6858], + device='cuda:5'), covar=tensor([0.0880, 0.0664, 0.0970, 0.1150, 0.0472, 0.0852, 0.0875, 0.1294], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0132, 0.0133, 0.0122, 0.0108, 0.0131, 0.0137, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 22:37:11,512 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-03-25 22:37:12,286 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3288.0, num_to_drop=1, layers_to_drop={1} +2023-03-25 22:37:19,996 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3293.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:37:24,554 INFO [finetune.py:976] (5/7) Epoch 1, batch 3300, loss[loss=0.3674, simple_loss=0.3758, pruned_loss=0.1796, over 4822.00 frames. ], tot_loss[loss=0.3997, simple_loss=0.401, pruned_loss=0.1993, over 956624.62 frames. 
], batch size: 30, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:37:32,707 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 2.210e+02 2.512e+02 3.057e+02 4.555e+02, threshold=5.024e+02, percent-clipped=0.0 +2023-03-25 22:37:44,384 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.7599, 3.3136, 3.3990, 3.6707, 3.5062, 3.2988, 3.8056, 1.4005], + device='cuda:5'), covar=tensor([0.0836, 0.0950, 0.0880, 0.0895, 0.1430, 0.1364, 0.0842, 0.4150], + device='cuda:5'), in_proj_covar=tensor([0.0374, 0.0247, 0.0267, 0.0299, 0.0353, 0.0293, 0.0313, 0.0306], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 22:38:03,243 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3341.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:38:13,349 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3349.0, num_to_drop=2, layers_to_drop={2, 3} +2023-03-25 22:38:14,401 INFO [finetune.py:976] (5/7) Epoch 1, batch 3350, loss[loss=0.3034, simple_loss=0.3199, pruned_loss=0.1435, over 4681.00 frames. ], tot_loss[loss=0.4002, simple_loss=0.4028, pruned_loss=0.1988, over 956812.16 frames. ], batch size: 23, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:38:44,439 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3375.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:38:59,533 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=3389.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:39:06,749 INFO [finetune.py:976] (5/7) Epoch 1, batch 3400, loss[loss=0.403, simple_loss=0.4044, pruned_loss=0.2008, over 4869.00 frames. ], tot_loss[loss=0.4027, simple_loss=0.4057, pruned_loss=0.1998, over 957774.69 frames. ], batch size: 34, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:39:20,792 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.307e+02 1.988e+02 2.392e+02 2.720e+02 4.202e+02, threshold=4.784e+02, percent-clipped=0.0 +2023-03-25 22:40:08,848 INFO [finetune.py:976] (5/7) Epoch 1, batch 3450, loss[loss=0.3816, simple_loss=0.3993, pruned_loss=0.182, over 4885.00 frames. ], tot_loss[loss=0.3989, simple_loss=0.4036, pruned_loss=0.1972, over 957764.48 frames. 
], batch size: 43, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:40:25,066 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3299, 1.5268, 1.2139, 1.5830, 1.5771, 2.9352, 1.3369, 1.5903], + device='cuda:5'), covar=tensor([0.1200, 0.1791, 0.1365, 0.1183, 0.1640, 0.0290, 0.1572, 0.1887], + device='cuda:5'), in_proj_covar=tensor([0.0073, 0.0076, 0.0071, 0.0074, 0.0088, 0.0075, 0.0082, 0.0075], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-25 22:40:29,558 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2582, 1.2858, 1.3865, 0.8044, 1.5837, 1.3760, 1.2418, 1.2704], + device='cuda:5'), covar=tensor([0.0890, 0.0963, 0.0770, 0.1162, 0.0706, 0.0965, 0.1078, 0.1558], + device='cuda:5'), in_proj_covar=tensor([0.0129, 0.0131, 0.0132, 0.0121, 0.0107, 0.0130, 0.0136, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 22:40:40,392 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3480.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:41:03,278 INFO [finetune.py:976] (5/7) Epoch 1, batch 3500, loss[loss=0.3596, simple_loss=0.3742, pruned_loss=0.1725, over 4764.00 frames. ], tot_loss[loss=0.3928, simple_loss=0.3981, pruned_loss=0.1937, over 955324.84 frames. ], batch size: 28, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:41:07,571 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1467, 3.5419, 3.7394, 4.0006, 3.8316, 3.7053, 4.2602, 1.4041], + device='cuda:5'), covar=tensor([0.0855, 0.0914, 0.0770, 0.1003, 0.1477, 0.1280, 0.0735, 0.4803], + device='cuda:5'), in_proj_covar=tensor([0.0371, 0.0245, 0.0264, 0.0296, 0.0350, 0.0290, 0.0311, 0.0303], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 22:41:14,914 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 2.271e+02 2.832e+02 3.824e+02 1.123e+03, threshold=5.664e+02, percent-clipped=12.0 +2023-03-25 22:41:30,604 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=3528.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:41:39,105 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6277, 1.2499, 2.2525, 3.1504, 2.0911, 2.4733, 1.2096, 2.5943], + device='cuda:5'), covar=tensor([0.1804, 0.2074, 0.1208, 0.0654, 0.1046, 0.1461, 0.1727, 0.0742], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0113, 0.0130, 0.0150, 0.0101, 0.0137, 0.0121, 0.0104], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-25 22:41:48,775 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3546.0, num_to_drop=1, layers_to_drop={0} +2023-03-25 22:41:57,307 INFO [finetune.py:976] (5/7) Epoch 1, batch 3550, loss[loss=0.3492, simple_loss=0.3546, pruned_loss=0.172, over 4908.00 frames. ], tot_loss[loss=0.3898, simple_loss=0.3944, pruned_loss=0.1926, over 956487.49 frames. 
], batch size: 37, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:42:45,285 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3593.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:42:50,248 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4643, 1.6246, 1.4496, 1.6423, 1.0307, 3.5175, 1.3231, 2.1073], + device='cuda:5'), covar=tensor([0.4283, 0.2759, 0.2562, 0.2664, 0.2323, 0.0230, 0.3079, 0.1498], + device='cuda:5'), in_proj_covar=tensor([0.0119, 0.0102, 0.0110, 0.0108, 0.0100, 0.0087, 0.0087, 0.0085], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0004, 0.0005, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004], + device='cuda:5') +2023-03-25 22:42:50,745 INFO [finetune.py:976] (5/7) Epoch 1, batch 3600, loss[loss=0.3584, simple_loss=0.3677, pruned_loss=0.1745, over 4907.00 frames. ], tot_loss[loss=0.3825, simple_loss=0.389, pruned_loss=0.188, over 957260.71 frames. ], batch size: 32, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:42:59,659 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3607.0, num_to_drop=2, layers_to_drop={1, 3} +2023-03-25 22:43:09,363 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.842e+02 2.557e+02 2.866e+02 3.769e+02 9.044e+02, threshold=5.732e+02, percent-clipped=5.0 +2023-03-25 22:43:30,972 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0730, 2.6792, 2.7950, 3.0113, 2.8312, 2.7326, 3.1258, 1.1308], + device='cuda:5'), covar=tensor([0.1096, 0.1018, 0.0861, 0.1140, 0.1690, 0.1427, 0.1370, 0.4731], + device='cuda:5'), in_proj_covar=tensor([0.0374, 0.0247, 0.0266, 0.0298, 0.0352, 0.0291, 0.0314, 0.0305], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 22:43:31,271 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-03-25 22:43:44,103 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=3641.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:43:45,242 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2812, 1.9325, 1.9230, 0.7878, 1.9727, 1.8514, 1.5529, 2.0083], + device='cuda:5'), covar=tensor([0.0902, 0.1102, 0.1546, 0.2946, 0.1254, 0.2081, 0.2373, 0.1244], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0165, 0.0180, 0.0165, 0.0184, 0.0183, 0.0189, 0.0176], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 22:43:46,916 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3644.0, num_to_drop=1, layers_to_drop={2} +2023-03-25 22:43:52,545 INFO [finetune.py:976] (5/7) Epoch 1, batch 3650, loss[loss=0.3686, simple_loss=0.3668, pruned_loss=0.1852, over 4720.00 frames. ], tot_loss[loss=0.3818, simple_loss=0.389, pruned_loss=0.1873, over 954196.00 frames. 
], batch size: 23, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:44:08,617 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7148, 1.5000, 1.2616, 1.1110, 1.5282, 2.0605, 1.6362, 1.1361], + device='cuda:5'), covar=tensor([0.0215, 0.0416, 0.0502, 0.0484, 0.0306, 0.0166, 0.0288, 0.0470], + device='cuda:5'), in_proj_covar=tensor([0.0084, 0.0112, 0.0132, 0.0111, 0.0104, 0.0100, 0.0087, 0.0111], + device='cuda:5'), out_proj_covar=tensor([6.6157e-05, 8.8560e-05, 1.0692e-04, 8.7766e-05, 8.2885e-05, 7.5009e-05, + 6.7276e-05, 8.7113e-05], device='cuda:5') +2023-03-25 22:44:20,347 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3675.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:44:21,469 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3676.0, num_to_drop=1, layers_to_drop={1} +2023-03-25 22:44:40,052 INFO [finetune.py:976] (5/7) Epoch 1, batch 3700, loss[loss=0.3021, simple_loss=0.3211, pruned_loss=0.1416, over 4773.00 frames. ], tot_loss[loss=0.3842, simple_loss=0.3924, pruned_loss=0.188, over 954283.44 frames. ], batch size: 23, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:44:52,817 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.567e+02 2.980e+02 3.536e+02 5.905e+02, threshold=5.959e+02, percent-clipped=1.0 +2023-03-25 22:45:02,324 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4390, 1.9741, 2.3457, 2.4833, 3.1467, 2.2836, 2.1873, 1.8475], + device='cuda:5'), covar=tensor([0.2952, 0.3364, 0.2202, 0.2537, 0.2256, 0.1717, 0.4095, 0.2358], + device='cuda:5'), in_proj_covar=tensor([0.0211, 0.0197, 0.0183, 0.0170, 0.0217, 0.0169, 0.0193, 0.0171], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 22:45:09,755 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=3723.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:45:23,644 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3737.0, num_to_drop=2, layers_to_drop={2, 3} +2023-03-25 22:45:31,923 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.11 vs. limit=5.0 +2023-03-25 22:45:43,733 INFO [finetune.py:976] (5/7) Epoch 1, batch 3750, loss[loss=0.4524, simple_loss=0.4356, pruned_loss=0.2346, over 4302.00 frames. ], tot_loss[loss=0.3849, simple_loss=0.3934, pruned_loss=0.1882, over 952562.32 frames. ], batch size: 65, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:45:53,811 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.39 vs. limit=5.0 +2023-03-25 22:45:58,986 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6406, 3.9640, 3.8924, 2.1855, 4.1654, 3.0032, 0.6101, 2.7570], + device='cuda:5'), covar=tensor([0.2194, 0.1330, 0.1297, 0.2735, 0.0697, 0.0889, 0.4373, 0.1219], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0153, 0.0158, 0.0123, 0.0148, 0.0113, 0.0139, 0.0115], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 22:46:33,629 INFO [finetune.py:976] (5/7) Epoch 1, batch 3800, loss[loss=0.3879, simple_loss=0.3982, pruned_loss=0.1888, over 4725.00 frames. ], tot_loss[loss=0.3841, simple_loss=0.3938, pruned_loss=0.1872, over 953197.96 frames. 
], batch size: 54, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:46:47,080 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.141e+02 2.900e+02 3.620e+02 1.043e+03, threshold=5.800e+02, percent-clipped=4.0 +2023-03-25 22:46:55,576 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.66 vs. limit=2.0 +2023-03-25 22:47:22,183 INFO [finetune.py:976] (5/7) Epoch 1, batch 3850, loss[loss=0.4446, simple_loss=0.4372, pruned_loss=0.226, over 4902.00 frames. ], tot_loss[loss=0.3798, simple_loss=0.3904, pruned_loss=0.1846, over 949873.53 frames. ], batch size: 35, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:47:46,271 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0218, 2.0641, 1.7140, 1.3431, 2.5632, 2.3093, 2.0907, 1.9536], + device='cuda:5'), covar=tensor([0.0794, 0.0680, 0.0939, 0.1132, 0.0340, 0.0772, 0.0823, 0.1155], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0131, 0.0134, 0.0122, 0.0107, 0.0132, 0.0138, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 22:48:09,990 INFO [finetune.py:976] (5/7) Epoch 1, batch 3900, loss[loss=0.3704, simple_loss=0.3865, pruned_loss=0.1771, over 4783.00 frames. ], tot_loss[loss=0.373, simple_loss=0.3846, pruned_loss=0.1807, over 949825.34 frames. ], batch size: 29, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:48:10,640 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3902.0, num_to_drop=1, layers_to_drop={0} +2023-03-25 22:48:20,246 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.264e+02 2.673e+02 3.196e+02 5.181e+02, threshold=5.346e+02, percent-clipped=0.0 +2023-03-25 22:48:25,231 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0672, 1.7244, 1.4193, 1.1237, 1.8062, 2.5037, 2.1045, 1.5638], + device='cuda:5'), covar=tensor([0.0237, 0.0456, 0.0516, 0.0561, 0.0432, 0.0161, 0.0235, 0.0458], + device='cuda:5'), in_proj_covar=tensor([0.0085, 0.0113, 0.0133, 0.0111, 0.0104, 0.0100, 0.0088, 0.0111], + device='cuda:5'), out_proj_covar=tensor([6.6379e-05, 8.9485e-05, 1.0762e-04, 8.8236e-05, 8.3087e-05, 7.5284e-05, + 6.7904e-05, 8.7624e-05], device='cuda:5') +2023-03-25 22:48:30,337 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3926.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:48:50,600 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3944.0, num_to_drop=1, layers_to_drop={0} +2023-03-25 22:48:54,666 INFO [finetune.py:976] (5/7) Epoch 1, batch 3950, loss[loss=0.3812, simple_loss=0.3845, pruned_loss=0.1889, over 4924.00 frames. ], tot_loss[loss=0.3637, simple_loss=0.3773, pruned_loss=0.175, over 952698.11 frames. ], batch size: 37, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:49:45,864 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3987.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:49:53,767 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=3992.0, num_to_drop=1, layers_to_drop={0} +2023-03-25 22:50:05,840 INFO [finetune.py:976] (5/7) Epoch 1, batch 4000, loss[loss=0.3758, simple_loss=0.3893, pruned_loss=0.1812, over 4931.00 frames. ], tot_loss[loss=0.3596, simple_loss=0.3739, pruned_loss=0.1727, over 954877.19 frames. 
], batch size: 38, lr: 4.00e-03, grad_scale: 8.0 +2023-03-25 22:50:18,332 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 2.072e+02 2.562e+02 2.941e+02 5.028e+02, threshold=5.123e+02, percent-clipped=0.0 +2023-03-25 22:50:21,321 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.22 vs. limit=5.0 +2023-03-25 22:50:23,216 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-03-25 22:50:31,330 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=4032.0, num_to_drop=1, layers_to_drop={2} +2023-03-25 22:50:50,289 INFO [finetune.py:976] (5/7) Epoch 1, batch 4050, loss[loss=0.3808, simple_loss=0.395, pruned_loss=0.1833, over 4894.00 frames. ], tot_loss[loss=0.3648, simple_loss=0.3789, pruned_loss=0.1753, over 956690.18 frames. ], batch size: 35, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 22:51:12,799 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1329, 1.9542, 2.5778, 1.5388, 2.1859, 2.2804, 2.0418, 2.6929], + device='cuda:5'), covar=tensor([0.2599, 0.2675, 0.2088, 0.3164, 0.1598, 0.2401, 0.2762, 0.1351], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0198, 0.0195, 0.0185, 0.0167, 0.0209, 0.0204, 0.0185], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 22:51:46,311 INFO [finetune.py:976] (5/7) Epoch 1, batch 4100, loss[loss=0.3693, simple_loss=0.4003, pruned_loss=0.1692, over 4818.00 frames. ], tot_loss[loss=0.366, simple_loss=0.3819, pruned_loss=0.175, over 955239.57 frames. ], batch size: 33, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 22:52:00,029 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.481e+02 2.010e+02 2.495e+02 2.957e+02 5.246e+02, threshold=4.990e+02, percent-clipped=1.0 +2023-03-25 22:52:34,828 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4141.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:52:43,039 INFO [finetune.py:976] (5/7) Epoch 1, batch 4150, loss[loss=0.3458, simple_loss=0.36, pruned_loss=0.1658, over 4720.00 frames. ], tot_loss[loss=0.3643, simple_loss=0.3812, pruned_loss=0.1737, over 955300.56 frames. ], batch size: 23, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 22:53:43,097 INFO [finetune.py:976] (5/7) Epoch 1, batch 4200, loss[loss=0.3636, simple_loss=0.3781, pruned_loss=0.1745, over 4837.00 frames. ], tot_loss[loss=0.3637, simple_loss=0.3816, pruned_loss=0.1729, over 956106.70 frames. ], batch size: 47, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 22:53:43,837 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4202.0, num_to_drop=1, layers_to_drop={2} +2023-03-25 22:53:43,854 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4202.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:53:52,993 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-03-25 22:54:02,772 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.292e+02 2.089e+02 2.432e+02 2.936e+02 5.530e+02, threshold=4.864e+02, percent-clipped=1.0 +2023-03-25 22:54:44,489 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=4250.0, num_to_drop=1, layers_to_drop={1} +2023-03-25 22:54:45,013 INFO [finetune.py:976] (5/7) Epoch 1, batch 4250, loss[loss=0.3439, simple_loss=0.3704, pruned_loss=0.1587, over 4752.00 frames. 
], tot_loss[loss=0.3567, simple_loss=0.3761, pruned_loss=0.1687, over 956983.08 frames. ], batch size: 27, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 22:55:23,652 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=4282.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:55:27,311 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4288.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:55:29,219 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.16 vs. limit=5.0 +2023-03-25 22:55:35,146 INFO [finetune.py:976] (5/7) Epoch 1, batch 4300, loss[loss=0.3184, simple_loss=0.331, pruned_loss=0.153, over 4787.00 frames. ], tot_loss[loss=0.351, simple_loss=0.3712, pruned_loss=0.1655, over 956244.17 frames. ], batch size: 28, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 22:55:45,446 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 1.950e+02 2.267e+02 2.860e+02 4.056e+02, threshold=4.534e+02, percent-clipped=0.0 +2023-03-25 22:56:13,426 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4332.0, num_to_drop=1, layers_to_drop={0} +2023-03-25 22:56:35,756 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4349.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:56:36,850 INFO [finetune.py:976] (5/7) Epoch 1, batch 4350, loss[loss=0.3317, simple_loss=0.3612, pruned_loss=0.1511, over 4931.00 frames. ], tot_loss[loss=0.3433, simple_loss=0.3644, pruned_loss=0.1611, over 955400.59 frames. ], batch size: 33, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 22:56:45,404 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.97 vs. limit=5.0 +2023-03-25 22:57:17,558 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=4380.0, num_to_drop=1, layers_to_drop={1} +2023-03-25 22:57:22,567 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.5039, 3.8920, 4.0546, 4.3752, 4.2411, 4.0389, 4.5993, 1.6756], + device='cuda:5'), covar=tensor([0.0704, 0.0852, 0.0719, 0.0834, 0.1270, 0.1291, 0.0679, 0.4709], + device='cuda:5'), in_proj_covar=tensor([0.0369, 0.0245, 0.0266, 0.0294, 0.0348, 0.0289, 0.0310, 0.0301], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 22:57:44,938 INFO [finetune.py:976] (5/7) Epoch 1, batch 4400, loss[loss=0.4012, simple_loss=0.4137, pruned_loss=0.1943, over 4833.00 frames. ], tot_loss[loss=0.3452, simple_loss=0.366, pruned_loss=0.1622, over 956613.41 frames. 
], batch size: 47, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 22:57:57,031 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.139e+02 1.978e+02 2.430e+02 2.895e+02 4.966e+02, threshold=4.860e+02, percent-clipped=1.0 +2023-03-25 22:58:03,451 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7404, 3.3484, 3.4827, 2.0085, 3.6248, 2.6475, 1.2094, 2.4709], + device='cuda:5'), covar=tensor([0.2911, 0.1362, 0.1247, 0.2707, 0.0844, 0.0914, 0.3666, 0.1390], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0156, 0.0161, 0.0125, 0.0151, 0.0116, 0.0143, 0.0118], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-25 22:58:03,486 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5284, 1.7196, 1.7576, 1.8485, 1.6943, 3.1417, 1.4540, 1.8236], + device='cuda:5'), covar=tensor([0.1012, 0.1465, 0.1591, 0.1064, 0.1489, 0.0281, 0.1369, 0.1576], + device='cuda:5'), in_proj_covar=tensor([0.0073, 0.0076, 0.0072, 0.0075, 0.0088, 0.0076, 0.0082, 0.0075], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-25 22:58:25,800 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5092, 0.9455, 0.8094, 1.4178, 1.8628, 0.6610, 1.0617, 1.4115], + device='cuda:5'), covar=tensor([0.1855, 0.2545, 0.1967, 0.1381, 0.2453, 0.2201, 0.1705, 0.2117], + device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0095, 0.0113, 0.0090, 0.0122, 0.0093, 0.0096, 0.0091], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-25 22:58:28,111 INFO [finetune.py:976] (5/7) Epoch 1, batch 4450, loss[loss=0.3525, simple_loss=0.3814, pruned_loss=0.1618, over 4752.00 frames. ], tot_loss[loss=0.3479, simple_loss=0.3695, pruned_loss=0.1632, over 956655.19 frames. ], batch size: 27, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 22:59:09,845 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=4497.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 22:59:12,226 INFO [finetune.py:976] (5/7) Epoch 1, batch 4500, loss[loss=0.351, simple_loss=0.3724, pruned_loss=0.1648, over 4881.00 frames. ], tot_loss[loss=0.3491, simple_loss=0.3712, pruned_loss=0.1635, over 957501.89 frames. ], batch size: 31, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 22:59:29,325 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.227e+02 2.111e+02 2.516e+02 2.889e+02 5.762e+02, threshold=5.032e+02, percent-clipped=1.0 +2023-03-25 22:59:49,423 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7908, 1.8180, 1.6807, 1.2990, 2.2266, 1.9880, 1.8684, 1.7310], + device='cuda:5'), covar=tensor([0.0771, 0.0674, 0.0904, 0.1061, 0.0372, 0.0781, 0.0817, 0.1131], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0131, 0.0134, 0.0122, 0.0107, 0.0133, 0.0139, 0.0160], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:00:15,078 INFO [finetune.py:976] (5/7) Epoch 1, batch 4550, loss[loss=0.3282, simple_loss=0.3574, pruned_loss=0.1494, over 4891.00 frames. ], tot_loss[loss=0.3511, simple_loss=0.3734, pruned_loss=0.1644, over 956255.39 frames. 
], batch size: 32, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:00:34,783 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.1500, 1.2925, 1.4002, 0.7748, 1.1532, 1.5639, 1.4460, 1.2988], + device='cuda:5'), covar=tensor([0.0864, 0.0470, 0.0362, 0.0491, 0.0365, 0.0384, 0.0281, 0.0479], + device='cuda:5'), in_proj_covar=tensor([0.0117, 0.0141, 0.0110, 0.0119, 0.0119, 0.0110, 0.0134, 0.0138], + device='cuda:5'), out_proj_covar=tensor([8.8321e-05, 1.0492e-04, 8.0410e-05, 8.7054e-05, 8.5924e-05, 8.1488e-05, + 1.0001e-04, 1.0270e-04], device='cuda:5') +2023-03-25 23:00:55,784 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4582.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:01:25,593 INFO [finetune.py:976] (5/7) Epoch 1, batch 4600, loss[loss=0.3689, simple_loss=0.3852, pruned_loss=0.1763, over 4830.00 frames. ], tot_loss[loss=0.3476, simple_loss=0.3707, pruned_loss=0.1623, over 955305.25 frames. ], batch size: 30, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:01:38,111 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.209e+02 2.082e+02 2.456e+02 3.064e+02 5.977e+02, threshold=4.911e+02, percent-clipped=1.0 +2023-03-25 23:01:59,353 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=4630.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:02:18,007 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=4644.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:02:28,981 INFO [finetune.py:976] (5/7) Epoch 1, batch 4650, loss[loss=0.3121, simple_loss=0.3317, pruned_loss=0.1463, over 4901.00 frames. ], tot_loss[loss=0.3417, simple_loss=0.3655, pruned_loss=0.1589, over 956834.02 frames. ], batch size: 32, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:02:59,772 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-03-25 23:03:06,337 INFO [finetune.py:976] (5/7) Epoch 1, batch 4700, loss[loss=0.2875, simple_loss=0.3203, pruned_loss=0.1274, over 4761.00 frames. ], tot_loss[loss=0.3348, simple_loss=0.3594, pruned_loss=0.1551, over 954425.74 frames. ], batch size: 54, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:03:07,023 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-03-25 23:03:20,653 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.265e+02 1.870e+02 2.224e+02 2.796e+02 5.273e+02, threshold=4.448e+02, percent-clipped=2.0 +2023-03-25 23:03:33,363 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.88 vs. 
limit=5.0 +2023-03-25 23:03:48,479 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8657, 1.5966, 1.4706, 1.1587, 1.8752, 2.4457, 2.0337, 1.6082], + device='cuda:5'), covar=tensor([0.0334, 0.0513, 0.0566, 0.0576, 0.0449, 0.0217, 0.0303, 0.0484], + device='cuda:5'), in_proj_covar=tensor([0.0083, 0.0111, 0.0130, 0.0110, 0.0103, 0.0099, 0.0086, 0.0109], + device='cuda:5'), out_proj_covar=tensor([6.4912e-05, 8.8001e-05, 1.0550e-04, 8.7107e-05, 8.1934e-05, 7.4090e-05, + 6.6848e-05, 8.5690e-05], device='cuda:5') +2023-03-25 23:03:51,918 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.3858, 3.8054, 3.9220, 4.2392, 4.0796, 3.8354, 4.4688, 1.5158], + device='cuda:5'), covar=tensor([0.0654, 0.0826, 0.0732, 0.0864, 0.1168, 0.1276, 0.0613, 0.4874], + device='cuda:5'), in_proj_covar=tensor([0.0371, 0.0246, 0.0268, 0.0297, 0.0349, 0.0290, 0.0313, 0.0303], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:03:59,441 INFO [finetune.py:976] (5/7) Epoch 1, batch 4750, loss[loss=0.4189, simple_loss=0.4071, pruned_loss=0.2154, over 4810.00 frames. ], tot_loss[loss=0.3307, simple_loss=0.3555, pruned_loss=0.153, over 953578.99 frames. ], batch size: 41, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:04:36,377 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4797.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:04:39,212 INFO [finetune.py:976] (5/7) Epoch 1, batch 4800, loss[loss=0.3728, simple_loss=0.3857, pruned_loss=0.1799, over 4861.00 frames. ], tot_loss[loss=0.3328, simple_loss=0.3579, pruned_loss=0.1539, over 954981.05 frames. ], batch size: 47, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:04:56,837 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.406e+02 2.096e+02 2.556e+02 3.186e+02 5.883e+02, threshold=5.111e+02, percent-clipped=4.0 +2023-03-25 23:05:19,391 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.73 vs. limit=2.0 +2023-03-25 23:05:31,153 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4256, 1.6687, 1.7903, 2.0243, 1.8131, 4.1827, 1.3689, 1.9479], + device='cuda:5'), covar=tensor([0.1320, 0.1995, 0.1396, 0.1231, 0.1807, 0.0178, 0.1901, 0.2180], + device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0077, 0.0073, 0.0076, 0.0089, 0.0077, 0.0083, 0.0076], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-25 23:05:32,168 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=4845.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:05:41,983 INFO [finetune.py:976] (5/7) Epoch 1, batch 4850, loss[loss=0.3343, simple_loss=0.3543, pruned_loss=0.1572, over 4745.00 frames. ], tot_loss[loss=0.336, simple_loss=0.3623, pruned_loss=0.1548, over 955253.56 frames. ], batch size: 26, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:06:28,851 INFO [finetune.py:976] (5/7) Epoch 1, batch 4900, loss[loss=0.3763, simple_loss=0.3897, pruned_loss=0.1815, over 4895.00 frames. ], tot_loss[loss=0.3377, simple_loss=0.3638, pruned_loss=0.1557, over 953877.23 frames. 
], batch size: 32, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:06:45,500 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.056e+02 2.408e+02 2.893e+02 5.886e+02, threshold=4.817e+02, percent-clipped=2.0 +2023-03-25 23:06:57,042 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6209, 1.5029, 1.1586, 1.1225, 1.7129, 1.9925, 1.7079, 1.2044], + device='cuda:5'), covar=tensor([0.0249, 0.0434, 0.0620, 0.0495, 0.0268, 0.0232, 0.0282, 0.0453], + device='cuda:5'), in_proj_covar=tensor([0.0083, 0.0111, 0.0130, 0.0110, 0.0103, 0.0098, 0.0086, 0.0109], + device='cuda:5'), out_proj_covar=tensor([6.4810e-05, 8.7772e-05, 1.0520e-04, 8.7044e-05, 8.1781e-05, 7.3563e-05, + 6.6646e-05, 8.5316e-05], device='cuda:5') +2023-03-25 23:07:15,554 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4944.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:07:15,588 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4139, 1.1983, 1.3635, 1.4065, 1.7648, 1.3857, 0.9684, 1.1592], + device='cuda:5'), covar=tensor([0.3254, 0.3322, 0.2685, 0.2593, 0.2874, 0.1990, 0.4072, 0.2565], + device='cuda:5'), in_proj_covar=tensor([0.0209, 0.0194, 0.0180, 0.0167, 0.0215, 0.0165, 0.0193, 0.0170], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:07:19,812 INFO [finetune.py:976] (5/7) Epoch 1, batch 4950, loss[loss=0.2725, simple_loss=0.3081, pruned_loss=0.1184, over 4711.00 frames. ], tot_loss[loss=0.3373, simple_loss=0.3644, pruned_loss=0.1551, over 953315.65 frames. ], batch size: 23, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:08:11,784 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=4992.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:08:22,749 INFO [finetune.py:976] (5/7) Epoch 1, batch 5000, loss[loss=0.2907, simple_loss=0.3272, pruned_loss=0.1271, over 4814.00 frames. ], tot_loss[loss=0.334, simple_loss=0.3615, pruned_loss=0.1533, over 955451.75 frames. ], batch size: 41, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:08:23,437 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5892, 1.5989, 1.9334, 3.0951, 2.2996, 2.2603, 0.8201, 2.4755], + device='cuda:5'), covar=tensor([0.2055, 0.1662, 0.1468, 0.0584, 0.0914, 0.1412, 0.2257, 0.0803], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0116, 0.0134, 0.0156, 0.0103, 0.0142, 0.0126, 0.0106], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-25 23:08:32,725 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.012e+02 2.131e+02 2.461e+02 3.038e+02 5.796e+02, threshold=4.923e+02, percent-clipped=4.0 +2023-03-25 23:09:11,198 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.9727, 4.3031, 4.4335, 4.8018, 4.6488, 4.4721, 5.1045, 1.5169], + device='cuda:5'), covar=tensor([0.0641, 0.0762, 0.0688, 0.0854, 0.1156, 0.1094, 0.0476, 0.5193], + device='cuda:5'), in_proj_covar=tensor([0.0372, 0.0247, 0.0270, 0.0297, 0.0349, 0.0291, 0.0315, 0.0304], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:09:20,249 INFO [finetune.py:976] (5/7) Epoch 1, batch 5050, loss[loss=0.2845, simple_loss=0.3299, pruned_loss=0.1196, over 4867.00 frames. ], tot_loss[loss=0.3269, simple_loss=0.3554, pruned_loss=0.1493, over 954608.46 frames. 
], batch size: 34, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:09:32,261 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5584, 1.3044, 1.4215, 1.4846, 1.7960, 1.4550, 1.0460, 1.2418], + device='cuda:5'), covar=tensor([0.2640, 0.2840, 0.2260, 0.2112, 0.2446, 0.1706, 0.3682, 0.2111], + device='cuda:5'), in_proj_covar=tensor([0.0210, 0.0195, 0.0181, 0.0167, 0.0215, 0.0165, 0.0194, 0.0170], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:09:47,167 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1036, 2.4657, 1.8297, 1.6129, 2.7250, 2.5965, 2.2797, 2.1953], + device='cuda:5'), covar=tensor([0.0867, 0.0584, 0.1024, 0.1134, 0.0638, 0.0720, 0.0891, 0.1000], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0129, 0.0134, 0.0122, 0.0106, 0.0132, 0.0138, 0.0158], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:10:01,168 INFO [finetune.py:976] (5/7) Epoch 1, batch 5100, loss[loss=0.3238, simple_loss=0.3523, pruned_loss=0.1477, over 4757.00 frames. ], tot_loss[loss=0.3206, simple_loss=0.3493, pruned_loss=0.146, over 952158.00 frames. ], batch size: 27, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:10:09,454 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.068e+02 1.918e+02 2.379e+02 2.966e+02 8.444e+02, threshold=4.758e+02, percent-clipped=2.0 +2023-03-25 23:10:34,835 INFO [finetune.py:976] (5/7) Epoch 1, batch 5150, loss[loss=0.3107, simple_loss=0.3553, pruned_loss=0.1331, over 4811.00 frames. ], tot_loss[loss=0.3195, simple_loss=0.3485, pruned_loss=0.1452, over 953373.09 frames. ], batch size: 45, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:11:14,839 INFO [finetune.py:976] (5/7) Epoch 1, batch 5200, loss[loss=0.2906, simple_loss=0.3406, pruned_loss=0.1203, over 4851.00 frames. ], tot_loss[loss=0.3265, simple_loss=0.3551, pruned_loss=0.149, over 952117.70 frames. ], batch size: 49, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:11:18,657 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.5973, 1.6231, 1.6586, 0.8161, 1.4775, 1.8051, 1.7908, 1.5118], + device='cuda:5'), covar=tensor([0.0838, 0.0521, 0.0405, 0.0695, 0.0393, 0.0355, 0.0294, 0.0472], + device='cuda:5'), in_proj_covar=tensor([0.0119, 0.0143, 0.0111, 0.0121, 0.0121, 0.0111, 0.0136, 0.0139], + device='cuda:5'), out_proj_covar=tensor([9.0189e-05, 1.0698e-04, 8.1228e-05, 8.8909e-05, 8.7917e-05, 8.2158e-05, + 1.0163e-04, 1.0313e-04], device='cuda:5') +2023-03-25 23:11:24,774 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.261e+02 2.570e+02 3.078e+02 5.221e+02, threshold=5.140e+02, percent-clipped=2.0 +2023-03-25 23:12:06,969 INFO [finetune.py:976] (5/7) Epoch 1, batch 5250, loss[loss=0.3401, simple_loss=0.381, pruned_loss=0.1496, over 4796.00 frames. ], tot_loss[loss=0.3289, simple_loss=0.3578, pruned_loss=0.15, over 952814.37 frames. ], batch size: 40, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:12:55,131 INFO [finetune.py:976] (5/7) Epoch 1, batch 5300, loss[loss=0.2664, simple_loss=0.3219, pruned_loss=0.1055, over 4849.00 frames. ], tot_loss[loss=0.3291, simple_loss=0.3584, pruned_loss=0.1499, over 951549.42 frames. 
], batch size: 31, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:13:08,326 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.321e+02 2.032e+02 2.465e+02 2.907e+02 4.480e+02, threshold=4.930e+02, percent-clipped=0.0 +2023-03-25 23:13:49,134 INFO [finetune.py:976] (5/7) Epoch 1, batch 5350, loss[loss=0.2792, simple_loss=0.3113, pruned_loss=0.1235, over 4818.00 frames. ], tot_loss[loss=0.3255, simple_loss=0.3562, pruned_loss=0.1474, over 952251.88 frames. ], batch size: 25, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:13:58,224 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-03-25 23:14:48,106 INFO [finetune.py:976] (5/7) Epoch 1, batch 5400, loss[loss=0.3056, simple_loss=0.3419, pruned_loss=0.1347, over 4771.00 frames. ], tot_loss[loss=0.3226, simple_loss=0.353, pruned_loss=0.1461, over 953400.59 frames. ], batch size: 26, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:14:55,972 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.226e+02 1.939e+02 2.339e+02 2.729e+02 4.650e+02, threshold=4.678e+02, percent-clipped=0.0 +2023-03-25 23:15:14,275 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0804, 1.5803, 2.2731, 1.5483, 2.0657, 2.1945, 1.5978, 2.2908], + device='cuda:5'), covar=tensor([0.1798, 0.2323, 0.1763, 0.2383, 0.1146, 0.1857, 0.2760, 0.1142], + device='cuda:5'), in_proj_covar=tensor([0.0199, 0.0201, 0.0199, 0.0190, 0.0171, 0.0216, 0.0208, 0.0191], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:15:39,348 INFO [finetune.py:976] (5/7) Epoch 1, batch 5450, loss[loss=0.24, simple_loss=0.2817, pruned_loss=0.09911, over 4789.00 frames. ], tot_loss[loss=0.3181, simple_loss=0.3487, pruned_loss=0.1437, over 955397.56 frames. ], batch size: 29, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:15:59,401 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.83 vs. limit=5.0 +2023-03-25 23:16:31,592 INFO [finetune.py:976] (5/7) Epoch 1, batch 5500, loss[loss=0.3153, simple_loss=0.3446, pruned_loss=0.143, over 4833.00 frames. ], tot_loss[loss=0.3142, simple_loss=0.345, pruned_loss=0.1417, over 957171.19 frames. ], batch size: 30, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:16:45,962 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.302e+02 2.026e+02 2.277e+02 2.875e+02 1.009e+03, threshold=4.553e+02, percent-clipped=5.0 +2023-03-25 23:16:57,709 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5075, 1.2173, 1.3295, 1.4694, 1.9175, 1.4274, 1.1523, 1.2334], + device='cuda:5'), covar=tensor([0.2882, 0.3120, 0.2506, 0.2325, 0.2694, 0.1722, 0.3860, 0.2186], + device='cuda:5'), in_proj_covar=tensor([0.0212, 0.0198, 0.0183, 0.0169, 0.0218, 0.0166, 0.0197, 0.0172], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:17:09,137 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.00 vs. limit=5.0 +2023-03-25 23:17:20,557 INFO [finetune.py:976] (5/7) Epoch 1, batch 5550, loss[loss=0.3356, simple_loss=0.3614, pruned_loss=0.1549, over 4832.00 frames. ], tot_loss[loss=0.3164, simple_loss=0.3468, pruned_loss=0.143, over 956519.86 frames. ], batch size: 30, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:18:01,091 INFO [finetune.py:976] (5/7) Epoch 1, batch 5600, loss[loss=0.329, simple_loss=0.3722, pruned_loss=0.1429, over 4819.00 frames. 
], tot_loss[loss=0.3202, simple_loss=0.3519, pruned_loss=0.1443, over 955994.29 frames. ], batch size: 33, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:18:19,452 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 1.839e+02 2.287e+02 2.793e+02 4.099e+02, threshold=4.573e+02, percent-clipped=0.0 +2023-03-25 23:18:59,405 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=5650.0, num_to_drop=1, layers_to_drop={1} +2023-03-25 23:18:59,879 INFO [finetune.py:976] (5/7) Epoch 1, batch 5650, loss[loss=0.2754, simple_loss=0.3215, pruned_loss=0.1146, over 4792.00 frames. ], tot_loss[loss=0.3196, simple_loss=0.3525, pruned_loss=0.1434, over 954996.04 frames. ], batch size: 26, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:19:19,123 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.02 vs. limit=5.0 +2023-03-25 23:19:35,522 INFO [finetune.py:976] (5/7) Epoch 1, batch 5700, loss[loss=0.3027, simple_loss=0.312, pruned_loss=0.1467, over 3981.00 frames. ], tot_loss[loss=0.3175, simple_loss=0.3484, pruned_loss=0.1434, over 935232.77 frames. ], batch size: 17, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:19:41,538 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=5711.0, num_to_drop=1, layers_to_drop={1} +2023-03-25 23:19:43,181 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.079e+02 1.818e+02 2.245e+02 2.685e+02 4.321e+02, threshold=4.489e+02, percent-clipped=0.0 +2023-03-25 23:19:44,698 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.16 vs. limit=5.0 +2023-03-25 23:20:08,411 INFO [finetune.py:976] (5/7) Epoch 2, batch 0, loss[loss=0.3291, simple_loss=0.3585, pruned_loss=0.1499, over 4720.00 frames. ], tot_loss[loss=0.3291, simple_loss=0.3585, pruned_loss=0.1499, over 4720.00 frames. ], batch size: 54, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:20:08,411 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-25 23:20:24,999 INFO [finetune.py:1010] (5/7) Epoch 2, validation: loss=0.2224, simple_loss=0.2847, pruned_loss=0.08, over 2265189.00 frames. +2023-03-25 23:20:25,000 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6316MB +2023-03-25 23:20:48,839 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3804, 1.6539, 1.0022, 1.6897, 1.6959, 1.2122, 2.5262, 1.4547], + device='cuda:5'), covar=tensor([0.1972, 0.3192, 0.4027, 0.3676, 0.2457, 0.1980, 0.2075, 0.2787], + device='cuda:5'), in_proj_covar=tensor([0.0157, 0.0186, 0.0226, 0.0239, 0.0200, 0.0172, 0.0186, 0.0180], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:20:56,835 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=5755.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:21:22,833 INFO [finetune.py:976] (5/7) Epoch 2, batch 50, loss[loss=0.2992, simple_loss=0.3373, pruned_loss=0.1306, over 4816.00 frames. ], tot_loss[loss=0.3014, simple_loss=0.3398, pruned_loss=0.1315, over 216229.90 frames. 
], batch size: 33, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:21:30,839 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5379, 1.5786, 1.2372, 1.6683, 1.8790, 1.3567, 2.2415, 1.5511], + device='cuda:5'), covar=tensor([0.3661, 0.7136, 0.6913, 0.7155, 0.4535, 0.3604, 0.4721, 0.5300], + device='cuda:5'), in_proj_covar=tensor([0.0156, 0.0185, 0.0225, 0.0238, 0.0200, 0.0172, 0.0186, 0.0179], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:21:54,358 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.238e+02 1.870e+02 2.317e+02 2.912e+02 7.564e+02, threshold=4.633e+02, percent-clipped=3.0 +2023-03-25 23:21:55,685 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=5816.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:22:11,076 INFO [finetune.py:976] (5/7) Epoch 2, batch 100, loss[loss=0.2953, simple_loss=0.3253, pruned_loss=0.1326, over 4776.00 frames. ], tot_loss[loss=0.2954, simple_loss=0.3313, pruned_loss=0.1298, over 379514.12 frames. ], batch size: 51, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:22:15,901 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4076, 1.2432, 1.7533, 1.1744, 1.5072, 1.5604, 1.2562, 1.7291], + device='cuda:5'), covar=tensor([0.1982, 0.2450, 0.1331, 0.1823, 0.1262, 0.1700, 0.2932, 0.1273], + device='cuda:5'), in_proj_covar=tensor([0.0198, 0.0200, 0.0199, 0.0190, 0.0171, 0.0216, 0.0208, 0.0192], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:22:36,192 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=5858.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:22:49,648 INFO [finetune.py:976] (5/7) Epoch 2, batch 150, loss[loss=0.2622, simple_loss=0.3093, pruned_loss=0.1075, over 4775.00 frames. ], tot_loss[loss=0.2949, simple_loss=0.3288, pruned_loss=0.1305, over 507086.85 frames. ], batch size: 26, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:23:14,052 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4152, 1.3439, 1.4710, 2.3845, 1.7405, 2.2059, 0.7500, 1.9903], + device='cuda:5'), covar=tensor([0.1841, 0.1517, 0.1255, 0.0733, 0.0969, 0.0983, 0.1756, 0.0789], + device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0119, 0.0137, 0.0160, 0.0105, 0.0145, 0.0129, 0.0108], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-25 23:23:18,197 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.293e+02 1.883e+02 2.329e+02 2.858e+02 5.160e+02, threshold=4.657e+02, percent-clipped=2.0 +2023-03-25 23:23:21,829 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=5919.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:23:28,110 INFO [finetune.py:976] (5/7) Epoch 2, batch 200, loss[loss=0.3328, simple_loss=0.364, pruned_loss=0.1508, over 4900.00 frames. ], tot_loss[loss=0.2967, simple_loss=0.3293, pruned_loss=0.132, over 605431.91 frames. ], batch size: 32, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:23:29,245 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-03-25 23:23:30,072 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.04 vs. 
limit=5.0 +2023-03-25 23:23:45,589 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0000, 1.5930, 2.3032, 1.3621, 1.9247, 1.9803, 1.7948, 2.3211], + device='cuda:5'), covar=tensor([0.2084, 0.2495, 0.1679, 0.2887, 0.1261, 0.2147, 0.2837, 0.1262], + device='cuda:5'), in_proj_covar=tensor([0.0200, 0.0202, 0.0200, 0.0191, 0.0173, 0.0218, 0.0209, 0.0193], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:24:01,219 INFO [finetune.py:976] (5/7) Epoch 2, batch 250, loss[loss=0.3352, simple_loss=0.3846, pruned_loss=0.1429, over 4807.00 frames. ], tot_loss[loss=0.3009, simple_loss=0.3344, pruned_loss=0.1337, over 682296.92 frames. ], batch size: 45, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:24:11,280 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5044, 1.5255, 1.3953, 1.6181, 0.9917, 3.5162, 1.2949, 1.8585], + device='cuda:5'), covar=tensor([0.4198, 0.2900, 0.2507, 0.2651, 0.2502, 0.0206, 0.3234, 0.1839], + device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0106, 0.0112, 0.0113, 0.0107, 0.0091, 0.0094, 0.0091], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0003, 0.0004, 0.0004], + device='cuda:5') +2023-03-25 23:24:20,416 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=5990.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:24:42,067 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6006.0, num_to_drop=1, layers_to_drop={3} +2023-03-25 23:24:48,586 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.078e+02 1.968e+02 2.365e+02 2.842e+02 7.361e+02, threshold=4.731e+02, percent-clipped=2.0 +2023-03-25 23:25:01,946 INFO [finetune.py:976] (5/7) Epoch 2, batch 300, loss[loss=0.3354, simple_loss=0.3684, pruned_loss=0.1512, over 4902.00 frames. ], tot_loss[loss=0.3073, simple_loss=0.3412, pruned_loss=0.1366, over 743035.00 frames. ], batch size: 37, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:25:29,041 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6043.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:25:38,828 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6051.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:25:43,140 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6262, 1.4660, 2.0319, 3.0709, 2.2800, 2.2262, 0.8545, 2.4777], + device='cuda:5'), covar=tensor([0.1849, 0.1687, 0.1290, 0.0537, 0.0782, 0.1654, 0.2046, 0.0684], + device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0118, 0.0137, 0.0160, 0.0104, 0.0144, 0.0128, 0.0108], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-25 23:26:11,336 INFO [finetune.py:976] (5/7) Epoch 2, batch 350, loss[loss=0.2887, simple_loss=0.3404, pruned_loss=0.1185, over 4808.00 frames. ], tot_loss[loss=0.3089, simple_loss=0.3433, pruned_loss=0.1372, over 788594.03 frames. 
], batch size: 40, lr: 4.00e-03, grad_scale: 32.0 +2023-03-25 23:26:35,015 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6104.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:26:39,146 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6111.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:26:41,480 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.275e+02 2.097e+02 2.536e+02 2.955e+02 5.135e+02, threshold=5.071e+02, percent-clipped=1.0 +2023-03-25 23:26:59,639 INFO [finetune.py:976] (5/7) Epoch 2, batch 400, loss[loss=0.3516, simple_loss=0.3753, pruned_loss=0.1639, over 4887.00 frames. ], tot_loss[loss=0.3087, simple_loss=0.3437, pruned_loss=0.1369, over 826487.49 frames. ], batch size: 35, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:27:09,952 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6135.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:27:49,742 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6170.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:27:59,428 INFO [finetune.py:976] (5/7) Epoch 2, batch 450, loss[loss=0.2882, simple_loss=0.3078, pruned_loss=0.1343, over 4207.00 frames. ], tot_loss[loss=0.3067, simple_loss=0.3423, pruned_loss=0.1356, over 855128.95 frames. ], batch size: 18, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:28:14,486 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6196.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:28:25,685 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6203.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:28:33,051 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6211.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:28:34,828 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6214.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:28:35,342 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.140e+02 1.932e+02 2.244e+02 2.718e+02 3.817e+02, threshold=4.487e+02, percent-clipped=0.0 +2023-03-25 23:28:45,039 INFO [finetune.py:976] (5/7) Epoch 2, batch 500, loss[loss=0.2561, simple_loss=0.3017, pruned_loss=0.1053, over 4786.00 frames. ], tot_loss[loss=0.3032, simple_loss=0.3388, pruned_loss=0.1338, over 877865.51 frames. ], batch size: 28, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:28:52,710 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6231.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:29:12,328 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. 
limit=2.0 +2023-03-25 23:29:19,906 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.2973, 2.9768, 3.0156, 3.2227, 3.0646, 2.9022, 3.3601, 1.1135], + device='cuda:5'), covar=tensor([0.0969, 0.0898, 0.0979, 0.1138, 0.1476, 0.1554, 0.1090, 0.4388], + device='cuda:5'), in_proj_covar=tensor([0.0368, 0.0245, 0.0268, 0.0297, 0.0346, 0.0289, 0.0310, 0.0302], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:29:25,581 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6264.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:29:26,769 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6266.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:29:35,128 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6272.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:29:35,758 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1931, 1.8656, 1.5232, 0.6215, 1.6981, 1.9268, 1.6280, 1.7971], + device='cuda:5'), covar=tensor([0.0611, 0.0903, 0.1384, 0.2043, 0.1136, 0.1805, 0.1995, 0.0885], + device='cuda:5'), in_proj_covar=tensor([0.0160, 0.0180, 0.0193, 0.0177, 0.0201, 0.0199, 0.0203, 0.0191], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:29:38,741 INFO [finetune.py:976] (5/7) Epoch 2, batch 550, loss[loss=0.2375, simple_loss=0.2831, pruned_loss=0.09597, over 4766.00 frames. ], tot_loss[loss=0.3003, simple_loss=0.3359, pruned_loss=0.1323, over 895373.80 frames. ], batch size: 28, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:30:17,693 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6306.0, num_to_drop=1, layers_to_drop={0} +2023-03-25 23:30:18,847 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5867, 1.4946, 1.1321, 1.5581, 1.8028, 1.3620, 2.0729, 1.4845], + device='cuda:5'), covar=tensor([0.3850, 0.7934, 0.7877, 0.8161, 0.4650, 0.3678, 0.6401, 0.5764], + device='cuda:5'), in_proj_covar=tensor([0.0157, 0.0187, 0.0228, 0.0241, 0.0201, 0.0173, 0.0189, 0.0181], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:30:29,439 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.346e+02 1.947e+02 2.353e+02 2.718e+02 5.175e+02, threshold=4.705e+02, percent-clipped=1.0 +2023-03-25 23:30:47,520 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6327.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:30:48,006 INFO [finetune.py:976] (5/7) Epoch 2, batch 600, loss[loss=0.2698, simple_loss=0.3024, pruned_loss=0.1186, over 4771.00 frames. ], tot_loss[loss=0.3007, simple_loss=0.3361, pruned_loss=0.1327, over 906835.01 frames. 
], batch size: 26, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:30:49,368 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2189, 3.6566, 3.8057, 4.1124, 3.9685, 3.7786, 4.3460, 1.4747], + device='cuda:5'), covar=tensor([0.0703, 0.0781, 0.0652, 0.0838, 0.1181, 0.1214, 0.0637, 0.4900], + device='cuda:5'), in_proj_covar=tensor([0.0370, 0.0246, 0.0269, 0.0298, 0.0348, 0.0290, 0.0311, 0.0303], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:30:49,768 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.19 vs. limit=5.0 +2023-03-25 23:31:07,299 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6346.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:31:13,141 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=6354.0, num_to_drop=1, layers_to_drop={0} +2023-03-25 23:31:28,049 INFO [finetune.py:976] (5/7) Epoch 2, batch 650, loss[loss=0.3226, simple_loss=0.3559, pruned_loss=0.1447, over 4817.00 frames. ], tot_loss[loss=0.3033, simple_loss=0.3388, pruned_loss=0.1339, over 917157.28 frames. ], batch size: 40, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:31:39,560 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4324, 1.2029, 0.9853, 1.0720, 1.1943, 1.0998, 1.1229, 2.0108], + device='cuda:5'), covar=tensor([3.6755, 3.5203, 2.9490, 4.4643, 2.5711, 2.0805, 3.5106, 1.0909], + device='cuda:5'), in_proj_covar=tensor([0.0231, 0.0218, 0.0199, 0.0254, 0.0212, 0.0180, 0.0216, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-25 23:31:42,400 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6399.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:31:50,739 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6167, 1.6348, 1.5106, 1.7954, 1.1383, 3.6987, 1.4281, 1.9566], + device='cuda:5'), covar=tensor([0.3677, 0.2635, 0.2248, 0.2219, 0.2151, 0.0167, 0.3142, 0.1651], + device='cuda:5'), in_proj_covar=tensor([0.0125, 0.0107, 0.0113, 0.0114, 0.0108, 0.0092, 0.0095, 0.0092], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-25 23:31:51,333 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6411.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:31:53,174 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4260, 1.1476, 1.0002, 0.9998, 1.1746, 1.1156, 1.1363, 1.9564], + device='cuda:5'), covar=tensor([3.1329, 2.8752, 2.4319, 3.7478, 2.2104, 1.6437, 2.8804, 0.8590], + device='cuda:5'), in_proj_covar=tensor([0.0231, 0.0218, 0.0199, 0.0254, 0.0211, 0.0180, 0.0216, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-25 23:31:53,613 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 2.011e+02 2.373e+02 2.999e+02 4.783e+02, threshold=4.746e+02, percent-clipped=1.0 +2023-03-25 23:32:01,502 INFO [finetune.py:976] (5/7) Epoch 2, batch 700, loss[loss=0.3688, simple_loss=0.4035, pruned_loss=0.1671, over 4815.00 frames. ], tot_loss[loss=0.3037, simple_loss=0.3402, pruned_loss=0.1336, over 926413.97 frames. 
], batch size: 39, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:32:22,204 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=6459.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:32:24,552 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6462.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:32:27,972 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6467.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:32:34,494 INFO [finetune.py:976] (5/7) Epoch 2, batch 750, loss[loss=0.3163, simple_loss=0.3629, pruned_loss=0.1349, over 4872.00 frames. ], tot_loss[loss=0.305, simple_loss=0.3423, pruned_loss=0.1339, over 932391.35 frames. ], batch size: 34, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:32:37,652 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6692, 1.8209, 1.9478, 2.1368, 1.8062, 3.1467, 1.5419, 1.8783], + device='cuda:5'), covar=tensor([0.0866, 0.1262, 0.1328, 0.0851, 0.1299, 0.0281, 0.1180, 0.1358], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0078, 0.0075, 0.0078, 0.0090, 0.0079, 0.0083, 0.0077], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-25 23:32:42,506 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6491.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:32:49,083 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-03-25 23:32:58,318 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6514.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:32:58,813 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.464e+02 1.998e+02 2.267e+02 2.688e+02 5.596e+02, threshold=4.534e+02, percent-clipped=2.0 +2023-03-25 23:33:04,285 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6523.0, num_to_drop=1, layers_to_drop={2} +2023-03-25 23:33:04,551 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-03-25 23:33:06,035 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6526.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:33:06,080 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3076, 1.3388, 1.3292, 0.7793, 1.5949, 1.2945, 1.2949, 1.3218], + device='cuda:5'), covar=tensor([0.0740, 0.0790, 0.0760, 0.1093, 0.0628, 0.0921, 0.0839, 0.1295], + device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0132, 0.0139, 0.0127, 0.0108, 0.0137, 0.0144, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:33:07,176 INFO [finetune.py:976] (5/7) Epoch 2, batch 800, loss[loss=0.2593, simple_loss=0.2949, pruned_loss=0.1118, over 4797.00 frames. ], tot_loss[loss=0.305, simple_loss=0.342, pruned_loss=0.134, over 938028.07 frames. 
], batch size: 26, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:33:07,356 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6528.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:33:40,453 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6559.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:33:42,337 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=6562.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:33:46,516 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6567.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:33:58,059 INFO [finetune.py:976] (5/7) Epoch 2, batch 850, loss[loss=0.3036, simple_loss=0.3343, pruned_loss=0.1365, over 4844.00 frames. ], tot_loss[loss=0.3035, simple_loss=0.3401, pruned_loss=0.1334, over 941020.87 frames. ], batch size: 49, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:34:37,095 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.216e+02 1.789e+02 2.218e+02 2.697e+02 5.451e+02, threshold=4.436e+02, percent-clipped=1.0 +2023-03-25 23:34:42,623 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6622.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:34:46,287 INFO [finetune.py:976] (5/7) Epoch 2, batch 900, loss[loss=0.2543, simple_loss=0.3015, pruned_loss=0.1035, over 4856.00 frames. ], tot_loss[loss=0.2992, simple_loss=0.336, pruned_loss=0.1312, over 945748.91 frames. ], batch size: 47, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:34:47,133 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6629.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:35:02,807 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6646.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:35:11,588 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1931, 1.8777, 2.6601, 1.5723, 2.2451, 2.3459, 1.8539, 2.5200], + device='cuda:5'), covar=tensor([0.2024, 0.2316, 0.2016, 0.2754, 0.1196, 0.2025, 0.2858, 0.1232], + device='cuda:5'), in_proj_covar=tensor([0.0201, 0.0203, 0.0202, 0.0192, 0.0174, 0.0220, 0.0211, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:35:11,662 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-03-25 23:35:25,035 INFO [finetune.py:976] (5/7) Epoch 2, batch 950, loss[loss=0.3072, simple_loss=0.3461, pruned_loss=0.1342, over 4914.00 frames. ], tot_loss[loss=0.2974, simple_loss=0.334, pruned_loss=0.1304, over 947140.96 frames. ], batch size: 37, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:35:32,472 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6690.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:35:34,864 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=6694.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:35:35,855 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. 
limit=2.0 +2023-03-25 23:35:37,915 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6699.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:35:48,888 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.136e+02 1.729e+02 2.077e+02 2.706e+02 4.933e+02, threshold=4.155e+02, percent-clipped=1.0 +2023-03-25 23:36:03,625 INFO [finetune.py:976] (5/7) Epoch 2, batch 1000, loss[loss=0.3168, simple_loss=0.3604, pruned_loss=0.1365, over 4891.00 frames. ], tot_loss[loss=0.3009, simple_loss=0.3374, pruned_loss=0.1322, over 948571.00 frames. ], batch size: 37, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:36:28,553 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=6747.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:36:33,124 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.75 vs. limit=5.0 +2023-03-25 23:36:40,548 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0709, 0.8416, 0.9209, 0.2855, 0.6311, 1.0679, 1.0095, 0.9825], + device='cuda:5'), covar=tensor([0.0851, 0.0462, 0.0409, 0.0657, 0.0461, 0.0398, 0.0334, 0.0437], + device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0148, 0.0113, 0.0125, 0.0125, 0.0113, 0.0140, 0.0140], + device='cuda:5'), out_proj_covar=tensor([9.2515e-05, 1.1026e-04, 8.2543e-05, 9.1524e-05, 9.0555e-05, 8.3703e-05, + 1.0490e-04, 1.0354e-04], device='cuda:5') +2023-03-25 23:37:01,239 INFO [finetune.py:976] (5/7) Epoch 2, batch 1050, loss[loss=0.2832, simple_loss=0.3312, pruned_loss=0.1176, over 4709.00 frames. ], tot_loss[loss=0.3032, simple_loss=0.3404, pruned_loss=0.1329, over 948491.26 frames. ], batch size: 59, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:37:09,207 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6791.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:37:17,521 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0344, 2.1616, 2.2415, 1.1293, 2.4178, 2.1562, 1.6559, 1.9508], + device='cuda:5'), covar=tensor([0.0793, 0.1353, 0.1862, 0.2686, 0.1818, 0.2090, 0.2297, 0.1732], + device='cuda:5'), in_proj_covar=tensor([0.0160, 0.0181, 0.0194, 0.0179, 0.0202, 0.0201, 0.0205, 0.0192], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:37:21,072 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.76 vs. limit=5.0 +2023-03-25 23:37:26,911 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-03-25 23:37:29,664 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.307e+02 2.050e+02 2.547e+02 2.958e+02 5.414e+02, threshold=5.095e+02, percent-clipped=8.0 +2023-03-25 23:37:37,328 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6818.0, num_to_drop=1, layers_to_drop={1} +2023-03-25 23:37:40,269 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6823.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:37:47,840 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6826.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:37:48,963 INFO [finetune.py:976] (5/7) Epoch 2, batch 1100, loss[loss=0.3043, simple_loss=0.3436, pruned_loss=0.1326, over 4753.00 frames. ], tot_loss[loss=0.3042, simple_loss=0.342, pruned_loss=0.1332, over 952061.19 frames. 
], batch size: 26, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:37:56,676 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=6839.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:37:58,619 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5469, 0.9470, 1.1751, 1.1640, 1.0063, 1.0518, 1.1539, 1.1893], + device='cuda:5'), covar=tensor([2.4554, 4.9419, 3.2502, 4.0585, 4.3879, 2.9622, 5.5396, 2.9680], + device='cuda:5'), in_proj_covar=tensor([0.0217, 0.0250, 0.0236, 0.0263, 0.0241, 0.0213, 0.0272, 0.0209], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-25 23:38:18,856 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6859.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:38:23,768 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6867.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:38:28,924 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=6874.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:38:31,315 INFO [finetune.py:976] (5/7) Epoch 2, batch 1150, loss[loss=0.3246, simple_loss=0.3535, pruned_loss=0.1478, over 4169.00 frames. ], tot_loss[loss=0.3045, simple_loss=0.3427, pruned_loss=0.1332, over 953891.65 frames. ], batch size: 65, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:38:31,437 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6878.0, num_to_drop=1, layers_to_drop={0} +2023-03-25 23:38:36,047 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8377, 1.1397, 0.9116, 1.7547, 2.0884, 1.5037, 1.3943, 1.7830], + device='cuda:5'), covar=tensor([0.1374, 0.1969, 0.2109, 0.1022, 0.1996, 0.2179, 0.1342, 0.1702], + device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0096, 0.0115, 0.0092, 0.0123, 0.0095, 0.0098, 0.0093], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-25 23:38:39,979 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.95 vs. limit=5.0 +2023-03-25 23:38:52,178 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=6907.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:38:53,401 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6909.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:38:56,973 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.223e+02 2.014e+02 2.348e+02 2.865e+02 5.036e+02, threshold=4.696e+02, percent-clipped=0.0 +2023-03-25 23:38:57,045 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=6915.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:39:05,084 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.02 vs. limit=5.0 +2023-03-25 23:39:08,212 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6922.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:39:17,355 INFO [finetune.py:976] (5/7) Epoch 2, batch 1200, loss[loss=0.2545, simple_loss=0.2866, pruned_loss=0.1112, over 4724.00 frames. ], tot_loss[loss=0.3017, simple_loss=0.3397, pruned_loss=0.1318, over 952596.12 frames. ], batch size: 23, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:39:20,539 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. 
limit=2.0 +2023-03-25 23:39:31,076 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6939.0, num_to_drop=1, layers_to_drop={2} +2023-03-25 23:39:48,353 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-03-25 23:39:49,899 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=6970.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:39:49,967 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6970.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:39:56,179 INFO [finetune.py:976] (5/7) Epoch 2, batch 1250, loss[loss=0.2504, simple_loss=0.2994, pruned_loss=0.1007, over 4931.00 frames. ], tot_loss[loss=0.2964, simple_loss=0.3346, pruned_loss=0.1291, over 954228.77 frames. ], batch size: 33, lr: 4.00e-03, grad_scale: 16.0 +2023-03-25 23:40:00,602 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6985.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:40:13,853 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1556, 1.4564, 0.9997, 1.2957, 1.4695, 2.5125, 1.2616, 1.5886], + device='cuda:5'), covar=tensor([0.1110, 0.1753, 0.1243, 0.1123, 0.1738, 0.0379, 0.1567, 0.1698], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0078, 0.0075, 0.0078, 0.0090, 0.0079, 0.0084, 0.0077], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-25 23:40:23,991 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.288e+02 1.918e+02 2.412e+02 2.798e+02 4.765e+02, threshold=4.825e+02, percent-clipped=1.0 +2023-03-25 23:40:31,021 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.11 vs. limit=2.0 +2023-03-25 23:40:39,100 INFO [finetune.py:976] (5/7) Epoch 2, batch 1300, loss[loss=0.2434, simple_loss=0.2897, pruned_loss=0.09848, over 4774.00 frames. ], tot_loss[loss=0.292, simple_loss=0.33, pruned_loss=0.127, over 954378.31 frames. ], batch size: 28, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:40:41,757 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-03-25 23:40:51,036 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4028, 2.0403, 1.7145, 0.8411, 1.8499, 1.9998, 1.6459, 1.9227], + device='cuda:5'), covar=tensor([0.0651, 0.0979, 0.1543, 0.2255, 0.1316, 0.2154, 0.2305, 0.0993], + device='cuda:5'), in_proj_covar=tensor([0.0162, 0.0184, 0.0197, 0.0180, 0.0204, 0.0203, 0.0207, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:40:52,840 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7048.0, num_to_drop=1, layers_to_drop={0} +2023-03-25 23:41:17,563 INFO [finetune.py:976] (5/7) Epoch 2, batch 1350, loss[loss=0.2574, simple_loss=0.3114, pruned_loss=0.1017, over 4930.00 frames. ], tot_loss[loss=0.2906, simple_loss=0.329, pruned_loss=0.1261, over 955358.00 frames. 
], batch size: 38, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:41:17,690 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2670, 1.8763, 2.8726, 1.6588, 2.3771, 2.5305, 2.1748, 2.6234], + device='cuda:5'), covar=tensor([0.2054, 0.2502, 0.1783, 0.2796, 0.1264, 0.1967, 0.2582, 0.1258], + device='cuda:5'), in_proj_covar=tensor([0.0203, 0.0204, 0.0203, 0.0194, 0.0176, 0.0222, 0.0212, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:41:26,195 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4743, 1.2848, 1.0914, 1.2019, 1.2812, 1.1863, 1.1891, 1.9703], + device='cuda:5'), covar=tensor([3.1288, 3.0410, 2.3612, 3.6756, 2.3960, 1.7471, 2.9510, 0.9886], + device='cuda:5'), in_proj_covar=tensor([0.0237, 0.0223, 0.0203, 0.0260, 0.0217, 0.0184, 0.0220, 0.0165], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-25 23:41:41,177 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7109.0, num_to_drop=1, layers_to_drop={2} +2023-03-25 23:41:47,303 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 1.876e+02 2.208e+02 2.586e+02 5.614e+02, threshold=4.416e+02, percent-clipped=5.0 +2023-03-25 23:41:49,210 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7118.0, num_to_drop=1, layers_to_drop={2} +2023-03-25 23:41:52,282 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7123.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:41:55,447 INFO [finetune.py:976] (5/7) Epoch 2, batch 1400, loss[loss=0.3013, simple_loss=0.3386, pruned_loss=0.132, over 4889.00 frames. ], tot_loss[loss=0.2925, simple_loss=0.3318, pruned_loss=0.1266, over 956182.05 frames. ], batch size: 32, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:42:31,286 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7163.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:42:33,030 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=7166.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:42:36,038 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=7171.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:42:40,185 INFO [finetune.py:976] (5/7) Epoch 2, batch 1450, loss[loss=0.3091, simple_loss=0.3494, pruned_loss=0.1345, over 4829.00 frames. ], tot_loss[loss=0.2941, simple_loss=0.3342, pruned_loss=0.127, over 956358.49 frames. ], batch size: 30, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:43:09,470 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-03-25 23:43:29,961 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.045e+02 1.933e+02 2.198e+02 2.731e+02 4.077e+02, threshold=4.395e+02, percent-clipped=0.0 +2023-03-25 23:43:40,435 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7224.0, num_to_drop=1, layers_to_drop={2} +2023-03-25 23:43:42,758 INFO [finetune.py:976] (5/7) Epoch 2, batch 1500, loss[loss=0.3129, simple_loss=0.3545, pruned_loss=0.1357, over 4909.00 frames. ], tot_loss[loss=0.2943, simple_loss=0.3347, pruned_loss=0.127, over 954590.98 frames. 
], batch size: 43, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:43:52,000 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7234.0, num_to_drop=1, layers_to_drop={1} +2023-03-25 23:44:25,654 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4857, 1.2539, 1.0241, 1.1269, 1.2121, 1.1535, 1.1769, 1.9800], + device='cuda:5'), covar=tensor([2.9111, 2.7281, 2.3542, 3.4082, 2.2457, 1.6037, 2.6972, 0.9123], + device='cuda:5'), in_proj_covar=tensor([0.0239, 0.0224, 0.0204, 0.0260, 0.0217, 0.0185, 0.0222, 0.0166], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-25 23:44:36,527 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7265.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:44:47,928 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7275.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:44:49,672 INFO [finetune.py:976] (5/7) Epoch 2, batch 1550, loss[loss=0.2964, simple_loss=0.3266, pruned_loss=0.1331, over 4724.00 frames. ], tot_loss[loss=0.294, simple_loss=0.3345, pruned_loss=0.1267, over 955179.72 frames. ], batch size: 59, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:44:57,966 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7285.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:45:31,203 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.380e+02 1.989e+02 2.300e+02 2.720e+02 4.709e+02, threshold=4.600e+02, percent-clipped=4.0 +2023-03-25 23:45:39,142 INFO [finetune.py:976] (5/7) Epoch 2, batch 1600, loss[loss=0.3022, simple_loss=0.3378, pruned_loss=0.1333, over 4929.00 frames. ], tot_loss[loss=0.2931, simple_loss=0.3328, pruned_loss=0.1267, over 952707.82 frames. ], batch size: 38, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:45:42,236 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=7333.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:45:44,190 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7336.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:46:35,387 INFO [finetune.py:976] (5/7) Epoch 2, batch 1650, loss[loss=0.2684, simple_loss=0.3038, pruned_loss=0.1165, over 4913.00 frames. ], tot_loss[loss=0.2883, simple_loss=0.328, pruned_loss=0.1243, over 951427.47 frames. ], batch size: 46, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:46:40,071 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-03-25 23:46:59,578 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7404.0, num_to_drop=1, layers_to_drop={0} +2023-03-25 23:47:01,923 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7407.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:47:07,158 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.344e+02 1.965e+02 2.306e+02 2.769e+02 7.650e+02, threshold=4.611e+02, percent-clipped=3.0 +2023-03-25 23:47:15,138 INFO [finetune.py:976] (5/7) Epoch 2, batch 1700, loss[loss=0.2537, simple_loss=0.3044, pruned_loss=0.1014, over 4927.00 frames. ], tot_loss[loss=0.2844, simple_loss=0.3241, pruned_loss=0.1224, over 954196.27 frames. 
], batch size: 38, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:47:48,746 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7468.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:48:00,186 INFO [finetune.py:976] (5/7) Epoch 2, batch 1750, loss[loss=0.2413, simple_loss=0.2852, pruned_loss=0.09871, over 4755.00 frames. ], tot_loss[loss=0.2858, simple_loss=0.3257, pruned_loss=0.123, over 952824.03 frames. ], batch size: 26, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:48:46,572 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 2.019e+02 2.351e+02 2.794e+02 5.482e+02, threshold=4.701e+02, percent-clipped=1.0 +2023-03-25 23:48:53,187 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7519.0, num_to_drop=1, layers_to_drop={0} +2023-03-25 23:49:03,494 INFO [finetune.py:976] (5/7) Epoch 2, batch 1800, loss[loss=0.3256, simple_loss=0.3516, pruned_loss=0.1498, over 4891.00 frames. ], tot_loss[loss=0.29, simple_loss=0.33, pruned_loss=0.125, over 953775.29 frames. ], batch size: 32, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:49:12,373 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7534.0, num_to_drop=1, layers_to_drop={0} +2023-03-25 23:49:44,259 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7565.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:49:57,764 INFO [finetune.py:976] (5/7) Epoch 2, batch 1850, loss[loss=0.2371, simple_loss=0.269, pruned_loss=0.1026, over 4695.00 frames. ], tot_loss[loss=0.2918, simple_loss=0.3314, pruned_loss=0.1261, over 953388.78 frames. ], batch size: 23, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:49:58,314 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-03-25 23:50:05,101 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=7582.0, num_to_drop=1, layers_to_drop={0} +2023-03-25 23:50:47,092 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=7613.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:50:48,710 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 2.140e+02 2.552e+02 3.133e+02 4.516e+02, threshold=5.105e+02, percent-clipped=0.0 +2023-03-25 23:51:00,619 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7624.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:51:02,926 INFO [finetune.py:976] (5/7) Epoch 2, batch 1900, loss[loss=0.2233, simple_loss=0.2841, pruned_loss=0.08126, over 4764.00 frames. ], tot_loss[loss=0.2924, simple_loss=0.332, pruned_loss=0.1264, over 953560.50 frames. ], batch size: 27, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:51:09,872 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7631.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:51:41,824 INFO [finetune.py:976] (5/7) Epoch 2, batch 1950, loss[loss=0.3071, simple_loss=0.3328, pruned_loss=0.1407, over 4868.00 frames. ], tot_loss[loss=0.2892, simple_loss=0.3293, pruned_loss=0.1245, over 952803.07 frames. 
], batch size: 31, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:51:47,813 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7685.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:51:59,371 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7704.0, num_to_drop=1, layers_to_drop={1} +2023-03-25 23:52:05,320 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-03-25 23:52:07,436 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.355e+02 1.951e+02 2.306e+02 2.786e+02 5.176e+02, threshold=4.611e+02, percent-clipped=1.0 +2023-03-25 23:52:17,328 INFO [finetune.py:976] (5/7) Epoch 2, batch 2000, loss[loss=0.3076, simple_loss=0.333, pruned_loss=0.1411, over 4822.00 frames. ], tot_loss[loss=0.2843, simple_loss=0.3249, pruned_loss=0.1219, over 952224.36 frames. ], batch size: 41, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:52:20,797 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-03-25 23:52:26,513 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7743.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:52:31,889 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=7752.0, num_to_drop=1, layers_to_drop={0} +2023-03-25 23:52:39,939 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7763.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:52:49,965 INFO [finetune.py:976] (5/7) Epoch 2, batch 2050, loss[loss=0.2959, simple_loss=0.3358, pruned_loss=0.128, over 4824.00 frames. ], tot_loss[loss=0.2799, simple_loss=0.3207, pruned_loss=0.1195, over 952440.66 frames. ], batch size: 39, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:53:05,278 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7802.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:53:06,489 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7804.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:53:10,236 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3975, 1.4681, 1.5943, 0.9166, 1.3144, 1.6844, 1.7399, 1.4737], + device='cuda:5'), covar=tensor([0.1013, 0.0688, 0.0429, 0.0765, 0.0487, 0.0530, 0.0289, 0.0584], + device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0149, 0.0114, 0.0126, 0.0126, 0.0113, 0.0140, 0.0139], + device='cuda:5'), out_proj_covar=tensor([9.3320e-05, 1.1094e-04, 8.3240e-05, 9.2833e-05, 9.1273e-05, 8.3558e-05, + 1.0479e-04, 1.0286e-04], device='cuda:5') +2023-03-25 23:53:14,191 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5963, 0.7546, 1.1994, 1.1867, 1.1412, 1.2251, 1.1052, 1.3520], + device='cuda:5'), covar=tensor([2.2835, 4.7204, 3.4549, 3.5325, 4.2805, 2.6455, 5.0746, 3.0060], + device='cuda:5'), in_proj_covar=tensor([0.0222, 0.0255, 0.0242, 0.0268, 0.0245, 0.0218, 0.0277, 0.0213], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-25 23:53:14,628 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.310e+02 1.893e+02 2.255e+02 2.795e+02 6.508e+02, threshold=4.510e+02, percent-clipped=3.0 +2023-03-25 23:53:17,155 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7819.0, num_to_drop=1, layers_to_drop={0} +2023-03-25 23:53:26,209 INFO [finetune.py:976] (5/7) Epoch 2, batch 2100, loss[loss=0.2829, 
simple_loss=0.3157, pruned_loss=0.1251, over 4836.00 frames. ], tot_loss[loss=0.2798, simple_loss=0.3206, pruned_loss=0.1194, over 953547.71 frames. ], batch size: 25, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:53:38,292 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3908, 1.2988, 1.3748, 1.3082, 0.7391, 2.2661, 0.6635, 1.2650], + device='cuda:5'), covar=tensor([0.3626, 0.2545, 0.2235, 0.2415, 0.2334, 0.0359, 0.3169, 0.1608], + device='cuda:5'), in_proj_covar=tensor([0.0125, 0.0107, 0.0113, 0.0114, 0.0110, 0.0093, 0.0097, 0.0093], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-25 23:53:46,457 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5513, 1.8540, 1.9033, 1.8800, 1.9346, 4.1592, 1.5096, 1.9344], + device='cuda:5'), covar=tensor([0.1069, 0.1577, 0.1191, 0.1134, 0.1525, 0.0162, 0.1462, 0.1765], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0078, 0.0075, 0.0078, 0.0090, 0.0080, 0.0083, 0.0077], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-25 23:53:47,055 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6300, 1.5080, 2.2567, 3.2025, 2.2341, 2.2754, 1.3965, 2.5794], + device='cuda:5'), covar=tensor([0.1856, 0.1563, 0.1176, 0.0564, 0.0843, 0.1446, 0.1574, 0.0644], + device='cuda:5'), in_proj_covar=tensor([0.0103, 0.0119, 0.0139, 0.0162, 0.0105, 0.0145, 0.0131, 0.0108], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-25 23:53:56,807 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7863.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:54:00,169 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=7867.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:54:08,365 INFO [finetune.py:976] (5/7) Epoch 2, batch 2150, loss[loss=0.3668, simple_loss=0.3986, pruned_loss=0.1676, over 4797.00 frames. ], tot_loss[loss=0.2844, simple_loss=0.3252, pruned_loss=0.1218, over 954822.36 frames. ], batch size: 45, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:54:09,087 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7036, 1.5537, 1.2414, 1.6838, 1.9158, 1.4020, 2.1200, 1.5888], + device='cuda:5'), covar=tensor([0.2989, 0.6354, 0.6748, 0.5879, 0.3977, 0.3125, 0.7315, 0.4194], + device='cuda:5'), in_proj_covar=tensor([0.0161, 0.0193, 0.0236, 0.0248, 0.0209, 0.0179, 0.0199, 0.0186], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-25 23:54:49,444 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0 +2023-03-25 23:54:49,702 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.034e+02 1.983e+02 2.392e+02 2.856e+02 4.131e+02, threshold=4.785e+02, percent-clipped=0.0 +2023-03-25 23:55:10,160 INFO [finetune.py:976] (5/7) Epoch 2, batch 2200, loss[loss=0.329, simple_loss=0.3559, pruned_loss=0.1511, over 4741.00 frames. ], tot_loss[loss=0.2857, simple_loss=0.3267, pruned_loss=0.1223, over 954281.05 frames. 
], batch size: 54, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:55:16,957 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7931.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:56:12,553 INFO [finetune.py:976] (5/7) Epoch 2, batch 2250, loss[loss=0.3021, simple_loss=0.3579, pruned_loss=0.1232, over 4779.00 frames. ], tot_loss[loss=0.2878, simple_loss=0.3292, pruned_loss=0.1232, over 953986.88 frames. ], batch size: 51, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:56:13,680 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=7979.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:56:19,273 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7980.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:57:05,129 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.321e+02 1.899e+02 2.269e+02 2.653e+02 4.132e+02, threshold=4.538e+02, percent-clipped=0.0 +2023-03-25 23:57:24,778 INFO [finetune.py:976] (5/7) Epoch 2, batch 2300, loss[loss=0.3775, simple_loss=0.3941, pruned_loss=0.1804, over 4837.00 frames. ], tot_loss[loss=0.2877, simple_loss=0.3298, pruned_loss=0.1229, over 954263.00 frames. ], batch size: 49, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:57:53,440 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8063.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:58:05,650 INFO [finetune.py:976] (5/7) Epoch 2, batch 2350, loss[loss=0.249, simple_loss=0.2933, pruned_loss=0.1023, over 4750.00 frames. ], tot_loss[loss=0.2838, simple_loss=0.3256, pruned_loss=0.121, over 950935.86 frames. ], batch size: 28, lr: 3.99e-03, grad_scale: 16.0 +2023-03-25 23:58:20,627 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=8099.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:58:39,061 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=8111.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:58:41,410 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.369e+02 1.949e+02 2.211e+02 2.649e+02 5.737e+02, threshold=4.422e+02, percent-clipped=2.0 +2023-03-25 23:59:01,109 INFO [finetune.py:976] (5/7) Epoch 2, batch 2400, loss[loss=0.2569, simple_loss=0.2923, pruned_loss=0.1107, over 4715.00 frames. ], tot_loss[loss=0.2804, simple_loss=0.3218, pruned_loss=0.1195, over 952083.32 frames. ], batch size: 59, lr: 3.99e-03, grad_scale: 32.0 +2023-03-25 23:59:27,554 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=8158.0, num_to_drop=0, layers_to_drop=set() +2023-03-25 23:59:39,736 INFO [finetune.py:976] (5/7) Epoch 2, batch 2450, loss[loss=0.2794, simple_loss=0.3183, pruned_loss=0.1203, over 4816.00 frames. ], tot_loss[loss=0.2766, simple_loss=0.3182, pruned_loss=0.1175, over 953584.66 frames. ], batch size: 39, lr: 3.99e-03, grad_scale: 32.0 +2023-03-25 23:59:40,545 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.83 vs. 
limit=5.0 +2023-03-26 00:00:01,379 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5856, 1.3429, 1.1092, 1.2189, 1.3297, 1.2583, 1.2492, 2.1821], + device='cuda:5'), covar=tensor([2.2950, 2.2499, 1.8650, 2.6355, 1.8055, 1.2824, 2.3351, 0.6427], + device='cuda:5'), in_proj_covar=tensor([0.0245, 0.0229, 0.0208, 0.0265, 0.0221, 0.0187, 0.0226, 0.0169], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-26 00:00:12,508 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3272, 1.3629, 1.5250, 0.7920, 1.2821, 1.6791, 1.5994, 1.4048], + device='cuda:5'), covar=tensor([0.1262, 0.0784, 0.0526, 0.0792, 0.0480, 0.0555, 0.0394, 0.0666], + device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0150, 0.0114, 0.0127, 0.0126, 0.0114, 0.0140, 0.0139], + device='cuda:5'), out_proj_covar=tensor([9.3581e-05, 1.1172e-04, 8.3372e-05, 9.3377e-05, 9.1498e-05, 8.4081e-05, + 1.0499e-04, 1.0331e-04], device='cuda:5') +2023-03-26 00:00:20,950 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.358e+02 1.915e+02 2.165e+02 2.652e+02 4.234e+02, threshold=4.330e+02, percent-clipped=0.0 +2023-03-26 00:00:34,016 INFO [finetune.py:976] (5/7) Epoch 2, batch 2500, loss[loss=0.3222, simple_loss=0.3593, pruned_loss=0.1425, over 4751.00 frames. ], tot_loss[loss=0.2789, simple_loss=0.3206, pruned_loss=0.1186, over 955110.03 frames. ], batch size: 54, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:01:11,704 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8265.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:01:22,187 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0 +2023-03-26 00:01:30,123 INFO [finetune.py:976] (5/7) Epoch 2, batch 2550, loss[loss=0.2836, simple_loss=0.3302, pruned_loss=0.1185, over 4829.00 frames. ], tot_loss[loss=0.2811, simple_loss=0.3234, pruned_loss=0.1194, over 955096.59 frames. ], batch size: 47, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:01:31,470 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8280.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:02:00,678 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.62 vs. limit=5.0 +2023-03-26 00:02:02,894 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.134e+02 2.551e+02 3.097e+02 6.135e+02, threshold=5.102e+02, percent-clipped=4.0 +2023-03-26 00:02:09,756 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8326.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:02:10,870 INFO [finetune.py:976] (5/7) Epoch 2, batch 2600, loss[loss=0.3149, simple_loss=0.3525, pruned_loss=0.1387, over 4730.00 frames. ], tot_loss[loss=0.2831, simple_loss=0.3254, pruned_loss=0.1204, over 954017.00 frames. ], batch size: 59, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:02:10,930 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=8328.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:03:09,432 INFO [finetune.py:976] (5/7) Epoch 2, batch 2650, loss[loss=0.2737, simple_loss=0.3183, pruned_loss=0.1145, over 4902.00 frames. ], tot_loss[loss=0.2844, simple_loss=0.3271, pruned_loss=0.1208, over 952110.64 frames. 
], batch size: 37, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:03:28,511 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8399.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:03:45,674 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.253e+02 1.975e+02 2.270e+02 2.796e+02 5.066e+02, threshold=4.539e+02, percent-clipped=0.0 +2023-03-26 00:03:57,444 INFO [finetune.py:976] (5/7) Epoch 2, batch 2700, loss[loss=0.2296, simple_loss=0.2889, pruned_loss=0.08508, over 4833.00 frames. ], tot_loss[loss=0.283, simple_loss=0.3259, pruned_loss=0.1201, over 952909.36 frames. ], batch size: 47, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:04:16,219 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7379, 1.6009, 1.3257, 1.5963, 1.8573, 1.5158, 2.1958, 1.6853], + device='cuda:5'), covar=tensor([0.3000, 0.5856, 0.5770, 0.5849, 0.3797, 0.2947, 0.5072, 0.3961], + device='cuda:5'), in_proj_covar=tensor([0.0162, 0.0194, 0.0237, 0.0250, 0.0211, 0.0180, 0.0200, 0.0187], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:04:21,451 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=8447.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:04:28,883 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8458.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:04:47,920 INFO [finetune.py:976] (5/7) Epoch 2, batch 2750, loss[loss=0.3118, simple_loss=0.3392, pruned_loss=0.1422, over 4897.00 frames. ], tot_loss[loss=0.2804, simple_loss=0.323, pruned_loss=0.1189, over 953377.30 frames. ], batch size: 43, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:05:15,011 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=8506.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:05:25,226 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.170e+02 1.975e+02 2.311e+02 2.901e+02 5.278e+02, threshold=4.621e+02, percent-clipped=1.0 +2023-03-26 00:05:29,693 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5836, 1.4460, 1.5352, 1.6699, 1.9902, 1.6714, 1.1217, 1.3838], + device='cuda:5'), covar=tensor([0.2907, 0.3004, 0.2473, 0.2253, 0.2597, 0.1692, 0.3787, 0.2300], + device='cuda:5'), in_proj_covar=tensor([0.0220, 0.0203, 0.0189, 0.0175, 0.0224, 0.0168, 0.0206, 0.0178], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:05:38,381 INFO [finetune.py:976] (5/7) Epoch 2, batch 2800, loss[loss=0.2179, simple_loss=0.2752, pruned_loss=0.08035, over 4816.00 frames. ], tot_loss[loss=0.2761, simple_loss=0.3187, pruned_loss=0.1167, over 954212.66 frames. ], batch size: 41, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:06:39,402 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9770, 1.8587, 2.5996, 1.4163, 2.1248, 1.9218, 1.6300, 2.2863], + device='cuda:5'), covar=tensor([0.2293, 0.2393, 0.1716, 0.2523, 0.1501, 0.2299, 0.2914, 0.1644], + device='cuda:5'), in_proj_covar=tensor([0.0204, 0.0206, 0.0203, 0.0194, 0.0177, 0.0222, 0.0212, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:06:43,770 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. 
limit=2.0 +2023-03-26 00:06:44,210 INFO [finetune.py:976] (5/7) Epoch 2, batch 2850, loss[loss=0.2984, simple_loss=0.3418, pruned_loss=0.1275, over 4770.00 frames. ], tot_loss[loss=0.2742, simple_loss=0.3167, pruned_loss=0.1158, over 954120.28 frames. ], batch size: 54, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:06:45,816 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.94 vs. limit=5.0 +2023-03-26 00:06:51,102 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2712, 1.3316, 1.5025, 1.1239, 1.1115, 1.4155, 1.3845, 1.5073], + device='cuda:5'), covar=tensor([0.1446, 0.2149, 0.1447, 0.1467, 0.1224, 0.1366, 0.2587, 0.1003], + device='cuda:5'), in_proj_covar=tensor([0.0204, 0.0206, 0.0203, 0.0194, 0.0177, 0.0222, 0.0213, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:07:08,736 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.223e+02 1.882e+02 2.289e+02 2.777e+02 4.779e+02, threshold=4.578e+02, percent-clipped=1.0 +2023-03-26 00:07:13,827 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=8621.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:07:18,066 INFO [finetune.py:976] (5/7) Epoch 2, batch 2900, loss[loss=0.2602, simple_loss=0.3098, pruned_loss=0.1053, over 4769.00 frames. ], tot_loss[loss=0.2768, simple_loss=0.3194, pruned_loss=0.1171, over 953588.30 frames. ], batch size: 27, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:07:40,866 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7595, 3.8213, 3.7492, 1.7523, 3.9333, 2.9378, 0.8264, 2.6917], + device='cuda:5'), covar=tensor([0.2488, 0.2704, 0.1589, 0.3545, 0.1162, 0.1040, 0.5004, 0.1587], + device='cuda:5'), in_proj_covar=tensor([0.0158, 0.0166, 0.0166, 0.0130, 0.0157, 0.0120, 0.0148, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 00:07:51,241 INFO [finetune.py:976] (5/7) Epoch 2, batch 2950, loss[loss=0.329, simple_loss=0.3723, pruned_loss=0.1429, over 4814.00 frames. ], tot_loss[loss=0.2794, simple_loss=0.3224, pruned_loss=0.1181, over 954918.39 frames. ], batch size: 40, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:08:00,531 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-03-26 00:08:17,805 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.871e+02 2.334e+02 2.881e+02 5.890e+02, threshold=4.669e+02, percent-clipped=2.0 +2023-03-26 00:08:27,819 INFO [finetune.py:976] (5/7) Epoch 2, batch 3000, loss[loss=0.2815, simple_loss=0.3269, pruned_loss=0.118, over 4760.00 frames. ], tot_loss[loss=0.2797, simple_loss=0.3232, pruned_loss=0.1181, over 955395.91 frames. 
], batch size: 54, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:08:27,819 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 00:08:31,370 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.8391, 3.3627, 3.4435, 3.7185, 3.5218, 3.3634, 3.9040, 1.3887], + device='cuda:5'), covar=tensor([0.0900, 0.0879, 0.0925, 0.1002, 0.1495, 0.1445, 0.0721, 0.5055], + device='cuda:5'), in_proj_covar=tensor([0.0372, 0.0248, 0.0276, 0.0300, 0.0349, 0.0291, 0.0316, 0.0306], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:08:32,104 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5817, 1.3982, 2.0549, 2.8615, 2.0457, 2.1952, 0.9891, 2.2893], + device='cuda:5'), covar=tensor([0.2106, 0.1840, 0.1297, 0.0651, 0.0921, 0.1346, 0.1940, 0.0816], + device='cuda:5'), in_proj_covar=tensor([0.0103, 0.0120, 0.0139, 0.0163, 0.0105, 0.0145, 0.0132, 0.0108], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 00:08:43,568 INFO [finetune.py:1010] (5/7) Epoch 2, validation: loss=0.1956, simple_loss=0.2636, pruned_loss=0.06384, over 2265189.00 frames. +2023-03-26 00:08:43,568 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6316MB +2023-03-26 00:08:44,825 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8084, 1.6642, 2.1352, 1.4663, 1.9094, 2.0085, 1.5914, 2.1705], + device='cuda:5'), covar=tensor([0.1718, 0.2255, 0.1658, 0.2239, 0.1234, 0.1818, 0.2668, 0.1067], + device='cuda:5'), in_proj_covar=tensor([0.0204, 0.0206, 0.0204, 0.0194, 0.0177, 0.0223, 0.0212, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:08:57,375 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-03-26 00:09:09,158 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8760.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:09:26,208 INFO [finetune.py:976] (5/7) Epoch 2, batch 3050, loss[loss=0.3243, simple_loss=0.3521, pruned_loss=0.1483, over 4233.00 frames. ], tot_loss[loss=0.2802, simple_loss=0.3239, pruned_loss=0.1183, over 954923.49 frames. ], batch size: 65, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:10:07,396 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.149e+02 1.935e+02 2.296e+02 2.584e+02 4.666e+02, threshold=4.592e+02, percent-clipped=0.0 +2023-03-26 00:10:11,752 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8821.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:10:16,915 INFO [finetune.py:976] (5/7) Epoch 2, batch 3100, loss[loss=0.2074, simple_loss=0.2807, pruned_loss=0.06703, over 4792.00 frames. ], tot_loss[loss=0.2766, simple_loss=0.3205, pruned_loss=0.1163, over 955396.16 frames. ], batch size: 29, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:10:59,132 INFO [finetune.py:976] (5/7) Epoch 2, batch 3150, loss[loss=0.2811, simple_loss=0.3205, pruned_loss=0.1208, over 4743.00 frames. ], tot_loss[loss=0.2745, simple_loss=0.3178, pruned_loss=0.1156, over 955112.23 frames. 
], batch size: 54, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:11:10,849 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6924, 3.4029, 3.3782, 1.5770, 3.5460, 2.6160, 0.9845, 2.4195], + device='cuda:5'), covar=tensor([0.2172, 0.1549, 0.1443, 0.3486, 0.0999, 0.1132, 0.4323, 0.1573], + device='cuda:5'), in_proj_covar=tensor([0.0157, 0.0166, 0.0166, 0.0129, 0.0156, 0.0120, 0.0148, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 00:11:28,138 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.188e+02 1.838e+02 2.165e+02 2.835e+02 5.909e+02, threshold=4.329e+02, percent-clipped=1.0 +2023-03-26 00:11:33,406 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8921.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:11:43,256 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8927.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:11:43,733 INFO [finetune.py:976] (5/7) Epoch 2, batch 3200, loss[loss=0.276, simple_loss=0.3084, pruned_loss=0.1218, over 4818.00 frames. ], tot_loss[loss=0.2675, simple_loss=0.3115, pruned_loss=0.1118, over 956104.98 frames. ], batch size: 30, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:12:25,338 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=8969.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:12:30,761 INFO [finetune.py:976] (5/7) Epoch 2, batch 3250, loss[loss=0.2756, simple_loss=0.3244, pruned_loss=0.1134, over 4892.00 frames. ], tot_loss[loss=0.2673, simple_loss=0.3109, pruned_loss=0.1118, over 952167.76 frames. ], batch size: 32, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:12:36,284 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8986.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:12:37,509 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8988.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:12:37,539 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7352, 1.6348, 1.2759, 1.8074, 1.7110, 1.4149, 2.1450, 1.6637], + device='cuda:5'), covar=tensor([0.3041, 0.6015, 0.6176, 0.5465, 0.4227, 0.3006, 0.5323, 0.3953], + device='cuda:5'), in_proj_covar=tensor([0.0162, 0.0194, 0.0237, 0.0251, 0.0212, 0.0181, 0.0202, 0.0187], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:13:02,660 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9013.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:13:03,777 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.184e+02 1.894e+02 2.281e+02 2.922e+02 4.541e+02, threshold=4.561e+02, percent-clipped=3.0 +2023-03-26 00:13:11,706 INFO [finetune.py:976] (5/7) Epoch 2, batch 3300, loss[loss=0.2544, simple_loss=0.3132, pruned_loss=0.09777, over 4753.00 frames. ], tot_loss[loss=0.2708, simple_loss=0.3151, pruned_loss=0.1133, over 952573.35 frames. 
], batch size: 26, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:13:25,346 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9047.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:13:26,395 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.6380, 4.0594, 4.1784, 4.4761, 4.3226, 4.0905, 4.7447, 1.4648], + device='cuda:5'), covar=tensor([0.0743, 0.0758, 0.0698, 0.0871, 0.1267, 0.1333, 0.0612, 0.5154], + device='cuda:5'), in_proj_covar=tensor([0.0372, 0.0248, 0.0275, 0.0299, 0.0348, 0.0291, 0.0315, 0.0306], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:13:42,687 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9074.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:13:44,941 INFO [finetune.py:976] (5/7) Epoch 2, batch 3350, loss[loss=0.2486, simple_loss=0.2998, pruned_loss=0.09872, over 4736.00 frames. ], tot_loss[loss=0.2753, simple_loss=0.3191, pruned_loss=0.1157, over 953822.86 frames. ], batch size: 27, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:14:20,192 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 1.942e+02 2.307e+02 2.996e+02 6.023e+02, threshold=4.614e+02, percent-clipped=2.0 +2023-03-26 00:14:20,877 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9116.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:14:28,092 INFO [finetune.py:976] (5/7) Epoch 2, batch 3400, loss[loss=0.2891, simple_loss=0.3421, pruned_loss=0.118, over 4805.00 frames. ], tot_loss[loss=0.2786, simple_loss=0.3221, pruned_loss=0.1175, over 954276.49 frames. ], batch size: 41, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:14:29,420 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.3111, 2.9256, 3.0136, 3.2362, 3.0464, 2.8697, 3.3712, 1.0273], + device='cuda:5'), covar=tensor([0.1056, 0.0909, 0.0981, 0.1163, 0.1534, 0.1661, 0.1065, 0.4766], + device='cuda:5'), in_proj_covar=tensor([0.0372, 0.0248, 0.0276, 0.0300, 0.0348, 0.0290, 0.0315, 0.0306], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:14:52,972 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-03-26 00:15:11,614 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8453, 1.6534, 1.3205, 1.6558, 1.6049, 1.4703, 1.5219, 2.5984], + device='cuda:5'), covar=tensor([2.0356, 2.2097, 1.6315, 2.6420, 1.7753, 1.2009, 2.2496, 0.5528], + device='cuda:5'), in_proj_covar=tensor([0.0249, 0.0232, 0.0210, 0.0269, 0.0223, 0.0189, 0.0229, 0.0172], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-26 00:15:13,368 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5554, 3.1635, 2.5565, 2.0752, 3.5284, 3.2920, 2.8482, 2.7695], + device='cuda:5'), covar=tensor([0.0793, 0.0472, 0.0772, 0.1012, 0.0219, 0.0634, 0.0732, 0.0807], + device='cuda:5'), in_proj_covar=tensor([0.0138, 0.0132, 0.0143, 0.0130, 0.0108, 0.0140, 0.0147, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:15:23,465 INFO [finetune.py:976] (5/7) Epoch 2, batch 3450, loss[loss=0.3089, simple_loss=0.3457, pruned_loss=0.1361, over 4891.00 frames. 
], tot_loss[loss=0.2793, simple_loss=0.323, pruned_loss=0.1178, over 955019.95 frames. ], batch size: 32, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:15:59,482 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.266e+02 1.993e+02 2.349e+02 2.850e+02 4.291e+02, threshold=4.698e+02, percent-clipped=0.0 +2023-03-26 00:16:12,592 INFO [finetune.py:976] (5/7) Epoch 2, batch 3500, loss[loss=0.2514, simple_loss=0.2823, pruned_loss=0.1103, over 4904.00 frames. ], tot_loss[loss=0.2754, simple_loss=0.3188, pruned_loss=0.116, over 955191.55 frames. ], batch size: 46, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:17:13,577 INFO [finetune.py:976] (5/7) Epoch 2, batch 3550, loss[loss=0.2278, simple_loss=0.2815, pruned_loss=0.08704, over 4787.00 frames. ], tot_loss[loss=0.2699, simple_loss=0.3137, pruned_loss=0.113, over 955204.25 frames. ], batch size: 29, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:17:16,763 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9283.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:17:46,015 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-03-26 00:17:53,395 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.769e+02 2.188e+02 2.766e+02 5.069e+02, threshold=4.376e+02, percent-clipped=2.0 +2023-03-26 00:18:09,360 INFO [finetune.py:976] (5/7) Epoch 2, batch 3600, loss[loss=0.2835, simple_loss=0.3303, pruned_loss=0.1184, over 4937.00 frames. ], tot_loss[loss=0.2676, simple_loss=0.3114, pruned_loss=0.1119, over 955815.35 frames. ], batch size: 33, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:18:22,991 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9342.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:18:27,821 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.49 vs. limit=5.0 +2023-03-26 00:18:45,804 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9369.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:18:50,404 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.07 vs. limit=5.0 +2023-03-26 00:18:51,738 INFO [finetune.py:976] (5/7) Epoch 2, batch 3650, loss[loss=0.2785, simple_loss=0.3425, pruned_loss=0.1072, over 4821.00 frames. ], tot_loss[loss=0.27, simple_loss=0.3143, pruned_loss=0.1128, over 957306.76 frames. ], batch size: 40, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:19:05,816 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.70 vs. 
limit=2.0 +2023-03-26 00:19:24,491 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9414.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:19:24,982 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.270e+02 1.902e+02 2.269e+02 2.850e+02 5.426e+02, threshold=4.539e+02, percent-clipped=4.0 +2023-03-26 00:19:31,908 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9416.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:19:41,288 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6712, 1.3524, 1.0150, 0.1883, 1.2257, 1.4964, 1.4511, 1.4188], + device='cuda:5'), covar=tensor([0.0906, 0.0940, 0.1542, 0.2316, 0.1399, 0.2632, 0.2348, 0.0881], + device='cuda:5'), in_proj_covar=tensor([0.0163, 0.0189, 0.0199, 0.0184, 0.0209, 0.0205, 0.0210, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:19:45,282 INFO [finetune.py:976] (5/7) Epoch 2, batch 3700, loss[loss=0.3103, simple_loss=0.3471, pruned_loss=0.1368, over 4803.00 frames. ], tot_loss[loss=0.2725, simple_loss=0.318, pruned_loss=0.1135, over 956949.78 frames. ], batch size: 45, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:20:08,678 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0942, 0.7731, 0.9228, 0.8776, 1.1667, 1.2141, 1.0422, 0.9049], + device='cuda:5'), covar=tensor([0.0269, 0.0466, 0.0659, 0.0426, 0.0345, 0.0348, 0.0304, 0.0506], + device='cuda:5'), in_proj_covar=tensor([0.0082, 0.0112, 0.0132, 0.0112, 0.0102, 0.0097, 0.0086, 0.0108], + device='cuda:5'), out_proj_covar=tensor([6.3864e-05, 8.8733e-05, 1.0701e-04, 8.8814e-05, 8.1444e-05, 7.2086e-05, + 6.6639e-05, 8.4458e-05], device='cuda:5') +2023-03-26 00:20:15,847 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=9464.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:20:23,937 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9475.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:20:26,043 INFO [finetune.py:976] (5/7) Epoch 2, batch 3750, loss[loss=0.3149, simple_loss=0.3602, pruned_loss=0.1348, over 4707.00 frames. ], tot_loss[loss=0.2743, simple_loss=0.32, pruned_loss=0.1143, over 956414.06 frames. ], batch size: 59, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:20:35,673 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-03-26 00:20:39,600 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4615, 1.2600, 1.2417, 1.2818, 1.6102, 1.3050, 1.6551, 1.4366], + device='cuda:5'), covar=tensor([0.2649, 0.5190, 0.5537, 0.4663, 0.3613, 0.2719, 0.4309, 0.3606], + device='cuda:5'), in_proj_covar=tensor([0.0163, 0.0195, 0.0238, 0.0252, 0.0215, 0.0182, 0.0203, 0.0188], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:20:55,392 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 1.893e+02 2.405e+02 2.686e+02 6.929e+02, threshold=4.810e+02, percent-clipped=1.0 +2023-03-26 00:20:57,341 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-03-26 00:21:01,202 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.01 vs. 
limit=5.0 +2023-03-26 00:21:10,673 INFO [finetune.py:976] (5/7) Epoch 2, batch 3800, loss[loss=0.3095, simple_loss=0.3377, pruned_loss=0.1406, over 4719.00 frames. ], tot_loss[loss=0.276, simple_loss=0.3216, pruned_loss=0.1152, over 956336.14 frames. ], batch size: 59, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:21:33,151 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6052, 1.6872, 1.7608, 1.8916, 1.7560, 3.6410, 1.4629, 1.8090], + device='cuda:5'), covar=tensor([0.1044, 0.1639, 0.1070, 0.1012, 0.1513, 0.0220, 0.1499, 0.1666], + device='cuda:5'), in_proj_covar=tensor([0.0077, 0.0080, 0.0077, 0.0079, 0.0092, 0.0082, 0.0085, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 00:21:53,455 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.2768, 2.9064, 2.9845, 3.1798, 3.0406, 2.9041, 3.3279, 1.0650], + device='cuda:5'), covar=tensor([0.1032, 0.0953, 0.0983, 0.1110, 0.1490, 0.1473, 0.1102, 0.4774], + device='cuda:5'), in_proj_covar=tensor([0.0369, 0.0246, 0.0274, 0.0297, 0.0343, 0.0288, 0.0313, 0.0301], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:22:03,071 INFO [finetune.py:976] (5/7) Epoch 2, batch 3850, loss[loss=0.244, simple_loss=0.2927, pruned_loss=0.09763, over 4828.00 frames. ], tot_loss[loss=0.2732, simple_loss=0.3195, pruned_loss=0.1135, over 958650.00 frames. ], batch size: 33, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:22:07,227 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9583.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:22:07,846 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9584.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:22:08,544 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-03-26 00:22:12,130 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0310, 2.4502, 1.8175, 1.6191, 2.3526, 2.4888, 2.1771, 2.0116], + device='cuda:5'), covar=tensor([0.0763, 0.0465, 0.0968, 0.1037, 0.0913, 0.0552, 0.0710, 0.1024], + device='cuda:5'), in_proj_covar=tensor([0.0137, 0.0131, 0.0142, 0.0129, 0.0108, 0.0139, 0.0145, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:22:33,610 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.288e+02 1.890e+02 2.331e+02 2.867e+02 5.576e+02, threshold=4.662e+02, percent-clipped=3.0 +2023-03-26 00:22:42,962 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-03-26 00:22:48,505 INFO [finetune.py:976] (5/7) Epoch 2, batch 3900, loss[loss=0.2897, simple_loss=0.33, pruned_loss=0.1247, over 4820.00 frames. ], tot_loss[loss=0.2693, simple_loss=0.315, pruned_loss=0.1118, over 957516.18 frames. 
], batch size: 39, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:22:55,984 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=9631.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:23:08,263 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9642.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:23:10,119 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9645.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:23:19,011 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5838, 2.2726, 1.9873, 0.9974, 2.1633, 1.9562, 1.6737, 1.9990], + device='cuda:5'), covar=tensor([0.0750, 0.0977, 0.1837, 0.2698, 0.1685, 0.2184, 0.2430, 0.1337], + device='cuda:5'), in_proj_covar=tensor([0.0162, 0.0189, 0.0199, 0.0183, 0.0209, 0.0204, 0.0209, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:23:28,466 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9658.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:23:29,684 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5230, 1.5164, 1.4883, 1.7267, 1.6241, 3.2634, 1.3765, 1.6998], + device='cuda:5'), covar=tensor([0.1039, 0.1806, 0.1246, 0.1086, 0.1660, 0.0275, 0.1536, 0.1697], + device='cuda:5'), in_proj_covar=tensor([0.0077, 0.0080, 0.0077, 0.0079, 0.0092, 0.0082, 0.0085, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 00:23:40,512 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9669.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:23:46,468 INFO [finetune.py:976] (5/7) Epoch 2, batch 3950, loss[loss=0.2522, simple_loss=0.3056, pruned_loss=0.09939, over 4854.00 frames. ], tot_loss[loss=0.2641, simple_loss=0.3102, pruned_loss=0.109, over 957947.46 frames. ], batch size: 44, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:24:00,117 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=9690.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:24:27,795 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-26 00:24:28,621 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.008e+02 1.967e+02 2.302e+02 2.784e+02 7.100e+02, threshold=4.604e+02, percent-clipped=2.0 +2023-03-26 00:24:29,312 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=9717.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:24:30,622 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9719.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:24:36,432 INFO [finetune.py:976] (5/7) Epoch 2, batch 4000, loss[loss=0.2944, simple_loss=0.3435, pruned_loss=0.1227, over 4913.00 frames. ], tot_loss[loss=0.2639, simple_loss=0.3093, pruned_loss=0.1093, over 956207.77 frames. ], batch size: 37, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:24:48,467 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9743.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:25:08,163 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. 
limit=2.0 +2023-03-26 00:25:10,942 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9770.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:25:16,246 INFO [finetune.py:976] (5/7) Epoch 2, batch 4050, loss[loss=0.2457, simple_loss=0.2944, pruned_loss=0.09856, over 4818.00 frames. ], tot_loss[loss=0.2689, simple_loss=0.3138, pruned_loss=0.112, over 955807.07 frames. ], batch size: 33, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:25:37,132 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9804.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:25:44,429 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.258e+02 1.934e+02 2.184e+02 2.585e+02 5.351e+02, threshold=4.368e+02, percent-clipped=2.0 +2023-03-26 00:25:57,342 INFO [finetune.py:976] (5/7) Epoch 2, batch 4100, loss[loss=0.2448, simple_loss=0.2968, pruned_loss=0.09638, over 4864.00 frames. ], tot_loss[loss=0.2713, simple_loss=0.3175, pruned_loss=0.1125, over 956122.80 frames. ], batch size: 31, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:26:08,961 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.8428, 3.3283, 3.4792, 3.7211, 3.5379, 3.3792, 3.9218, 1.2402], + device='cuda:5'), covar=tensor([0.0885, 0.0814, 0.0821, 0.0991, 0.1551, 0.1535, 0.0878, 0.5186], + device='cuda:5'), in_proj_covar=tensor([0.0370, 0.0246, 0.0275, 0.0297, 0.0344, 0.0288, 0.0314, 0.0303], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:26:09,413 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-03-26 00:26:34,085 INFO [finetune.py:976] (5/7) Epoch 2, batch 4150, loss[loss=0.254, simple_loss=0.3173, pruned_loss=0.09532, over 4788.00 frames. ], tot_loss[loss=0.2731, simple_loss=0.3191, pruned_loss=0.1136, over 956197.65 frames. ], batch size: 51, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:26:37,882 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7560, 1.0269, 1.4001, 1.3839, 1.2149, 1.3051, 1.3061, 1.4406], + device='cuda:5'), covar=tensor([1.7452, 3.3689, 2.4846, 2.7755, 3.0062, 2.0889, 3.7573, 2.2064], + device='cuda:5'), in_proj_covar=tensor([0.0224, 0.0256, 0.0246, 0.0269, 0.0245, 0.0218, 0.0278, 0.0215], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-26 00:26:54,457 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6493, 1.4622, 1.3134, 1.4943, 1.7414, 1.3964, 1.9807, 1.5907], + device='cuda:5'), covar=tensor([0.2575, 0.5268, 0.5565, 0.5197, 0.3711, 0.2734, 0.4613, 0.3689], + device='cuda:5'), in_proj_covar=tensor([0.0163, 0.0196, 0.0238, 0.0253, 0.0215, 0.0182, 0.0204, 0.0188], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:27:05,052 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.284e+02 1.789e+02 2.151e+02 2.605e+02 5.306e+02, threshold=4.302e+02, percent-clipped=2.0 +2023-03-26 00:27:17,333 INFO [finetune.py:976] (5/7) Epoch 2, batch 4200, loss[loss=0.2141, simple_loss=0.28, pruned_loss=0.0741, over 4800.00 frames. ], tot_loss[loss=0.2716, simple_loss=0.3181, pruned_loss=0.1125, over 956859.72 frames. 
], batch size: 45, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:27:25,690 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9940.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:28:05,870 INFO [finetune.py:976] (5/7) Epoch 2, batch 4250, loss[loss=0.1968, simple_loss=0.2527, pruned_loss=0.07045, over 4830.00 frames. ], tot_loss[loss=0.269, simple_loss=0.3156, pruned_loss=0.1112, over 956391.75 frames. ], batch size: 38, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:28:41,993 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10014.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:28:47,605 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.000e+02 1.798e+02 2.175e+02 2.768e+02 4.624e+02, threshold=4.351e+02, percent-clipped=3.0 +2023-03-26 00:29:00,128 INFO [finetune.py:976] (5/7) Epoch 2, batch 4300, loss[loss=0.2171, simple_loss=0.281, pruned_loss=0.07658, over 4755.00 frames. ], tot_loss[loss=0.2658, simple_loss=0.3119, pruned_loss=0.1098, over 956078.14 frames. ], batch size: 26, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:29:12,706 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10039.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:29:36,686 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10062.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 00:29:46,278 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10070.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:29:52,924 INFO [finetune.py:976] (5/7) Epoch 2, batch 4350, loss[loss=0.2264, simple_loss=0.2676, pruned_loss=0.09259, over 4830.00 frames. ], tot_loss[loss=0.2612, simple_loss=0.3071, pruned_loss=0.1076, over 955566.84 frames. ], batch size: 47, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:30:06,664 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10099.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:30:07,327 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10100.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 00:30:16,930 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7580, 0.9996, 0.9920, 1.4221, 2.0366, 0.9928, 1.2245, 1.4817], + device='cuda:5'), covar=tensor([0.1604, 0.2659, 0.2095, 0.1464, 0.2144, 0.2056, 0.1691, 0.2285], + device='cuda:5'), in_proj_covar=tensor([0.0093, 0.0098, 0.0118, 0.0094, 0.0126, 0.0098, 0.0100, 0.0096], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 00:30:25,357 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 1.930e+02 2.275e+02 2.717e+02 4.483e+02, threshold=4.550e+02, percent-clipped=1.0 +2023-03-26 00:30:27,186 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=10118.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:30:34,431 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10123.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 00:30:37,375 INFO [finetune.py:976] (5/7) Epoch 2, batch 4400, loss[loss=0.2812, simple_loss=0.3268, pruned_loss=0.1178, over 4894.00 frames. ], tot_loss[loss=0.2628, simple_loss=0.3084, pruned_loss=0.1086, over 955367.63 frames. 
], batch size: 37, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:31:21,124 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6449, 1.5098, 2.1722, 3.0250, 2.0969, 2.1310, 1.2238, 2.3757], + device='cuda:5'), covar=tensor([0.1715, 0.1464, 0.1108, 0.0521, 0.0813, 0.1906, 0.1594, 0.0668], + device='cuda:5'), in_proj_covar=tensor([0.0104, 0.0121, 0.0140, 0.0166, 0.0106, 0.0148, 0.0132, 0.0109], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 00:31:35,877 INFO [finetune.py:976] (5/7) Epoch 2, batch 4450, loss[loss=0.2426, simple_loss=0.3127, pruned_loss=0.08619, over 4823.00 frames. ], tot_loss[loss=0.2678, simple_loss=0.3134, pruned_loss=0.1111, over 955725.78 frames. ], batch size: 40, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:31:37,189 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6143, 3.7714, 3.6244, 1.9860, 3.9262, 2.9269, 0.8689, 2.5417], + device='cuda:5'), covar=tensor([0.2507, 0.2265, 0.1616, 0.3128, 0.1078, 0.0993, 0.4730, 0.1605], + device='cuda:5'), in_proj_covar=tensor([0.0158, 0.0169, 0.0168, 0.0130, 0.0158, 0.0121, 0.0148, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 00:31:56,202 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3635, 0.4297, 1.2345, 1.0629, 1.0579, 1.0112, 0.9270, 1.1391], + device='cuda:5'), covar=tensor([1.3292, 2.5384, 1.8130, 2.1212, 2.2509, 1.5923, 2.6589, 1.5998], + device='cuda:5'), in_proj_covar=tensor([0.0226, 0.0257, 0.0248, 0.0270, 0.0246, 0.0219, 0.0280, 0.0217], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-26 00:32:18,680 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.270e+02 1.960e+02 2.424e+02 3.015e+02 7.276e+02, threshold=4.848e+02, percent-clipped=5.0 +2023-03-26 00:32:36,846 INFO [finetune.py:976] (5/7) Epoch 2, batch 4500, loss[loss=0.2617, simple_loss=0.2882, pruned_loss=0.1176, over 4026.00 frames. ], tot_loss[loss=0.2699, simple_loss=0.3159, pruned_loss=0.112, over 954656.93 frames. ], batch size: 17, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:32:44,232 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10240.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:33:20,851 INFO [finetune.py:976] (5/7) Epoch 2, batch 4550, loss[loss=0.2435, simple_loss=0.2974, pruned_loss=0.09477, over 4833.00 frames. ], tot_loss[loss=0.269, simple_loss=0.3163, pruned_loss=0.1108, over 956030.38 frames. 
], batch size: 47, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:33:32,453 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=10288.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:33:43,373 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7464, 1.5181, 1.2519, 1.4563, 1.4719, 1.4045, 1.3850, 2.3697], + device='cuda:5'), covar=tensor([1.8993, 1.8244, 1.5174, 2.0451, 1.4827, 1.0549, 1.9238, 0.5145], + device='cuda:5'), in_proj_covar=tensor([0.0256, 0.0236, 0.0213, 0.0273, 0.0227, 0.0191, 0.0232, 0.0175], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-26 00:33:59,123 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10314.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:34:00,220 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.209e+02 1.803e+02 2.127e+02 2.576e+02 5.771e+02, threshold=4.255e+02, percent-clipped=1.0 +2023-03-26 00:34:00,934 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3279, 1.4970, 0.5310, 2.0078, 2.3796, 1.7781, 1.8624, 2.0696], + device='cuda:5'), covar=tensor([0.2029, 0.2869, 0.3067, 0.1686, 0.2419, 0.2487, 0.1790, 0.2828], + device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0098, 0.0117, 0.0094, 0.0125, 0.0097, 0.0099, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 00:34:14,432 INFO [finetune.py:976] (5/7) Epoch 2, batch 4600, loss[loss=0.2518, simple_loss=0.2939, pruned_loss=0.1049, over 4152.00 frames. ], tot_loss[loss=0.266, simple_loss=0.314, pruned_loss=0.109, over 955337.89 frames. ], batch size: 18, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:34:50,214 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=10362.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:34:55,007 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.73 vs. limit=5.0 +2023-03-26 00:35:01,346 INFO [finetune.py:976] (5/7) Epoch 2, batch 4650, loss[loss=0.262, simple_loss=0.3089, pruned_loss=0.1076, over 4912.00 frames. ], tot_loss[loss=0.2656, simple_loss=0.3127, pruned_loss=0.1093, over 954620.70 frames. ], batch size: 43, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:35:12,313 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10395.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 00:35:14,769 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10399.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:35:21,408 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10409.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:35:25,535 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.369e+02 1.834e+02 2.117e+02 2.478e+02 4.313e+02, threshold=4.233e+02, percent-clipped=1.0 +2023-03-26 00:35:27,257 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10418.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 00:35:34,604 INFO [finetune.py:976] (5/7) Epoch 2, batch 4700, loss[loss=0.1976, simple_loss=0.2528, pruned_loss=0.07117, over 4815.00 frames. ], tot_loss[loss=0.2618, simple_loss=0.3085, pruned_loss=0.1076, over 955400.43 frames. 
], batch size: 30, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:35:46,698 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=10447.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:35:51,000 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10454.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:36:01,589 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10470.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:36:07,253 INFO [finetune.py:976] (5/7) Epoch 2, batch 4750, loss[loss=0.329, simple_loss=0.3672, pruned_loss=0.1454, over 4820.00 frames. ], tot_loss[loss=0.2612, simple_loss=0.3071, pruned_loss=0.1077, over 956206.49 frames. ], batch size: 40, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:36:41,886 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10515.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 00:36:42,340 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.257e+02 1.733e+02 2.215e+02 2.656e+02 7.843e+02, threshold=4.429e+02, percent-clipped=2.0 +2023-03-26 00:36:51,178 INFO [finetune.py:976] (5/7) Epoch 2, batch 4800, loss[loss=0.3214, simple_loss=0.3416, pruned_loss=0.1506, over 4709.00 frames. ], tot_loss[loss=0.2638, simple_loss=0.3096, pruned_loss=0.109, over 953764.56 frames. ], batch size: 23, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:36:51,626 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-03-26 00:37:43,276 INFO [finetune.py:976] (5/7) Epoch 2, batch 4850, loss[loss=0.2607, simple_loss=0.3162, pruned_loss=0.1026, over 4903.00 frames. ], tot_loss[loss=0.2656, simple_loss=0.3126, pruned_loss=0.1094, over 953936.45 frames. ], batch size: 46, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:38:02,119 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6112, 3.6612, 3.6364, 1.8945, 3.8542, 2.8471, 1.0011, 2.5611], + device='cuda:5'), covar=tensor([0.2389, 0.1857, 0.1555, 0.3302, 0.1022, 0.0953, 0.4592, 0.1613], + device='cuda:5'), in_proj_covar=tensor([0.0158, 0.0168, 0.0167, 0.0129, 0.0157, 0.0121, 0.0147, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 00:38:22,412 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.244e+02 1.945e+02 2.280e+02 2.839e+02 5.226e+02, threshold=4.560e+02, percent-clipped=1.0 +2023-03-26 00:38:27,840 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6095, 3.6268, 3.5911, 1.8527, 3.8416, 2.8382, 1.1511, 2.5753], + device='cuda:5'), covar=tensor([0.2595, 0.1400, 0.1418, 0.2896, 0.0883, 0.0868, 0.3805, 0.1301], + device='cuda:5'), in_proj_covar=tensor([0.0157, 0.0167, 0.0166, 0.0129, 0.0156, 0.0120, 0.0147, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 00:38:33,263 INFO [finetune.py:976] (5/7) Epoch 2, batch 4900, loss[loss=0.2495, simple_loss=0.2779, pruned_loss=0.1106, over 3971.00 frames. ], tot_loss[loss=0.2664, simple_loss=0.3133, pruned_loss=0.1097, over 951869.55 frames. 
], batch size: 17, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:38:41,045 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.5485, 3.9328, 4.0492, 4.3809, 4.2299, 3.9545, 4.6251, 1.4933], + device='cuda:5'), covar=tensor([0.0723, 0.0726, 0.0834, 0.0864, 0.1319, 0.1453, 0.0608, 0.5107], + device='cuda:5'), in_proj_covar=tensor([0.0371, 0.0247, 0.0277, 0.0298, 0.0343, 0.0290, 0.0312, 0.0302], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:38:48,221 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6662, 1.5738, 1.2494, 1.7673, 1.8796, 1.4435, 2.1226, 1.6687], + device='cuda:5'), covar=tensor([0.2760, 0.4922, 0.5630, 0.4537, 0.3518, 0.2722, 0.4557, 0.3352], + device='cuda:5'), in_proj_covar=tensor([0.0163, 0.0195, 0.0237, 0.0252, 0.0215, 0.0182, 0.0205, 0.0187], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:39:15,445 INFO [finetune.py:976] (5/7) Epoch 2, batch 4950, loss[loss=0.3013, simple_loss=0.3332, pruned_loss=0.1347, over 4880.00 frames. ], tot_loss[loss=0.2675, simple_loss=0.315, pruned_loss=0.11, over 953436.43 frames. ], batch size: 32, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:39:25,525 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8627, 1.1777, 1.3883, 1.4856, 1.3527, 1.4252, 1.4117, 1.4848], + device='cuda:5'), covar=tensor([1.7417, 3.4269, 2.5427, 2.8015, 3.0296, 2.0185, 3.7814, 2.2161], + device='cuda:5'), in_proj_covar=tensor([0.0225, 0.0255, 0.0248, 0.0267, 0.0244, 0.0218, 0.0278, 0.0215], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-26 00:39:27,280 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10695.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:39:36,963 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7214, 1.6106, 1.3552, 1.4477, 1.8794, 1.9864, 1.6811, 1.3459], + device='cuda:5'), covar=tensor([0.0218, 0.0294, 0.0594, 0.0324, 0.0207, 0.0327, 0.0233, 0.0337], + device='cuda:5'), in_proj_covar=tensor([0.0080, 0.0111, 0.0131, 0.0111, 0.0101, 0.0096, 0.0086, 0.0106], + device='cuda:5'), out_proj_covar=tensor([6.2872e-05, 8.7487e-05, 1.0627e-04, 8.8041e-05, 8.0378e-05, 7.2018e-05, + 6.6624e-05, 8.2840e-05], device='cuda:5') +2023-03-26 00:39:40,466 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.382e+02 1.832e+02 2.144e+02 2.563e+02 4.788e+02, threshold=4.289e+02, percent-clipped=1.0 +2023-03-26 00:39:42,246 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10718.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 00:39:48,193 INFO [finetune.py:976] (5/7) Epoch 2, batch 5000, loss[loss=0.2313, simple_loss=0.2735, pruned_loss=0.09449, over 4776.00 frames. ], tot_loss[loss=0.2646, simple_loss=0.3125, pruned_loss=0.1083, over 956019.04 frames. 
], batch size: 26, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:39:50,027 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.2369, 1.2619, 1.3778, 0.6483, 1.2044, 1.4175, 1.6000, 1.3435], + device='cuda:5'), covar=tensor([0.0930, 0.0712, 0.0510, 0.0635, 0.0514, 0.0600, 0.0349, 0.0618], + device='cuda:5'), in_proj_covar=tensor([0.0127, 0.0154, 0.0116, 0.0133, 0.0129, 0.0117, 0.0144, 0.0142], + device='cuda:5'), out_proj_covar=tensor([9.6035e-05, 1.1430e-04, 8.4989e-05, 9.7818e-05, 9.3756e-05, 8.6026e-05, + 1.0736e-04, 1.0517e-04], device='cuda:5') +2023-03-26 00:39:59,282 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1639, 3.6111, 3.7917, 3.8922, 3.5729, 3.4689, 4.2714, 1.5313], + device='cuda:5'), covar=tensor([0.1139, 0.1653, 0.1279, 0.1657, 0.2201, 0.2199, 0.1089, 0.6799], + device='cuda:5'), in_proj_covar=tensor([0.0371, 0.0246, 0.0277, 0.0298, 0.0343, 0.0291, 0.0311, 0.0302], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:39:59,861 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=10743.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:40:00,538 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10744.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:40:13,894 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10765.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:40:14,500 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=10766.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 00:40:19,855 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10774.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:40:25,991 INFO [finetune.py:976] (5/7) Epoch 2, batch 5050, loss[loss=0.2483, simple_loss=0.2958, pruned_loss=0.1004, over 4915.00 frames. ], tot_loss[loss=0.2614, simple_loss=0.3093, pruned_loss=0.1067, over 954700.35 frames. ], batch size: 36, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:40:48,608 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10805.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:40:52,130 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10810.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 00:40:55,640 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.117e+02 1.711e+02 2.025e+02 2.497e+02 4.623e+02, threshold=4.049e+02, percent-clipped=1.0 +2023-03-26 00:41:03,401 INFO [finetune.py:976] (5/7) Epoch 2, batch 5100, loss[loss=0.2908, simple_loss=0.3253, pruned_loss=0.1282, over 4903.00 frames. ], tot_loss[loss=0.2576, simple_loss=0.3055, pruned_loss=0.1048, over 955681.67 frames. ], batch size: 36, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:41:07,742 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10835.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:41:37,350 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-03-26 00:41:47,384 INFO [finetune.py:976] (5/7) Epoch 2, batch 5150, loss[loss=0.2164, simple_loss=0.2671, pruned_loss=0.08284, over 4785.00 frames. ], tot_loss[loss=0.2574, simple_loss=0.3049, pruned_loss=0.1049, over 954220.28 frames. 
], batch size: 29, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:41:54,268 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10885.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 00:41:56,073 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5275, 1.3743, 1.4073, 1.5093, 0.8740, 2.9526, 1.0331, 1.5131], + device='cuda:5'), covar=tensor([0.3496, 0.2496, 0.2184, 0.2382, 0.2328, 0.0278, 0.2996, 0.1572], + device='cuda:5'), in_proj_covar=tensor([0.0127, 0.0108, 0.0114, 0.0116, 0.0113, 0.0095, 0.0099, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 00:42:25,067 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.204e+02 1.747e+02 2.089e+02 2.521e+02 5.475e+02, threshold=4.178e+02, percent-clipped=1.0 +2023-03-26 00:42:37,755 INFO [finetune.py:976] (5/7) Epoch 2, batch 5200, loss[loss=0.2091, simple_loss=0.2619, pruned_loss=0.07814, over 4762.00 frames. ], tot_loss[loss=0.2604, simple_loss=0.3082, pruned_loss=0.1063, over 955280.70 frames. ], batch size: 23, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:43:00,784 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10946.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 00:43:29,722 INFO [finetune.py:976] (5/7) Epoch 2, batch 5250, loss[loss=0.2626, simple_loss=0.3074, pruned_loss=0.1089, over 4860.00 frames. ], tot_loss[loss=0.2625, simple_loss=0.3112, pruned_loss=0.1069, over 955950.63 frames. ], batch size: 31, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:43:45,322 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3742, 2.2004, 1.7624, 0.9388, 1.9102, 1.8462, 1.5790, 1.9833], + device='cuda:5'), covar=tensor([0.0886, 0.0813, 0.1697, 0.2338, 0.1635, 0.2247, 0.2358, 0.1060], + device='cuda:5'), in_proj_covar=tensor([0.0163, 0.0192, 0.0202, 0.0185, 0.0211, 0.0207, 0.0213, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:43:53,629 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8076, 1.5245, 2.1646, 1.4989, 2.0037, 1.8919, 1.5200, 2.2402], + device='cuda:5'), covar=tensor([0.1633, 0.2472, 0.1843, 0.2239, 0.0981, 0.1779, 0.2895, 0.0999], + device='cuda:5'), in_proj_covar=tensor([0.0205, 0.0207, 0.0204, 0.0196, 0.0179, 0.0225, 0.0214, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:43:59,038 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.8312, 3.3299, 3.4358, 3.7021, 3.5514, 3.3450, 3.9027, 1.2676], + device='cuda:5'), covar=tensor([0.0819, 0.0794, 0.0804, 0.0951, 0.1323, 0.1505, 0.0789, 0.4780], + device='cuda:5'), in_proj_covar=tensor([0.0370, 0.0246, 0.0276, 0.0297, 0.0342, 0.0290, 0.0312, 0.0301], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 00:44:03,162 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.206e+02 2.014e+02 2.376e+02 2.957e+02 8.531e+02, threshold=4.753e+02, percent-clipped=3.0 +2023-03-26 00:44:10,959 INFO [finetune.py:976] (5/7) Epoch 2, batch 5300, loss[loss=0.2823, simple_loss=0.3164, pruned_loss=0.1241, over 4777.00 frames. ], tot_loss[loss=0.2645, simple_loss=0.3127, pruned_loss=0.1082, over 956373.64 frames. 
], batch size: 26, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:44:12,300 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0808, 1.7981, 1.7032, 1.4174, 2.1470, 2.5950, 2.1208, 1.8488], + device='cuda:5'), covar=tensor([0.0324, 0.0468, 0.0564, 0.0472, 0.0394, 0.0306, 0.0272, 0.0389], + device='cuda:5'), in_proj_covar=tensor([0.0081, 0.0112, 0.0133, 0.0113, 0.0102, 0.0097, 0.0087, 0.0107], + device='cuda:5'), out_proj_covar=tensor([6.3482e-05, 8.8641e-05, 1.0771e-04, 8.9274e-05, 8.1305e-05, 7.2726e-05, + 6.7464e-05, 8.3900e-05], device='cuda:5') +2023-03-26 00:44:44,457 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11065.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:44:45,741 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9985, 1.7826, 1.5913, 1.9308, 1.7168, 1.7583, 1.6784, 2.4943], + device='cuda:5'), covar=tensor([1.3326, 1.4173, 1.0632, 1.4182, 1.3023, 0.7637, 1.5436, 0.4056], + device='cuda:5'), in_proj_covar=tensor([0.0258, 0.0238, 0.0214, 0.0274, 0.0228, 0.0191, 0.0233, 0.0176], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-26 00:44:58,486 INFO [finetune.py:976] (5/7) Epoch 2, batch 5350, loss[loss=0.2672, simple_loss=0.3145, pruned_loss=0.11, over 4789.00 frames. ], tot_loss[loss=0.2626, simple_loss=0.3112, pruned_loss=0.107, over 954008.93 frames. ], batch size: 51, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:45:16,014 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11100.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:45:23,527 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11110.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 00:45:25,460 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=11113.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:45:27,194 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.217e+02 1.830e+02 2.178e+02 2.684e+02 7.389e+02, threshold=4.357e+02, percent-clipped=1.0 +2023-03-26 00:45:39,916 INFO [finetune.py:976] (5/7) Epoch 2, batch 5400, loss[loss=0.2506, simple_loss=0.3011, pruned_loss=0.1001, over 4684.00 frames. ], tot_loss[loss=0.2605, simple_loss=0.309, pruned_loss=0.106, over 956147.83 frames. ], batch size: 23, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:45:41,702 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11130.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:45:52,535 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8672, 1.3697, 1.5770, 1.5467, 1.4371, 1.4361, 1.5100, 1.6474], + device='cuda:5'), covar=tensor([1.4102, 2.3834, 1.6735, 2.2229, 2.2367, 1.5403, 2.7991, 1.4806], + device='cuda:5'), in_proj_covar=tensor([0.0228, 0.0258, 0.0251, 0.0271, 0.0246, 0.0220, 0.0280, 0.0219], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-26 00:46:05,249 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=11158.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:46:22,040 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11174.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:46:23,464 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. 
limit=2.0 +2023-03-26 00:46:24,391 INFO [finetune.py:976] (5/7) Epoch 2, batch 5450, loss[loss=0.2698, simple_loss=0.302, pruned_loss=0.1188, over 4828.00 frames. ], tot_loss[loss=0.2591, simple_loss=0.3067, pruned_loss=0.1058, over 957257.28 frames. ], batch size: 33, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:46:31,088 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9393, 1.9083, 1.7710, 2.1074, 1.3458, 4.6726, 1.7524, 2.4132], + device='cuda:5'), covar=tensor([0.3373, 0.2316, 0.1998, 0.2130, 0.1862, 0.0093, 0.2596, 0.1399], + device='cuda:5'), in_proj_covar=tensor([0.0127, 0.0109, 0.0114, 0.0117, 0.0113, 0.0096, 0.0099, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 00:46:55,236 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.192e+02 1.719e+02 1.995e+02 2.396e+02 4.116e+02, threshold=3.991e+02, percent-clipped=0.0 +2023-03-26 00:47:11,561 INFO [finetune.py:976] (5/7) Epoch 2, batch 5500, loss[loss=0.2634, simple_loss=0.3083, pruned_loss=0.1093, over 4855.00 frames. ], tot_loss[loss=0.2545, simple_loss=0.3023, pruned_loss=0.1033, over 955285.28 frames. ], batch size: 44, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:47:21,358 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11235.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:47:30,512 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11241.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 00:47:50,649 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5947, 1.5737, 1.9806, 3.3161, 2.3570, 2.2530, 1.0757, 2.5457], + device='cuda:5'), covar=tensor([0.2028, 0.1584, 0.1486, 0.0595, 0.0855, 0.1576, 0.1951, 0.0794], + device='cuda:5'), in_proj_covar=tensor([0.0104, 0.0121, 0.0140, 0.0166, 0.0106, 0.0147, 0.0131, 0.0109], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 00:48:00,492 INFO [finetune.py:976] (5/7) Epoch 2, batch 5550, loss[loss=0.2367, simple_loss=0.266, pruned_loss=0.1037, over 4206.00 frames. ], tot_loss[loss=0.2574, simple_loss=0.3052, pruned_loss=0.1049, over 955626.49 frames. ], batch size: 18, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:48:30,779 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.394e+02 1.988e+02 2.263e+02 2.636e+02 5.646e+02, threshold=4.525e+02, percent-clipped=4.0 +2023-03-26 00:48:37,284 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-03-26 00:48:38,279 INFO [finetune.py:976] (5/7) Epoch 2, batch 5600, loss[loss=0.3298, simple_loss=0.3637, pruned_loss=0.148, over 4740.00 frames. ], tot_loss[loss=0.26, simple_loss=0.3088, pruned_loss=0.1056, over 954199.06 frames. ], batch size: 54, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:49:19,623 INFO [finetune.py:976] (5/7) Epoch 2, batch 5650, loss[loss=0.2491, simple_loss=0.3071, pruned_loss=0.09553, over 4912.00 frames. ], tot_loss[loss=0.261, simple_loss=0.3109, pruned_loss=0.1056, over 954365.94 frames. ], batch size: 37, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:49:22,948 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. 
limit=2.0 +2023-03-26 00:49:34,463 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.7126, 1.5191, 1.5826, 0.7903, 1.5532, 1.8666, 1.7392, 1.5012], + device='cuda:5'), covar=tensor([0.0915, 0.0607, 0.0435, 0.0714, 0.0392, 0.0368, 0.0280, 0.0558], + device='cuda:5'), in_proj_covar=tensor([0.0129, 0.0155, 0.0117, 0.0134, 0.0131, 0.0117, 0.0145, 0.0143], + device='cuda:5'), out_proj_covar=tensor([9.6894e-05, 1.1550e-04, 8.5562e-05, 9.8734e-05, 9.5168e-05, 8.6637e-05, + 1.0789e-04, 1.0573e-04], device='cuda:5') +2023-03-26 00:49:44,831 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11400.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:49:54,702 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.185e+02 1.700e+02 2.043e+02 2.333e+02 3.537e+02, threshold=4.085e+02, percent-clipped=0.0 +2023-03-26 00:50:01,884 INFO [finetune.py:976] (5/7) Epoch 2, batch 5700, loss[loss=0.2408, simple_loss=0.2744, pruned_loss=0.1036, over 4159.00 frames. ], tot_loss[loss=0.2585, simple_loss=0.3069, pruned_loss=0.1051, over 935821.08 frames. ], batch size: 18, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:50:03,186 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11430.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:50:13,815 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=11448.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:50:33,841 INFO [finetune.py:976] (5/7) Epoch 3, batch 0, loss[loss=0.2558, simple_loss=0.3071, pruned_loss=0.1022, over 4806.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3071, pruned_loss=0.1022, over 4806.00 frames. ], batch size: 39, lr: 3.99e-03, grad_scale: 16.0 +2023-03-26 00:50:33,841 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 00:50:55,329 INFO [finetune.py:1010] (5/7) Epoch 3, validation: loss=0.1864, simple_loss=0.2566, pruned_loss=0.05807, over 2265189.00 frames. +2023-03-26 00:50:55,329 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6316MB +2023-03-26 00:51:20,330 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=11478.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:51:38,814 INFO [finetune.py:976] (5/7) Epoch 3, batch 50, loss[loss=0.2916, simple_loss=0.329, pruned_loss=0.1271, over 4712.00 frames. ], tot_loss[loss=0.2637, simple_loss=0.3109, pruned_loss=0.1082, over 215607.49 frames. ], batch size: 54, lr: 3.99e-03, grad_scale: 32.0 +2023-03-26 00:51:45,831 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.161e+02 1.779e+02 2.075e+02 2.495e+02 4.593e+02, threshold=4.151e+02, percent-clipped=1.0 +2023-03-26 00:51:54,920 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11530.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:51:56,189 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11532.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 00:52:01,997 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11541.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 00:52:03,994 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-03-26 00:52:12,050 INFO [finetune.py:976] (5/7) Epoch 3, batch 100, loss[loss=0.2599, simple_loss=0.305, pruned_loss=0.1074, over 4892.00 frames. ], tot_loss[loss=0.2565, simple_loss=0.3038, pruned_loss=0.1046, over 378799.73 frames. 
], batch size: 32, lr: 3.99e-03, grad_scale: 32.0
+2023-03-26 00:52:33,515 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=11589.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 00:52:36,404 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7409, 1.7310, 1.6665, 1.7647, 1.2363, 3.0238, 1.4653, 1.9887],
+       device='cuda:5'), covar=tensor([0.2871, 0.2037, 0.1645, 0.1885, 0.1705, 0.0228, 0.2586, 0.1171],
+       device='cuda:5'), in_proj_covar=tensor([0.0128, 0.0109, 0.0114, 0.0117, 0.0114, 0.0096, 0.0099, 0.0096],
+       device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004],
+       device='cuda:5')
+2023-03-26 00:52:36,429 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11593.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 00:52:47,407 INFO [finetune.py:976] (5/7) Epoch 3, batch 150, loss[loss=0.1943, simple_loss=0.2516, pruned_loss=0.06849, over 4806.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.3012, pruned_loss=0.1043, over 507569.02 frames. ], batch size: 51, lr: 3.99e-03, grad_scale: 32.0
+2023-03-26 00:53:00,294 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.308e+02 1.853e+02 2.230e+02 2.582e+02 4.758e+02, threshold=4.459e+02, percent-clipped=3.0
+2023-03-26 00:53:24,146 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11635.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 00:53:47,001 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1604, 3.6094, 3.7547, 4.0062, 3.8887, 3.7289, 4.2637, 1.3121],
+       device='cuda:5'), covar=tensor([0.0815, 0.0876, 0.0885, 0.1086, 0.1336, 0.1488, 0.0744, 0.5196],
+       device='cuda:5'), in_proj_covar=tensor([0.0369, 0.0244, 0.0274, 0.0296, 0.0340, 0.0287, 0.0311, 0.0300],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 00:53:48,112 INFO [finetune.py:976] (5/7) Epoch 3, batch 200, loss[loss=0.2943, simple_loss=0.3344, pruned_loss=0.127, over 4810.00 frames. ], tot_loss[loss=0.2527, simple_loss=0.2992, pruned_loss=0.1031, over 608094.18 frames. ], batch size: 41, lr: 3.99e-03, grad_scale: 32.0
+2023-03-26 00:54:35,493 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11696.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 00:54:37,335 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11699.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 00:54:44,850 INFO [finetune.py:976] (5/7) Epoch 3, batch 250, loss[loss=0.2479, simple_loss=0.3106, pruned_loss=0.09262, over 4863.00 frames. ], tot_loss[loss=0.2582, simple_loss=0.3048, pruned_loss=0.1057, over 686105.53 frames. ], batch size: 44, lr: 3.99e-03, grad_scale: 32.0
+2023-03-26 00:55:02,880 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.255e+02 1.783e+02 2.124e+02 2.629e+02 6.988e+02, threshold=4.248e+02, percent-clipped=1.0
+2023-03-26 00:55:32,487 INFO [finetune.py:976] (5/7) Epoch 3, batch 300, loss[loss=0.3043, simple_loss=0.3496, pruned_loss=0.1294, over 4846.00 frames. ], tot_loss[loss=0.264, simple_loss=0.3112, pruned_loss=0.1084, over 746637.42 frames. ], batch size: 47, lr: 3.99e-03, grad_scale: 32.0
+2023-03-26 00:55:40,981 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11760.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 00:55:50,153 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8791, 2.4349, 2.0817, 1.2763, 2.3720, 2.3128, 2.0772, 2.2521],
+       device='cuda:5'), covar=tensor([0.0819, 0.0938, 0.1774, 0.2458, 0.1517, 0.1923, 0.1997, 0.1140],
+       device='cuda:5'), in_proj_covar=tensor([0.0165, 0.0194, 0.0203, 0.0187, 0.0213, 0.0209, 0.0215, 0.0199],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 00:56:13,670 INFO [finetune.py:976] (5/7) Epoch 3, batch 350, loss[loss=0.2746, simple_loss=0.3198, pruned_loss=0.1147, over 4920.00 frames. ], tot_loss[loss=0.2663, simple_loss=0.3132, pruned_loss=0.1097, over 790894.76 frames. ], batch size: 33, lr: 3.99e-03, grad_scale: 32.0
+2023-03-26 00:56:20,323 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.015e+02 1.891e+02 2.265e+02 2.576e+02 3.939e+02, threshold=4.529e+02, percent-clipped=0.0
+2023-03-26 00:56:20,451 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11816.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 00:56:30,476 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11830.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 00:56:45,041 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11846.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 00:56:51,647 INFO [finetune.py:976] (5/7) Epoch 3, batch 400, loss[loss=0.2473, simple_loss=0.31, pruned_loss=0.09228, over 4893.00 frames. ], tot_loss[loss=0.2655, simple_loss=0.3131, pruned_loss=0.109, over 825570.59 frames. ], batch size: 43, lr: 3.99e-03, grad_scale: 32.0
+2023-03-26 00:57:20,642 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11877.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 00:57:21,182 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=11878.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 00:57:33,125 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11888.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 00:57:55,603 INFO [finetune.py:976] (5/7) Epoch 3, batch 450, loss[loss=0.2593, simple_loss=0.2979, pruned_loss=0.1104, over 4794.00 frames. ], tot_loss[loss=0.2634, simple_loss=0.3111, pruned_loss=0.1078, over 853373.34 frames. ], batch size: 25, lr: 3.99e-03, grad_scale: 32.0
+2023-03-26 00:58:01,496 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11907.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 00:58:12,549 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.239e+02 1.814e+02 2.317e+02 2.793e+02 4.030e+02, threshold=4.633e+02, percent-clipped=0.0
+2023-03-26 00:58:15,064 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11920.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 00:58:27,382 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7630, 1.8623, 1.8093, 1.0889, 2.1553, 1.9854, 1.8880, 1.6260],
+       device='cuda:5'), covar=tensor([0.0711, 0.0707, 0.0833, 0.1113, 0.0456, 0.0821, 0.0780, 0.1197],
+       device='cuda:5'), in_proj_covar=tensor([0.0140, 0.0133, 0.0145, 0.0131, 0.0110, 0.0142, 0.0148, 0.0164],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 00:58:39,234 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11948.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 00:58:50,438 INFO [finetune.py:976] (5/7) Epoch 3, batch 500, loss[loss=0.2177, simple_loss=0.2797, pruned_loss=0.07789, over 4792.00 frames. ], tot_loss[loss=0.2592, simple_loss=0.3071, pruned_loss=0.1056, over 875877.57 frames. ], batch size: 51, lr: 3.99e-03, grad_scale: 32.0
+2023-03-26 00:58:50,629 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0
+2023-03-26 00:59:19,998 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11981.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 00:59:27,079 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11984.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 00:59:31,809 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11991.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 00:59:39,234 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5076, 1.2875, 1.2331, 1.2782, 1.5770, 1.6469, 1.5000, 1.1789],
+       device='cuda:5'), covar=tensor([0.0318, 0.0370, 0.0581, 0.0322, 0.0259, 0.0363, 0.0306, 0.0423],
+       device='cuda:5'), in_proj_covar=tensor([0.0082, 0.0112, 0.0135, 0.0114, 0.0104, 0.0098, 0.0088, 0.0108],
+       device='cuda:5'), out_proj_covar=tensor([6.4425e-05, 8.8833e-05, 1.0913e-04, 9.0787e-05, 8.2684e-05, 7.3206e-05,
+       6.7819e-05, 8.4656e-05], device='cuda:5')
+2023-03-26 00:59:46,863 INFO [finetune.py:976] (5/7) Epoch 3, batch 550, loss[loss=0.2832, simple_loss=0.3237, pruned_loss=0.1214, over 4160.00 frames. ], tot_loss[loss=0.2565, simple_loss=0.3042, pruned_loss=0.1044, over 895047.24 frames. ], batch size: 65, lr: 3.99e-03, grad_scale: 32.0
+2023-03-26 00:59:49,289 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12009.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 00:59:53,395 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.083e+02 1.776e+02 2.066e+02 2.700e+02 4.009e+02, threshold=4.133e+02, percent-clipped=0.0
+2023-03-26 01:00:15,556 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12045.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:00:21,767 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12055.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:00:21,807 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4532, 1.4465, 1.4047, 1.5314, 0.9285, 2.8882, 1.0477, 1.5533],
+       device='cuda:5'), covar=tensor([0.3464, 0.2388, 0.2085, 0.2319, 0.2098, 0.0235, 0.2856, 0.1425],
+       device='cuda:5'), in_proj_covar=tensor([0.0127, 0.0109, 0.0114, 0.0117, 0.0114, 0.0096, 0.0099, 0.0096],
+       device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004],
+       device='cuda:5')
+2023-03-26 01:00:22,313 INFO [finetune.py:976] (5/7) Epoch 3, batch 600, loss[loss=0.3166, simple_loss=0.3603, pruned_loss=0.1364, over 4823.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.3035, pruned_loss=0.1037, over 909159.32 frames. ], batch size: 47, lr: 3.99e-03, grad_scale: 32.0
+2023-03-26 01:00:27,834 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6754, 1.6002, 1.6156, 1.7545, 1.3472, 3.6768, 1.4392, 2.0168],
+       device='cuda:5'), covar=tensor([0.3582, 0.2474, 0.2069, 0.2279, 0.1909, 0.0162, 0.2849, 0.1484],
+       device='cuda:5'), in_proj_covar=tensor([0.0128, 0.0109, 0.0114, 0.0117, 0.0114, 0.0096, 0.0099, 0.0096],
+       device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004],
+       device='cuda:5')
+2023-03-26 01:00:41,422 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12076.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:00:49,887 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-03-26 01:01:16,811 INFO [finetune.py:976] (5/7) Epoch 3, batch 650, loss[loss=0.245, simple_loss=0.2882, pruned_loss=0.1009, over 4790.00 frames. ], tot_loss[loss=0.2596, simple_loss=0.3077, pruned_loss=0.1057, over 919882.80 frames. ], batch size: 29, lr: 3.99e-03, grad_scale: 32.0
+2023-03-26 01:01:20,063 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3725, 1.1739, 1.1910, 1.3599, 1.6083, 1.2898, 0.6687, 1.1752],
+       device='cuda:5'), covar=tensor([0.2635, 0.2708, 0.2235, 0.2003, 0.2017, 0.1420, 0.3426, 0.2096],
+       device='cuda:5'), in_proj_covar=tensor([0.0224, 0.0206, 0.0194, 0.0179, 0.0228, 0.0170, 0.0210, 0.0183],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 01:01:23,453 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.190e+02 1.901e+02 2.250e+02 2.651e+02 5.885e+02, threshold=4.501e+02, percent-clipped=2.0
+2023-03-26 01:01:23,624 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9453, 1.7536, 1.3580, 1.7012, 1.6780, 1.5027, 1.5564, 2.5786],
+       device='cuda:5'), covar=tensor([1.3035, 1.3041, 1.0674, 1.4188, 1.0639, 0.7361, 1.3134, 0.3600],
+       device='cuda:5'), in_proj_covar=tensor([0.0262, 0.0241, 0.0216, 0.0278, 0.0231, 0.0193, 0.0235, 0.0179],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001],
+       device='cuda:5')
+2023-03-26 01:01:50,153 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12137.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:01:57,403 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8517, 1.4002, 1.5612, 1.5925, 1.3868, 1.4223, 1.5758, 1.4556],
+       device='cuda:5'), covar=tensor([1.2104, 2.0333, 1.4498, 1.8025, 1.9307, 1.3295, 2.4195, 1.3787],
+       device='cuda:5'), in_proj_covar=tensor([0.0227, 0.0257, 0.0252, 0.0269, 0.0245, 0.0218, 0.0279, 0.0218],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001],
+       device='cuda:5')
+2023-03-26 01:02:02,051 INFO [finetune.py:976] (5/7) Epoch 3, batch 700, loss[loss=0.1885, simple_loss=0.2447, pruned_loss=0.0661, over 4735.00 frames. ], tot_loss[loss=0.2604, simple_loss=0.309, pruned_loss=0.1059, over 929119.64 frames. ], batch size: 23, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:02:03,365 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5917, 1.5060, 1.5217, 1.5995, 1.0809, 3.0369, 1.2153, 1.6299],
+       device='cuda:5'), covar=tensor([0.3367, 0.2563, 0.2046, 0.2347, 0.2086, 0.0236, 0.2706, 0.1461],
+       device='cuda:5'), in_proj_covar=tensor([0.0128, 0.0110, 0.0114, 0.0118, 0.0114, 0.0096, 0.0099, 0.0096],
+       device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004],
+       device='cuda:5')
+2023-03-26 01:02:07,632 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7565, 1.6411, 1.4048, 1.2806, 1.8781, 2.0836, 1.7617, 1.6789],
+       device='cuda:5'), covar=tensor([0.0381, 0.0474, 0.0712, 0.0512, 0.0496, 0.0679, 0.0419, 0.0454],
+       device='cuda:5'), in_proj_covar=tensor([0.0084, 0.0114, 0.0138, 0.0116, 0.0106, 0.0099, 0.0089, 0.0110],
+       device='cuda:5'), out_proj_covar=tensor([6.5394e-05, 8.9932e-05, 1.1146e-04, 9.2264e-05, 8.3961e-05, 7.4287e-05,
+       6.8960e-05, 8.6323e-05], device='cuda:5')
+2023-03-26 01:02:12,357 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12172.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:02:24,514 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12188.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:02:39,278 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12202.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:02:43,054 INFO [finetune.py:976] (5/7) Epoch 3, batch 750, loss[loss=0.2482, simple_loss=0.3139, pruned_loss=0.09119, over 4837.00 frames. ], tot_loss[loss=0.2612, simple_loss=0.31, pruned_loss=0.1062, over 935571.60 frames. ], batch size: 47, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:02:54,949 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.207e+02 1.858e+02 2.308e+02 2.738e+02 5.308e+02, threshold=4.616e+02, percent-clipped=1.0
+2023-03-26 01:03:06,614 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8842, 1.3657, 1.5405, 1.5392, 1.3772, 1.4198, 1.4727, 1.4828],
+       device='cuda:5'), covar=tensor([1.1699, 2.0685, 1.5462, 1.8929, 1.9533, 1.4244, 2.5097, 1.3926],
+       device='cuda:5'), in_proj_covar=tensor([0.0228, 0.0258, 0.0252, 0.0270, 0.0246, 0.0219, 0.0281, 0.0219],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001],
+       device='cuda:5')
+2023-03-26 01:03:14,108 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=12236.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:03:30,123 INFO [finetune.py:976] (5/7) Epoch 3, batch 800, loss[loss=0.258, simple_loss=0.3109, pruned_loss=0.1026, over 4788.00 frames. ], tot_loss[loss=0.2596, simple_loss=0.3087, pruned_loss=0.1052, over 939193.52 frames. ], batch size: 45, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:03:32,112 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4953, 0.5112, 1.3181, 1.1474, 1.1442, 1.1530, 0.9844, 1.2390],
+       device='cuda:5'), covar=tensor([0.9893, 1.7330, 1.4592, 1.4509, 1.5841, 1.1387, 1.9549, 1.2381],
+       device='cuda:5'), in_proj_covar=tensor([0.0229, 0.0258, 0.0253, 0.0270, 0.0246, 0.0220, 0.0281, 0.0219],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001],
+       device='cuda:5')
+2023-03-26 01:03:42,860 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12276.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:04:03,331 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12291.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:04:11,112 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12304.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:04:12,239 INFO [finetune.py:976] (5/7) Epoch 3, batch 850, loss[loss=0.2819, simple_loss=0.3198, pruned_loss=0.122, over 4825.00 frames. ], tot_loss[loss=0.2567, simple_loss=0.306, pruned_loss=0.1037, over 944297.11 frames. ], batch size: 38, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:04:19,495 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.224e+02 1.828e+02 2.109e+02 2.576e+02 5.946e+02, threshold=4.217e+02, percent-clipped=1.0
+2023-03-26 01:04:46,215 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=12339.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:04:46,835 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12340.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:05:06,381 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12355.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:05:06,917 INFO [finetune.py:976] (5/7) Epoch 3, batch 900, loss[loss=0.2393, simple_loss=0.2731, pruned_loss=0.1028, over 3889.00 frames. ], tot_loss[loss=0.2518, simple_loss=0.3015, pruned_loss=0.1011, over 947523.06 frames. ], batch size: 17, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:05:43,767 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=12403.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:05:45,594 INFO [finetune.py:976] (5/7) Epoch 3, batch 950, loss[loss=0.2529, simple_loss=0.2969, pruned_loss=0.1044, over 4759.00 frames. ], tot_loss[loss=0.25, simple_loss=0.2993, pruned_loss=0.1003, over 948274.77 frames. ], batch size: 27, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:05:55,397 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1891, 2.8117, 2.7107, 1.1210, 2.9868, 2.0628, 0.9120, 1.7589],
+       device='cuda:5'), covar=tensor([0.2684, 0.2290, 0.1991, 0.3657, 0.1352, 0.1195, 0.3855, 0.1766],
+       device='cuda:5'), in_proj_covar=tensor([0.0157, 0.0168, 0.0167, 0.0129, 0.0156, 0.0121, 0.0147, 0.0123],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+       device='cuda:5')
+2023-03-26 01:05:57,754 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.779e+02 2.123e+02 2.532e+02 4.452e+02, threshold=4.246e+02, percent-clipped=1.0
+2023-03-26 01:06:12,423 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12432.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:06:22,013 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12446.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:06:22,082 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0
+2023-03-26 01:06:28,914 INFO [finetune.py:976] (5/7) Epoch 3, batch 1000, loss[loss=0.2688, simple_loss=0.2941, pruned_loss=0.1217, over 3883.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.303, pruned_loss=0.1024, over 947706.96 frames. ], batch size: 16, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:06:39,165 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12472.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:07:17,561 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12502.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:07:19,896 INFO [finetune.py:976] (5/7) Epoch 3, batch 1050, loss[loss=0.2641, simple_loss=0.3161, pruned_loss=0.1061, over 4763.00 frames. ], tot_loss[loss=0.256, simple_loss=0.3065, pruned_loss=0.1027, over 950751.97 frames. ], batch size: 28, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:07:20,616 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12507.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:07:31,081 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.261e+02 2.008e+02 2.380e+02 2.733e+02 7.204e+02, threshold=4.759e+02, percent-clipped=3.0
+2023-03-26 01:07:38,120 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=12520.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:08:07,974 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0
+2023-03-26 01:08:08,765 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3871, 1.3765, 0.8896, 2.1899, 2.5722, 1.7926, 1.8403, 2.1022],
+       device='cuda:5'), covar=tensor([0.1386, 0.2125, 0.2245, 0.1133, 0.1859, 0.1866, 0.1318, 0.1937],
+       device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0098, 0.0116, 0.0093, 0.0124, 0.0097, 0.0099, 0.0094],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003],
+       device='cuda:5')
+2023-03-26 01:08:10,399 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=12550.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:08:17,961 INFO [finetune.py:976] (5/7) Epoch 3, batch 1100, loss[loss=0.2303, simple_loss=0.2872, pruned_loss=0.08674, over 4891.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.3078, pruned_loss=0.1036, over 952131.22 frames. ], batch size: 32, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:08:35,488 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12576.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:08:57,059 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1737, 2.2468, 2.0559, 1.4439, 2.4542, 2.4547, 2.2655, 1.9954],
+       device='cuda:5'), covar=tensor([0.0747, 0.0615, 0.0856, 0.1154, 0.0391, 0.0702, 0.0782, 0.0927],
+       device='cuda:5'), in_proj_covar=tensor([0.0141, 0.0134, 0.0145, 0.0131, 0.0111, 0.0143, 0.0150, 0.0165],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 01:08:58,302 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12604.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:08:59,395 INFO [finetune.py:976] (5/7) Epoch 3, batch 1150, loss[loss=0.3228, simple_loss=0.3544, pruned_loss=0.1456, over 4817.00 frames. ], tot_loss[loss=0.2584, simple_loss=0.3089, pruned_loss=0.104, over 954035.67 frames. ], batch size: 33, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:09:11,545 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.144e+02 1.712e+02 1.953e+02 2.432e+02 5.551e+02, threshold=3.906e+02, percent-clipped=1.0
+2023-03-26 01:09:21,031 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=12624.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:09:42,260 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12640.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:10:00,916 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=12652.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:10:00,980 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2325, 2.0358, 2.6807, 1.7850, 2.4550, 2.4117, 1.9210, 2.5922],
+       device='cuda:5'), covar=tensor([0.1869, 0.2515, 0.1981, 0.2650, 0.1134, 0.2198, 0.2817, 0.1167],
+       device='cuda:5'), in_proj_covar=tensor([0.0206, 0.0205, 0.0206, 0.0198, 0.0180, 0.0227, 0.0215, 0.0203],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 01:10:03,871 INFO [finetune.py:976] (5/7) Epoch 3, batch 1200, loss[loss=0.3012, simple_loss=0.3458, pruned_loss=0.1283, over 4810.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.3057, pruned_loss=0.1021, over 955105.96 frames. ], batch size: 51, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:10:31,627 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=12688.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:10:41,376 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5783, 0.5473, 1.4136, 1.2738, 1.2577, 1.2377, 1.0674, 1.3177],
+       device='cuda:5'), covar=tensor([0.8549, 1.5785, 1.2707, 1.3113, 1.3880, 1.0444, 1.6593, 1.0972],
+       device='cuda:5'), in_proj_covar=tensor([0.0227, 0.0256, 0.0252, 0.0269, 0.0245, 0.0219, 0.0279, 0.0218],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001],
+       device='cuda:5')
+2023-03-26 01:10:43,571 INFO [finetune.py:976] (5/7) Epoch 3, batch 1250, loss[loss=0.2313, simple_loss=0.2796, pruned_loss=0.09155, over 4679.00 frames. ], tot_loss[loss=0.2521, simple_loss=0.3025, pruned_loss=0.1009, over 955692.13 frames. ], batch size: 23, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:10:49,032 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4722, 1.7309, 1.8674, 1.9194, 1.8284, 3.1530, 1.5430, 1.8320],
+       device='cuda:5'), covar=tensor([0.1010, 0.1372, 0.1149, 0.0899, 0.1228, 0.0259, 0.1178, 0.1388],
+       device='cuda:5'), in_proj_covar=tensor([0.0078, 0.0081, 0.0078, 0.0080, 0.0093, 0.0083, 0.0085, 0.0079],
+       device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004],
+       device='cuda:5')
+2023-03-26 01:10:51,327 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.066e+02 1.791e+02 2.152e+02 2.550e+02 3.946e+02, threshold=4.304e+02, percent-clipped=1.0
+2023-03-26 01:11:06,477 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12732.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:11:06,590 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.18 vs. limit=5.0
+2023-03-26 01:11:25,782 INFO [finetune.py:976] (5/7) Epoch 3, batch 1300, loss[loss=0.2226, simple_loss=0.2793, pruned_loss=0.08301, over 4894.00 frames. ], tot_loss[loss=0.2484, simple_loss=0.2991, pruned_loss=0.09882, over 956645.30 frames. ], batch size: 32, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:11:31,245 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12763.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:11:40,240 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4268, 2.2507, 1.8572, 1.0062, 2.0210, 1.8326, 1.5661, 1.9960],
+       device='cuda:5'), covar=tensor([0.0781, 0.0878, 0.1753, 0.2311, 0.1515, 0.2372, 0.2527, 0.1167],
+       device='cuda:5'), in_proj_covar=tensor([0.0164, 0.0196, 0.0202, 0.0188, 0.0214, 0.0209, 0.0215, 0.0199],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 01:11:41,149 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0
+2023-03-26 01:11:48,964 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=12780.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:12:19,505 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12802.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:12:22,422 INFO [finetune.py:976] (5/7) Epoch 3, batch 1350, loss[loss=0.2525, simple_loss=0.2973, pruned_loss=0.1039, over 4903.00 frames. ], tot_loss[loss=0.2492, simple_loss=0.2993, pruned_loss=0.09955, over 956067.53 frames. ], batch size: 32, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:12:28,784 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3882, 2.4281, 2.3329, 1.6501, 2.7403, 2.6505, 2.4287, 2.1471],
+       device='cuda:5'), covar=tensor([0.0600, 0.0533, 0.0641, 0.0899, 0.0303, 0.0548, 0.0616, 0.0889],
+       device='cuda:5'), in_proj_covar=tensor([0.0141, 0.0134, 0.0145, 0.0132, 0.0111, 0.0144, 0.0150, 0.0166],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 01:12:39,795 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0
+2023-03-26 01:12:40,682 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.079e+02 1.832e+02 2.182e+02 2.674e+02 3.468e+02, threshold=4.364e+02, percent-clipped=0.0
+2023-03-26 01:12:50,689 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12824.0, num_to_drop=1, layers_to_drop={3}
+2023-03-26 01:12:53,824 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.80 vs. limit=5.0
+2023-03-26 01:13:10,267 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.5947, 1.6747, 1.6243, 1.3269, 1.6102, 1.8591, 1.9660, 1.4978],
+       device='cuda:5'), covar=tensor([0.1022, 0.0498, 0.0499, 0.0546, 0.0409, 0.0436, 0.0255, 0.0599],
+       device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0156, 0.0117, 0.0135, 0.0132, 0.0118, 0.0146, 0.0143],
+       device='cuda:5'), out_proj_covar=tensor([9.7458e-05, 1.1591e-04, 8.5839e-05, 9.8774e-05, 9.5504e-05, 8.7137e-05,
+       1.0867e-04, 1.0600e-04], device='cuda:5')
+2023-03-26 01:13:21,548 INFO [finetune.py:976] (5/7) Epoch 3, batch 1400, loss[loss=0.2344, simple_loss=0.294, pruned_loss=0.08743, over 4815.00 frames. ], tot_loss[loss=0.2522, simple_loss=0.3026, pruned_loss=0.101, over 956463.70 frames. ], batch size: 39, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:14:10,491 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3973, 1.4403, 0.7596, 2.2601, 2.5291, 1.7881, 1.9174, 2.1673],
+       device='cuda:5'), covar=tensor([0.1480, 0.2257, 0.2423, 0.1168, 0.1876, 0.2026, 0.1368, 0.1949],
+       device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0098, 0.0116, 0.0093, 0.0124, 0.0097, 0.0100, 0.0094],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003],
+       device='cuda:5')
+2023-03-26 01:14:19,108 INFO [finetune.py:976] (5/7) Epoch 3, batch 1450, loss[loss=0.2512, simple_loss=0.299, pruned_loss=0.1017, over 4868.00 frames. ], tot_loss[loss=0.253, simple_loss=0.3043, pruned_loss=0.1009, over 957058.49 frames. ], batch size: 31, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:14:37,682 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0
+2023-03-26 01:14:38,006 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.344e+02 1.849e+02 2.229e+02 2.789e+02 4.972e+02, threshold=4.459e+02, percent-clipped=1.0
+2023-03-26 01:15:13,774 INFO [finetune.py:976] (5/7) Epoch 3, batch 1500, loss[loss=0.2416, simple_loss=0.3108, pruned_loss=0.08622, over 4906.00 frames. ], tot_loss[loss=0.2568, simple_loss=0.3079, pruned_loss=0.1028, over 958202.30 frames. ], batch size: 37, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:15:52,222 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1765, 1.8815, 1.6529, 1.7795, 1.8931, 1.7651, 1.7676, 2.6804],
+       device='cuda:5'), covar=tensor([1.1844, 1.1765, 0.9420, 1.2457, 0.9439, 0.6510, 1.1590, 0.3437],
+       device='cuda:5'), in_proj_covar=tensor([0.0266, 0.0244, 0.0217, 0.0279, 0.0233, 0.0194, 0.0236, 0.0181],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001],
+       device='cuda:5')
+2023-03-26 01:15:58,824 INFO [finetune.py:976] (5/7) Epoch 3, batch 1550, loss[loss=0.2157, simple_loss=0.2728, pruned_loss=0.07936, over 4796.00 frames. ], tot_loss[loss=0.2541, simple_loss=0.3054, pruned_loss=0.1013, over 956078.63 frames. ], batch size: 25, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:16:09,623 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.917e+01 1.840e+02 2.219e+02 2.788e+02 8.539e+02, threshold=4.437e+02, percent-clipped=2.0
+2023-03-26 01:16:22,744 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.07 vs. limit=5.0
+2023-03-26 01:16:24,223 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0
+2023-03-26 01:16:44,395 INFO [finetune.py:976] (5/7) Epoch 3, batch 1600, loss[loss=0.2372, simple_loss=0.2948, pruned_loss=0.08984, over 4813.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.3025, pruned_loss=0.1001, over 956986.32 frames. ], batch size: 33, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:17:32,108 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1246, 1.9368, 2.0690, 0.9011, 2.2451, 2.5701, 2.0482, 2.0939],
+       device='cuda:5'), covar=tensor([0.1114, 0.0875, 0.0609, 0.1018, 0.0536, 0.0536, 0.0540, 0.0618],
+       device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0156, 0.0118, 0.0136, 0.0133, 0.0119, 0.0146, 0.0144],
+       device='cuda:5'), out_proj_covar=tensor([9.7740e-05, 1.1642e-04, 8.6648e-05, 9.9569e-05, 9.6296e-05, 8.7733e-05,
+       1.0890e-04, 1.0665e-04], device='cuda:5')
+2023-03-26 01:17:41,278 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=13102.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:17:43,646 INFO [finetune.py:976] (5/7) Epoch 3, batch 1650, loss[loss=0.2223, simple_loss=0.2721, pruned_loss=0.08621, over 4932.00 frames. ], tot_loss[loss=0.2469, simple_loss=0.2979, pruned_loss=0.09791, over 957383.32 frames. ], batch size: 33, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:17:55,152 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.264e+02 1.841e+02 2.109e+02 2.450e+02 4.226e+02, threshold=4.217e+02, percent-clipped=0.0
+2023-03-26 01:17:56,996 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=13119.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 01:18:31,373 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0
+2023-03-26 01:18:32,152 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=13150.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:18:41,624 INFO [finetune.py:976] (5/7) Epoch 3, batch 1700, loss[loss=0.205, simple_loss=0.2605, pruned_loss=0.07477, over 4831.00 frames. ], tot_loss[loss=0.2461, simple_loss=0.2962, pruned_loss=0.09798, over 956754.86 frames. ], batch size: 30, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:19:09,571 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4180, 1.4786, 1.4744, 1.7909, 1.5688, 3.2203, 1.2257, 1.6051],
+       device='cuda:5'), covar=tensor([0.1146, 0.1820, 0.1352, 0.1070, 0.1799, 0.0300, 0.1684, 0.1761],
+       device='cuda:5'), in_proj_covar=tensor([0.0078, 0.0081, 0.0078, 0.0079, 0.0093, 0.0083, 0.0085, 0.0078],
+       device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004],
+       device='cuda:5')
+2023-03-26 01:19:22,363 INFO [finetune.py:976] (5/7) Epoch 3, batch 1750, loss[loss=0.3, simple_loss=0.3391, pruned_loss=0.1304, over 4719.00 frames. ], tot_loss[loss=0.2503, simple_loss=0.3, pruned_loss=0.1003, over 955241.48 frames. ], batch size: 59, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:19:31,219 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.973e+02 2.314e+02 2.693e+02 6.749e+02, threshold=4.629e+02, percent-clipped=3.0
+2023-03-26 01:19:40,754 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.81 vs. limit=5.0
+2023-03-26 01:20:10,740 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0
+2023-03-26 01:20:18,402 INFO [finetune.py:976] (5/7) Epoch 3, batch 1800, loss[loss=0.2643, simple_loss=0.3193, pruned_loss=0.1047, over 4931.00 frames. ], tot_loss[loss=0.2523, simple_loss=0.3028, pruned_loss=0.1009, over 956108.03 frames. ], batch size: 42, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:20:39,655 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7656, 1.1663, 1.5126, 1.4854, 1.3350, 1.3748, 1.4106, 1.3757],
+       device='cuda:5'), covar=tensor([1.0300, 1.6760, 1.3400, 1.5112, 1.7178, 1.1583, 1.9658, 1.2834],
+       device='cuda:5'), in_proj_covar=tensor([0.0229, 0.0257, 0.0253, 0.0270, 0.0245, 0.0219, 0.0280, 0.0219],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001],
+       device='cuda:5')
+2023-03-26 01:20:59,918 INFO [finetune.py:976] (5/7) Epoch 3, batch 1850, loss[loss=0.2755, simple_loss=0.3201, pruned_loss=0.1155, over 4856.00 frames. ], tot_loss[loss=0.2544, simple_loss=0.3044, pruned_loss=0.1022, over 954101.16 frames. ], batch size: 31, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:21:08,031 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.104e+02 1.691e+02 1.934e+02 2.488e+02 4.482e+02, threshold=3.868e+02, percent-clipped=0.0
+2023-03-26 01:21:16,938 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0
+2023-03-26 01:21:50,069 INFO [finetune.py:976] (5/7) Epoch 3, batch 1900, loss[loss=0.282, simple_loss=0.3191, pruned_loss=0.1224, over 4827.00 frames. ], tot_loss[loss=0.2568, simple_loss=0.3068, pruned_loss=0.1034, over 955352.94 frames. ], batch size: 30, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:22:40,358 INFO [finetune.py:976] (5/7) Epoch 3, batch 1950, loss[loss=0.2457, simple_loss=0.2927, pruned_loss=0.09936, over 4763.00 frames. ], tot_loss[loss=0.2535, simple_loss=0.304, pruned_loss=0.1015, over 954572.84 frames. ], batch size: 27, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:22:46,999 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.125e+02 1.849e+02 2.191e+02 2.472e+02 6.030e+02, threshold=4.381e+02, percent-clipped=4.0
+2023-03-26 01:22:48,827 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=13419.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:23:25,634 INFO [finetune.py:976] (5/7) Epoch 3, batch 2000, loss[loss=0.2637, simple_loss=0.3103, pruned_loss=0.1085, over 4697.00 frames. ], tot_loss[loss=0.25, simple_loss=0.3002, pruned_loss=0.09991, over 953188.67 frames. ], batch size: 23, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:23:36,607 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=13467.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:24:18,864 INFO [finetune.py:976] (5/7) Epoch 3, batch 2050, loss[loss=0.2326, simple_loss=0.2804, pruned_loss=0.0924, over 4793.00 frames. ], tot_loss[loss=0.2472, simple_loss=0.2971, pruned_loss=0.09867, over 953605.85 frames. ], batch size: 25, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:24:23,487 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3582, 1.4756, 1.5044, 1.7063, 1.5820, 3.0822, 1.2416, 1.5386],
+       device='cuda:5'), covar=tensor([0.1013, 0.1577, 0.1309, 0.0973, 0.1466, 0.0262, 0.1389, 0.1564],
+       device='cuda:5'), in_proj_covar=tensor([0.0078, 0.0081, 0.0079, 0.0079, 0.0093, 0.0083, 0.0085, 0.0079],
+       device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004],
+       device='cuda:5')
+2023-03-26 01:24:33,952 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.806e+02 2.206e+02 2.674e+02 5.377e+02, threshold=4.412e+02, percent-clipped=2.0
+2023-03-26 01:24:34,725 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5705, 2.2395, 1.7905, 0.8787, 1.9557, 1.9886, 1.7302, 1.9011],
+       device='cuda:5'), covar=tensor([0.0903, 0.0913, 0.1740, 0.2635, 0.1591, 0.2930, 0.2397, 0.1263],
+       device='cuda:5'), in_proj_covar=tensor([0.0164, 0.0195, 0.0201, 0.0187, 0.0213, 0.0207, 0.0214, 0.0198],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 01:24:38,944 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.4014, 3.8272, 3.9959, 4.2607, 4.1542, 3.8593, 4.4879, 1.4563],
+       device='cuda:5'), covar=tensor([0.0719, 0.0822, 0.0820, 0.0849, 0.1113, 0.1217, 0.0689, 0.4996],
+       device='cuda:5'), in_proj_covar=tensor([0.0368, 0.0244, 0.0276, 0.0294, 0.0341, 0.0286, 0.0310, 0.0301],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 01:24:44,287 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7609, 1.6184, 1.3739, 1.4870, 1.8963, 1.9726, 1.6978, 1.3832],
+       device='cuda:5'), covar=tensor([0.0251, 0.0338, 0.0568, 0.0306, 0.0219, 0.0284, 0.0334, 0.0387],
+       device='cuda:5'), in_proj_covar=tensor([0.0082, 0.0112, 0.0134, 0.0115, 0.0103, 0.0097, 0.0088, 0.0108],
+       device='cuda:5'), out_proj_covar=tensor([6.4423e-05, 8.8735e-05, 1.0830e-04, 9.1277e-05, 8.1677e-05, 7.2481e-05,
+       6.7742e-05, 8.4409e-05], device='cuda:5')
+2023-03-26 01:24:53,834 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0251, 1.9280, 1.7828, 2.1238, 1.2835, 4.5903, 1.6709, 2.5752],
+       device='cuda:5'), covar=tensor([0.3295, 0.2221, 0.1928, 0.2069, 0.1805, 0.0099, 0.2563, 0.1227],
+       device='cuda:5'), in_proj_covar=tensor([0.0128, 0.0109, 0.0114, 0.0117, 0.0114, 0.0096, 0.0099, 0.0096],
+       device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004],
+       device='cuda:5')
+2023-03-26 01:25:00,755 INFO [finetune.py:976] (5/7) Epoch 3, batch 2100, loss[loss=0.239, simple_loss=0.2885, pruned_loss=0.09472, over 4731.00 frames. ], tot_loss[loss=0.2464, simple_loss=0.2962, pruned_loss=0.0983, over 953261.95 frames. ], batch size: 23, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:25:45,311 INFO [finetune.py:976] (5/7) Epoch 3, batch 2150, loss[loss=0.1775, simple_loss=0.226, pruned_loss=0.06447, over 4161.00 frames. ], tot_loss[loss=0.2492, simple_loss=0.2998, pruned_loss=0.09923, over 952885.00 frames. ], batch size: 17, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:26:01,366 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.135e+02 1.918e+02 2.256e+02 2.684e+02 5.304e+02, threshold=4.512e+02, percent-clipped=2.0
+2023-03-26 01:26:27,570 INFO [finetune.py:976] (5/7) Epoch 3, batch 2200, loss[loss=0.211, simple_loss=0.2673, pruned_loss=0.0773, over 4767.00 frames. ], tot_loss[loss=0.2492, simple_loss=0.3005, pruned_loss=0.09895, over 951061.21 frames. ], batch size: 26, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:26:36,684 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0207, 1.6329, 2.1950, 1.4338, 2.0290, 2.0735, 1.7439, 2.3336],
+       device='cuda:5'), covar=tensor([0.1419, 0.2429, 0.1466, 0.2210, 0.0973, 0.1541, 0.2661, 0.0945],
+       device='cuda:5'), in_proj_covar=tensor([0.0204, 0.0202, 0.0202, 0.0195, 0.0178, 0.0225, 0.0212, 0.0201],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 01:26:43,374 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5982, 1.5193, 1.4569, 1.6939, 2.1052, 1.6367, 1.3790, 1.2885],
+       device='cuda:5'), covar=tensor([0.2580, 0.2510, 0.2116, 0.1906, 0.2171, 0.1356, 0.2980, 0.2091],
+       device='cuda:5'), in_proj_covar=tensor([0.0227, 0.0208, 0.0195, 0.0181, 0.0230, 0.0172, 0.0212, 0.0184],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 01:26:50,937 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7043, 1.6543, 1.7661, 1.9383, 1.8971, 3.2195, 1.4256, 1.8029],
+       device='cuda:5'), covar=tensor([0.0904, 0.1474, 0.0967, 0.0867, 0.1258, 0.0287, 0.1323, 0.1441],
+       device='cuda:5'), in_proj_covar=tensor([0.0079, 0.0082, 0.0079, 0.0080, 0.0094, 0.0084, 0.0086, 0.0079],
+       device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004],
+       device='cuda:5')
+2023-03-26 01:27:00,299 INFO [finetune.py:976] (5/7) Epoch 3, batch 2250, loss[loss=0.2788, simple_loss=0.3308, pruned_loss=0.1134, over 4908.00 frames. ], tot_loss[loss=0.2492, simple_loss=0.3006, pruned_loss=0.09887, over 951217.55 frames. ], batch size: 36, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:27:08,392 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.927e+02 2.166e+02 2.564e+02 5.587e+02, threshold=4.333e+02, percent-clipped=2.0
+2023-03-26 01:27:24,312 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=13737.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:27:42,150 INFO [finetune.py:976] (5/7) Epoch 3, batch 2300, loss[loss=0.2692, simple_loss=0.3084, pruned_loss=0.1149, over 4188.00 frames. ], tot_loss[loss=0.25, simple_loss=0.3015, pruned_loss=0.09923, over 952890.65 frames. ], batch size: 65, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:28:25,839 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=13798.0, num_to_drop=1, layers_to_drop={3}
+2023-03-26 01:28:36,681 INFO [finetune.py:976] (5/7) Epoch 3, batch 2350, loss[loss=0.2622, simple_loss=0.3049, pruned_loss=0.1098, over 4762.00 frames. ], tot_loss[loss=0.2484, simple_loss=0.2992, pruned_loss=0.0988, over 952640.08 frames. ], batch size: 27, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:28:50,256 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.589e+01 1.791e+02 2.182e+02 2.579e+02 6.380e+02, threshold=4.365e+02, percent-clipped=2.0
+2023-03-26 01:29:31,310 INFO [finetune.py:976] (5/7) Epoch 3, batch 2400, loss[loss=0.2291, simple_loss=0.2779, pruned_loss=0.09011, over 4704.00 frames. ], tot_loss[loss=0.2471, simple_loss=0.2972, pruned_loss=0.09848, over 952852.09 frames. ], batch size: 23, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:30:00,458 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9610, 1.7592, 1.4397, 1.7086, 1.6681, 1.6495, 1.6027, 2.5766],
+       device='cuda:5'), covar=tensor([1.2797, 1.2380, 0.9806, 1.3148, 1.0454, 0.6588, 1.2243, 0.3887],
+       device='cuda:5'), in_proj_covar=tensor([0.0268, 0.0245, 0.0218, 0.0281, 0.0234, 0.0195, 0.0237, 0.0183],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001],
+       device='cuda:5')
+2023-03-26 01:30:15,687 INFO [finetune.py:976] (5/7) Epoch 3, batch 2450, loss[loss=0.2624, simple_loss=0.309, pruned_loss=0.1079, over 4820.00 frames. ], tot_loss[loss=0.244, simple_loss=0.2938, pruned_loss=0.09711, over 949615.14 frames. ], batch size: 40, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:30:29,103 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.953e+01 1.906e+02 2.150e+02 2.578e+02 4.181e+02, threshold=4.299e+02, percent-clipped=0.0
+2023-03-26 01:30:59,059 INFO [finetune.py:976] (5/7) Epoch 3, batch 2500, loss[loss=0.2081, simple_loss=0.2712, pruned_loss=0.07249, over 4799.00 frames. ], tot_loss[loss=0.246, simple_loss=0.296, pruned_loss=0.098, over 951936.68 frames. ], batch size: 25, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:31:53,428 INFO [finetune.py:976] (5/7) Epoch 3, batch 2550, loss[loss=0.2491, simple_loss=0.3158, pruned_loss=0.09116, over 4838.00 frames. ], tot_loss[loss=0.2511, simple_loss=0.3017, pruned_loss=0.1003, over 953257.49 frames. ], batch size: 49, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:31:53,545 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2092, 1.2469, 1.4700, 1.0489, 1.1496, 1.3709, 1.2995, 1.5065],
+       device='cuda:5'), covar=tensor([0.1409, 0.2079, 0.1403, 0.1569, 0.1103, 0.1420, 0.2536, 0.0885],
+       device='cuda:5'), in_proj_covar=tensor([0.0207, 0.0204, 0.0205, 0.0197, 0.0180, 0.0226, 0.0214, 0.0203],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 01:32:02,447 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.924e+01 1.822e+02 2.075e+02 2.520e+02 4.375e+02, threshold=4.150e+02, percent-clipped=1.0
+2023-03-26 01:32:27,221 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0425, 2.0247, 1.5938, 1.9978, 2.0861, 1.7272, 2.7212, 1.9953],
+       device='cuda:5'), covar=tensor([0.1725, 0.3006, 0.4151, 0.3742, 0.2898, 0.1917, 0.2281, 0.2420],
+       device='cuda:5'), in_proj_covar=tensor([0.0164, 0.0195, 0.0239, 0.0253, 0.0219, 0.0184, 0.0208, 0.0188],
+       device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 01:32:30,594 INFO [finetune.py:976] (5/7) Epoch 3, batch 2600, loss[loss=0.1944, simple_loss=0.2504, pruned_loss=0.06919, over 4775.00 frames. ], tot_loss[loss=0.253, simple_loss=0.3037, pruned_loss=0.1012, over 953802.59 frames. ], batch size: 26, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:32:41,347 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14072.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:33:04,945 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14093.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 01:33:06,823 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0118, 1.8163, 1.5970, 2.0668, 1.9591, 1.7244, 2.2790, 1.9816],
+       device='cuda:5'), covar=tensor([0.1949, 0.4004, 0.4084, 0.3710, 0.2784, 0.1981, 0.4108, 0.2613],
+       device='cuda:5'), in_proj_covar=tensor([0.0164, 0.0195, 0.0239, 0.0253, 0.0220, 0.0184, 0.0208, 0.0188],
+       device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 01:33:16,428 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14103.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:33:18,153 INFO [finetune.py:976] (5/7) Epoch 3, batch 2650, loss[loss=0.2729, simple_loss=0.3273, pruned_loss=0.1092, over 4919.00 frames. ], tot_loss[loss=0.2538, simple_loss=0.3046, pruned_loss=0.1015, over 950691.59 frames. ], batch size: 42, lr: 3.98e-03, grad_scale: 16.0
+2023-03-26 01:33:34,825 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.159e+02 1.796e+02 2.195e+02 2.771e+02 4.502e+02, threshold=4.390e+02, percent-clipped=2.0
+2023-03-26 01:33:36,034 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0374, 1.2916, 0.9588, 1.9680, 2.3154, 1.6718, 1.5854, 1.8712],
+       device='cuda:5'), covar=tensor([0.1541, 0.2143, 0.2138, 0.1147, 0.1934, 0.1983, 0.1423, 0.2000],
+       device='cuda:5'), in_proj_covar=tensor([0.0093, 0.0099, 0.0118, 0.0094, 0.0124, 0.0098, 0.0100, 0.0095],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003],
+       device='cuda:5')
+2023-03-26 01:33:37,261 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14120.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 01:33:56,206 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14133.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:34:00,725 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0
+2023-03-26 01:34:18,024 INFO [finetune.py:976] (5/7) Epoch 3, batch 2700, loss[loss=0.2731, simple_loss=0.3206, pruned_loss=0.1128, over 4769.00 frames. ], tot_loss[loss=0.2498, simple_loss=0.3018, pruned_loss=0.09891, over 950463.96 frames. ], batch size: 26, lr: 3.98e-03, grad_scale: 32.0
+2023-03-26 01:34:28,761 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14164.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:34:38,327 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.60 vs. limit=2.0
+2023-03-26 01:34:50,976 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14181.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 01:35:13,614 INFO [finetune.py:976] (5/7) Epoch 3, batch 2750, loss[loss=0.2647, simple_loss=0.3035, pruned_loss=0.1129, over 4895.00 frames. ], tot_loss[loss=0.2482, simple_loss=0.2992, pruned_loss=0.09861, over 952374.40 frames. ], batch size: 32, lr: 3.98e-03, grad_scale: 32.0
+2023-03-26 01:35:18,210 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0
+2023-03-26 01:35:20,820 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.010e+02 1.613e+02 1.949e+02 2.418e+02 3.837e+02, threshold=3.898e+02, percent-clipped=0.0
+2023-03-26 01:35:38,061 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0161, 1.6626, 1.9627, 0.9274, 2.2081, 2.3897, 1.8552, 2.0334],
+       device='cuda:5'), covar=tensor([0.1639, 0.1626, 0.0667, 0.1163, 0.0751, 0.1084, 0.0880, 0.0952],
+       device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0158, 0.0118, 0.0136, 0.0132, 0.0119, 0.0147, 0.0143],
+       device='cuda:5'), out_proj_covar=tensor([9.8434e-05, 1.1741e-04, 8.5783e-05, 9.9953e-05, 9.5786e-05, 8.8191e-05,
+       1.0966e-04, 1.0609e-04], device='cuda:5')
+2023-03-26 01:35:50,233 INFO [finetune.py:976] (5/7) Epoch 3, batch 2800, loss[loss=0.2126, simple_loss=0.271, pruned_loss=0.07712, over 4773.00 frames. ], tot_loss[loss=0.2445, simple_loss=0.2955, pruned_loss=0.09678, over 953363.69 frames. ], batch size: 28, lr: 3.98e-03, grad_scale: 32.0
+2023-03-26 01:36:35,464 INFO [finetune.py:976] (5/7) Epoch 3, batch 2850, loss[loss=0.1971, simple_loss=0.2617, pruned_loss=0.06623, over 4748.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.2946, pruned_loss=0.09659, over 954743.13 frames. ], batch size: 26, lr: 3.98e-03, grad_scale: 32.0
+2023-03-26 01:36:48,619 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.210e+02 1.714e+02 2.069e+02 2.397e+02 3.427e+02, threshold=4.138e+02, percent-clipped=0.0
+2023-03-26 01:37:05,961 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0
+2023-03-26 01:37:26,620 INFO [finetune.py:976] (5/7) Epoch 3, batch 2900, loss[loss=0.2584, simple_loss=0.3259, pruned_loss=0.09539, over 4815.00 frames. ], tot_loss[loss=0.2472, simple_loss=0.2982, pruned_loss=0.09808, over 954929.60 frames. ], batch size: 51, lr: 3.98e-03, grad_scale: 32.0
+2023-03-26 01:37:44,428 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3415, 1.2735, 1.3128, 1.2877, 0.7332, 2.2863, 0.7218, 1.1667],
+       device='cuda:5'), covar=tensor([0.3675, 0.2586, 0.2249, 0.2514, 0.2422, 0.0351, 0.3007, 0.1692],
+       device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0111, 0.0116, 0.0119, 0.0116, 0.0097, 0.0101, 0.0098],
+       device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004],
+       device='cuda:5')
+2023-03-26 01:37:55,686 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6977, 3.7075, 3.6154, 1.8189, 3.8533, 2.8429, 0.7950, 2.6200],
+       device='cuda:5'), covar=tensor([0.2536, 0.1932, 0.1552, 0.3245, 0.1031, 0.1041, 0.4678, 0.1472],
+       device='cuda:5'), in_proj_covar=tensor([0.0155, 0.0168, 0.0165, 0.0129, 0.0155, 0.0120, 0.0146, 0.0122],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+       device='cuda:5')
+2023-03-26 01:38:06,374 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14393.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:38:25,745 INFO [finetune.py:976] (5/7) Epoch 3, batch 2950, loss[loss=0.2445, simple_loss=0.3103, pruned_loss=0.08938, over 4831.00 frames. ], tot_loss[loss=0.2488, simple_loss=0.3008, pruned_loss=0.09837, over 954126.72 frames. ], batch size: 30, lr: 3.98e-03, grad_scale: 32.0
+2023-03-26 01:38:27,068 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1022, 2.3578, 2.0348, 1.5300, 2.6169, 2.4698, 2.2900, 2.0183],
+       device='cuda:5'), covar=tensor([0.0775, 0.0598, 0.0888, 0.1041, 0.0346, 0.0754, 0.0779, 0.1043],
+       device='cuda:5'), in_proj_covar=tensor([0.0139, 0.0132, 0.0144, 0.0129, 0.0109, 0.0142, 0.0147, 0.0162],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 01:38:35,810 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14413.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:38:38,198 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.859e+02 2.169e+02 2.721e+02 5.785e+02, threshold=4.339e+02, percent-clipped=3.0
+2023-03-26 01:38:50,262 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14428.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:38:51,987 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.1671, 1.2541, 1.3821, 0.6740, 1.0798, 1.4958, 1.5625, 1.2811],
+       device='cuda:5'), covar=tensor([0.1144, 0.0667, 0.0443, 0.0652, 0.0520, 0.0640, 0.0354, 0.0667],
+       device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0157, 0.0118, 0.0136, 0.0132, 0.0119, 0.0147, 0.0143],
+       device='cuda:5'), out_proj_covar=tensor([9.8685e-05, 1.1720e-04, 8.5883e-05, 9.9961e-05, 9.5553e-05, 8.8247e-05,
+       1.0958e-04, 1.0620e-04], device='cuda:5')
+2023-03-26 01:39:02,087 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=14441.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:39:20,296 INFO [finetune.py:976] (5/7) Epoch 3, batch 3000, loss[loss=0.2664, simple_loss=0.3277, pruned_loss=0.1025, over 4885.00 frames. ], tot_loss[loss=0.2501, simple_loss=0.3021, pruned_loss=0.09899, over 955034.60 frames. ], batch size: 32, lr: 3.98e-03, grad_scale: 32.0
+2023-03-26 01:39:20,296 INFO [finetune.py:1001] (5/7) Computing validation loss
+2023-03-26 01:39:22,634 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6479, 3.4776, 3.3885, 1.4829, 3.5328, 2.6452, 0.7291, 2.3559],
+       device='cuda:5'), covar=tensor([0.2017, 0.1418, 0.1598, 0.3916, 0.1071, 0.1103, 0.4420, 0.1763],
+       device='cuda:5'), in_proj_covar=tensor([0.0155, 0.0167, 0.0164, 0.0129, 0.0154, 0.0120, 0.0146, 0.0122],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+       device='cuda:5')
+2023-03-26 01:39:23,812 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7320, 0.9232, 1.6118, 1.4739, 1.4142, 1.3747, 1.3040, 1.4597],
+       device='cuda:5'), covar=tensor([0.8791, 1.4163, 1.1230, 1.2321, 1.3904, 1.0399, 1.6764, 1.0139],
+       device='cuda:5'), in_proj_covar=tensor([0.0226, 0.0254, 0.0252, 0.0266, 0.0243, 0.0217, 0.0277, 0.0218],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001],
+       device='cuda:5')
+2023-03-26 01:39:26,240 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6472, 1.6678, 1.9794, 1.4074, 1.7672, 1.8459, 1.6554, 2.0407],
+       device='cuda:5'), covar=tensor([0.1456, 0.2279, 0.1359, 0.1901, 0.1002, 0.1661, 0.2622, 0.0909],
+       device='cuda:5'), in_proj_covar=tensor([0.0206, 0.0204, 0.0204, 0.0196, 0.0181, 0.0225, 0.0214, 0.0203],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 01:39:27,813 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5915, 1.6407, 1.9442, 1.3537, 1.7358, 1.8063, 1.6244, 2.0048],
+       device='cuda:5'), covar=tensor([0.1670, 0.2333, 0.1562, 0.2109, 0.1022, 0.1717, 0.3039, 0.1022],
+       device='cuda:5'), in_proj_covar=tensor([0.0206, 0.0204, 0.0204, 0.0196, 0.0181, 0.0225, 0.0214, 0.0203],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 01:39:37,114 INFO [finetune.py:1010] (5/7) Epoch 3, validation: loss=0.1777, simple_loss=0.2485, pruned_loss=0.05342, over 2265189.00 frames.
+2023-03-26 01:39:37,115 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6316MB
+2023-03-26 01:39:42,157 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14459.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:40:02,208 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14474.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:40:03,375 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14476.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 01:40:15,379 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14487.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:40:36,154 INFO [finetune.py:976] (5/7) Epoch 3, batch 3050, loss[loss=0.2309, simple_loss=0.294, pruned_loss=0.08389, over 4830.00 frames. ], tot_loss[loss=0.2497, simple_loss=0.3021, pruned_loss=0.09862, over 954355.38 frames. ], batch size: 47, lr: 3.98e-03, grad_scale: 32.0
+2023-03-26 01:40:52,883 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.290e+02 1.934e+02 2.277e+02 2.724e+02 4.940e+02, threshold=4.554e+02, percent-clipped=2.0
+2023-03-26 01:41:17,979 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14548.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:41:22,792 INFO [finetune.py:976] (5/7) Epoch 3, batch 3100, loss[loss=0.2602, simple_loss=0.3035, pruned_loss=0.1084, over 4927.00 frames. ], tot_loss[loss=0.2478, simple_loss=0.2997, pruned_loss=0.09795, over 956460.34 frames. ], batch size: 33, lr: 3.98e-03, grad_scale: 32.0
+2023-03-26 01:41:38,452 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7414, 0.9680, 1.5651, 1.4313, 1.3898, 1.3784, 1.3300, 1.4092],
+       device='cuda:5'), covar=tensor([0.9280, 1.5235, 1.1277, 1.2346, 1.3952, 1.0009, 1.7049, 1.0721],
+       device='cuda:5'), in_proj_covar=tensor([0.0227, 0.0255, 0.0253, 0.0267, 0.0244, 0.0218, 0.0279, 0.0219],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001],
+       device='cuda:5')
+2023-03-26 01:41:42,328 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.2357, 2.5832, 2.4040, 1.3489, 2.6570, 2.2528, 1.8271, 2.1174],
+       device='cuda:5'), covar=tensor([0.0758, 0.1276, 0.2256, 0.2876, 0.2194, 0.2698, 0.2794, 0.1824],
+       device='cuda:5'), in_proj_covar=tensor([0.0165, 0.0197, 0.0204, 0.0188, 0.0216, 0.0210, 0.0216, 0.0201],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 01:41:46,608 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8157, 1.2592, 1.6433, 1.5955, 1.4561, 1.4421, 1.5257, 1.4993],
+       device='cuda:5'), covar=tensor([0.8367, 1.4082, 1.0544, 1.2562, 1.3074, 0.9397, 1.5052, 0.9867],
+       device='cuda:5'), in_proj_covar=tensor([0.0227, 0.0255, 0.0253, 0.0267, 0.0244, 0.0218, 0.0278, 0.0219],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001],
+       device='cuda:5')
+2023-03-26 01:41:51,355 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9629, 1.8331, 1.4671, 1.9533, 2.0381, 1.6885, 2.3861, 1.9597],
+       device='cuda:5'), covar=tensor([0.2269, 0.4408, 0.4792, 0.4384, 0.3317, 0.2283, 0.4206, 0.2946],
+       device='cuda:5'), in_proj_covar=tensor([0.0165, 0.0195, 0.0239, 0.0254, 0.0220, 0.0184, 0.0208, 0.0189],
+       device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 01:42:10,658 INFO [finetune.py:976] (5/7) Epoch 3, batch 3150, loss[loss=0.2578, simple_loss=0.3054, pruned_loss=0.105, over 4767.00 frames. ], tot_loss[loss=0.2456, simple_loss=0.297, pruned_loss=0.09713, over 957733.05 frames. ], batch size: 59, lr: 3.98e-03, grad_scale: 32.0
+2023-03-26 01:42:18,343 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.181e+02 1.775e+02 2.189e+02 2.683e+02 4.981e+02, threshold=4.378e+02, percent-clipped=2.0
+2023-03-26 01:43:00,564 INFO [finetune.py:976] (5/7) Epoch 3, batch 3200, loss[loss=0.275, simple_loss=0.3148, pruned_loss=0.1176, over 4829.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.294, pruned_loss=0.09622, over 957013.30 frames. ], batch size: 39, lr: 3.98e-03, grad_scale: 32.0
+2023-03-26 01:43:05,662 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5496, 1.3476, 1.3263, 1.3364, 1.6734, 1.6479, 1.4898, 1.3474],
+       device='cuda:5'), covar=tensor([0.0251, 0.0293, 0.0522, 0.0292, 0.0246, 0.0311, 0.0263, 0.0322],
+       device='cuda:5'), in_proj_covar=tensor([0.0083, 0.0113, 0.0135, 0.0116, 0.0104, 0.0098, 0.0089, 0.0108],
+       device='cuda:5'), out_proj_covar=tensor([6.4943e-05, 8.9491e-05, 1.0866e-04, 9.1990e-05, 8.2747e-05, 7.3246e-05,
+       6.8609e-05, 8.4792e-05], device='cuda:5')
+2023-03-26 01:43:29,636 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0
+2023-03-26 01:43:41,545 INFO [finetune.py:976] (5/7) Epoch 3, batch 3250, loss[loss=0.1819, simple_loss=0.239, pruned_loss=0.06239, over 4810.00 frames. ], tot_loss[loss=0.245, simple_loss=0.2957, pruned_loss=0.09719, over 958730.14 frames. ], batch size: 25, lr: 3.98e-03, grad_scale: 32.0
+2023-03-26 01:43:54,505 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.171e+02 1.718e+02 2.102e+02 2.544e+02 5.358e+02, threshold=4.204e+02, percent-clipped=1.0
+2023-03-26 01:44:08,044 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14728.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:44:15,454 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14740.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:44:17,364 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. limit=2.0
+2023-03-26 01:44:31,814 INFO [finetune.py:976] (5/7) Epoch 3, batch 3300, loss[loss=0.2916, simple_loss=0.3412, pruned_loss=0.121, over 4875.00 frames. ], tot_loss[loss=0.2489, simple_loss=0.2999, pruned_loss=0.09899, over 954688.85 frames. ], batch size: 34, lr: 3.98e-03, grad_scale: 32.0
+2023-03-26 01:44:35,953 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14759.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:44:42,780 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14769.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:44:48,715 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=14776.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:44:48,742 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14776.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 01:45:09,006 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14801.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:45:16,878 INFO [finetune.py:976] (5/7) Epoch 3, batch 3350, loss[loss=0.2584, simple_loss=0.3105, pruned_loss=0.1031, over 4753.00 frames. ], tot_loss[loss=0.2486, simple_loss=0.2999, pruned_loss=0.09865, over 952766.65 frames. ], batch size: 54, lr: 3.98e-03, grad_scale: 32.0
+2023-03-26 01:45:17,543 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=14807.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:45:29,677 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.053e+02 1.682e+02 2.084e+02 2.593e+02 4.183e+02, threshold=4.169e+02, percent-clipped=0.0
+2023-03-26 01:45:39,450 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=14824.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 01:45:52,840 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14843.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 01:46:01,260 INFO [finetune.py:976] (5/7) Epoch 3, batch 3400, loss[loss=0.2516, simple_loss=0.3105, pruned_loss=0.09631, over 4909.00 frames. ], tot_loss[loss=0.2479, simple_loss=0.2996, pruned_loss=0.09816, over 952098.73 frames. ], batch size: 36, lr: 3.98e-03, grad_scale: 32.0
+2023-03-26 01:46:41,112 INFO [finetune.py:976] (5/7) Epoch 3, batch 3450, loss[loss=0.2654, simple_loss=0.309, pruned_loss=0.1109, over 4717.00 frames. ], tot_loss[loss=0.2479, simple_loss=0.2999, pruned_loss=0.09799, over 950887.54 frames. ], batch size: 59, lr: 3.98e-03, grad_scale: 32.0
+2023-03-26 01:46:53,149 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.352e+02 1.935e+02 2.237e+02 2.692e+02 3.962e+02, threshold=4.475e+02, percent-clipped=0.0
+2023-03-26 01:47:00,706 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0
+2023-03-26 01:47:13,576 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0
+2023-03-26 01:47:27,612 INFO [finetune.py:976] (5/7) Epoch 3, batch 3500, loss[loss=0.2707, simple_loss=0.3003, pruned_loss=0.1205, over 4828.00 frames. ], tot_loss[loss=0.2454, simple_loss=0.2968, pruned_loss=0.09701, over 950218.69 frames. ], batch size: 30, lr: 3.98e-03, grad_scale: 32.0
+2023-03-26 01:47:27,984 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.33 vs. limit=5.0
+2023-03-26 01:47:47,873 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4425, 3.2961, 3.1444, 1.6610, 3.3536, 2.6025, 0.8963, 2.3812],
+       device='cuda:5'), covar=tensor([0.2570, 0.1792, 0.1746, 0.3285, 0.1206, 0.0952, 0.4192, 0.1766],
+       device='cuda:5'), in_proj_covar=tensor([0.0155, 0.0168, 0.0165, 0.0129, 0.0155, 0.0120, 0.0146, 0.0122],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+       device='cuda:5')
+2023-03-26 01:48:19,808 INFO [finetune.py:976] (5/7) Epoch 3, batch 3550, loss[loss=0.2744, simple_loss=0.3248, pruned_loss=0.112, over 4870.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.2928, pruned_loss=0.09525, over 949938.76 frames.
], batch size: 34, lr: 3.98e-03, grad_scale: 32.0 +2023-03-26 01:48:21,741 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5000, 1.4955, 1.9639, 1.9677, 1.6833, 3.7976, 1.2392, 1.7757], + device='cuda:5'), covar=tensor([0.0997, 0.1556, 0.1311, 0.0944, 0.1452, 0.0211, 0.1442, 0.1607], + device='cuda:5'), in_proj_covar=tensor([0.0078, 0.0081, 0.0078, 0.0080, 0.0093, 0.0084, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 01:48:26,975 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.116e+02 1.780e+02 2.199e+02 2.800e+02 5.904e+02, threshold=4.398e+02, percent-clipped=2.0 +2023-03-26 01:49:03,035 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7923, 1.2272, 0.9534, 1.6187, 2.0955, 1.3542, 1.4425, 1.8505], + device='cuda:5'), covar=tensor([0.1477, 0.2146, 0.2223, 0.1214, 0.2015, 0.2144, 0.1499, 0.1827], + device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0098, 0.0117, 0.0093, 0.0124, 0.0097, 0.0100, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 01:49:03,542 INFO [finetune.py:976] (5/7) Epoch 3, batch 3600, loss[loss=0.278, simple_loss=0.3225, pruned_loss=0.1167, over 4915.00 frames. ], tot_loss[loss=0.2372, simple_loss=0.2888, pruned_loss=0.09285, over 951007.79 frames. ], batch size: 46, lr: 3.98e-03, grad_scale: 32.0 +2023-03-26 01:49:12,110 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=15069.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 01:49:24,040 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4200, 2.2151, 1.7404, 2.5760, 2.4871, 1.9998, 2.9211, 2.2585], + device='cuda:5'), covar=tensor([0.2156, 0.5018, 0.4890, 0.4528, 0.3271, 0.2307, 0.3790, 0.3145], + device='cuda:5'), in_proj_covar=tensor([0.0166, 0.0198, 0.0240, 0.0256, 0.0223, 0.0186, 0.0211, 0.0190], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 01:49:30,454 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.6464, 1.6064, 1.6276, 0.9289, 1.7351, 1.9056, 1.8007, 1.5465], + device='cuda:5'), covar=tensor([0.1047, 0.0636, 0.0561, 0.0717, 0.0488, 0.0519, 0.0425, 0.0711], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0159, 0.0119, 0.0137, 0.0133, 0.0121, 0.0148, 0.0144], + device='cuda:5'), out_proj_covar=tensor([9.8782e-05, 1.1858e-04, 8.6715e-05, 1.0091e-04, 9.6176e-05, 8.9285e-05, + 1.1070e-04, 1.0709e-04], device='cuda:5') +2023-03-26 01:49:33,811 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5062, 2.2578, 2.8673, 1.9012, 2.7162, 2.7339, 2.0174, 2.9047], + device='cuda:5'), covar=tensor([0.1777, 0.2149, 0.1582, 0.2682, 0.0950, 0.2035, 0.2742, 0.1114], + device='cuda:5'), in_proj_covar=tensor([0.0207, 0.0207, 0.0206, 0.0198, 0.0182, 0.0227, 0.0217, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 01:49:42,984 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=15096.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 01:49:49,067 INFO [finetune.py:976] (5/7) Epoch 3, batch 3650, loss[loss=0.2609, simple_loss=0.3027, pruned_loss=0.1095, over 4797.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.2928, pruned_loss=0.09522, over 950364.92 frames. 
], batch size: 29, lr: 3.98e-03, grad_scale: 32.0 +2023-03-26 01:49:56,319 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.204e+02 1.927e+02 2.238e+02 2.686e+02 4.916e+02, threshold=4.476e+02, percent-clipped=1.0 +2023-03-26 01:49:56,389 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=15117.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 01:50:17,868 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5108, 1.4042, 1.4717, 1.5598, 0.9483, 2.9501, 1.0806, 1.5943], + device='cuda:5'), covar=tensor([0.3466, 0.2536, 0.2063, 0.2365, 0.2135, 0.0251, 0.2834, 0.1487], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0111, 0.0116, 0.0119, 0.0116, 0.0097, 0.0101, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 01:50:28,377 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=15143.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 01:50:41,690 INFO [finetune.py:976] (5/7) Epoch 3, batch 3700, loss[loss=0.2482, simple_loss=0.3139, pruned_loss=0.09126, over 4924.00 frames. ], tot_loss[loss=0.2448, simple_loss=0.2969, pruned_loss=0.09638, over 951749.33 frames. ], batch size: 33, lr: 3.98e-03, grad_scale: 32.0 +2023-03-26 01:51:12,081 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6403, 1.5467, 1.5493, 1.6771, 1.1000, 3.3436, 1.2647, 1.7154], + device='cuda:5'), covar=tensor([0.3310, 0.2397, 0.1939, 0.2181, 0.1972, 0.0212, 0.2654, 0.1418], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0111, 0.0116, 0.0119, 0.0116, 0.0097, 0.0101, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 01:51:13,294 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5593, 1.4852, 2.1992, 3.2108, 2.2356, 2.2359, 1.2450, 2.4597], + device='cuda:5'), covar=tensor([0.1900, 0.1528, 0.1223, 0.0598, 0.0834, 0.1759, 0.1689, 0.0711], + device='cuda:5'), in_proj_covar=tensor([0.0103, 0.0120, 0.0138, 0.0166, 0.0105, 0.0144, 0.0130, 0.0108], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 01:51:13,348 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8545, 1.7184, 1.3657, 1.7824, 1.9094, 1.5468, 2.1849, 1.7674], + device='cuda:5'), covar=tensor([0.2234, 0.4732, 0.5205, 0.4628, 0.3760, 0.2487, 0.4836, 0.3264], + device='cuda:5'), in_proj_covar=tensor([0.0166, 0.0198, 0.0240, 0.0257, 0.0223, 0.0187, 0.0211, 0.0190], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 01:51:16,176 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=15191.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 01:51:19,617 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=15195.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 01:51:29,141 INFO [finetune.py:976] (5/7) Epoch 3, batch 3750, loss[loss=0.2032, simple_loss=0.2774, pruned_loss=0.06455, over 4769.00 frames. ], tot_loss[loss=0.2457, simple_loss=0.2983, pruned_loss=0.09655, over 954191.93 frames. 
], batch size: 28, lr: 3.98e-03, grad_scale: 32.0 +2023-03-26 01:51:40,517 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.151e+02 1.801e+02 2.153e+02 2.622e+02 6.720e+02, threshold=4.305e+02, percent-clipped=1.0 +2023-03-26 01:52:11,992 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-03-26 01:52:32,721 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9399, 1.5769, 2.3000, 1.5514, 2.1208, 2.0171, 1.6276, 2.3683], + device='cuda:5'), covar=tensor([0.1456, 0.2066, 0.1310, 0.2039, 0.0902, 0.1658, 0.2618, 0.0856], + device='cuda:5'), in_proj_covar=tensor([0.0205, 0.0204, 0.0204, 0.0196, 0.0181, 0.0225, 0.0214, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 01:52:33,791 INFO [finetune.py:976] (5/7) Epoch 3, batch 3800, loss[loss=0.2501, simple_loss=0.3022, pruned_loss=0.09899, over 4884.00 frames. ], tot_loss[loss=0.2463, simple_loss=0.2993, pruned_loss=0.09661, over 953090.95 frames. ], batch size: 32, lr: 3.98e-03, grad_scale: 32.0 +2023-03-26 01:52:33,936 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=15256.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 01:53:22,428 INFO [finetune.py:976] (5/7) Epoch 3, batch 3850, loss[loss=0.2438, simple_loss=0.2959, pruned_loss=0.09582, over 4885.00 frames. ], tot_loss[loss=0.2455, simple_loss=0.2981, pruned_loss=0.09641, over 952386.55 frames. ], batch size: 35, lr: 3.98e-03, grad_scale: 32.0 +2023-03-26 01:53:22,545 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1040, 1.9684, 1.9185, 2.1185, 1.5191, 3.9181, 1.8648, 2.5054], + device='cuda:5'), covar=tensor([0.2739, 0.2071, 0.1705, 0.1887, 0.1670, 0.0172, 0.2177, 0.1077], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0111, 0.0115, 0.0119, 0.0116, 0.0097, 0.0101, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 01:53:28,798 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.58 vs. limit=2.0 +2023-03-26 01:53:39,317 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.276e+02 1.876e+02 2.253e+02 2.579e+02 5.032e+02, threshold=4.505e+02, percent-clipped=1.0 +2023-03-26 01:54:03,958 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4005, 1.9928, 1.8579, 1.5594, 2.2853, 2.6817, 2.2667, 2.1155], + device='cuda:5'), covar=tensor([0.0226, 0.0491, 0.0552, 0.0409, 0.0338, 0.0424, 0.0349, 0.0425], + device='cuda:5'), in_proj_covar=tensor([0.0083, 0.0113, 0.0135, 0.0116, 0.0104, 0.0098, 0.0089, 0.0108], + device='cuda:5'), out_proj_covar=tensor([6.4927e-05, 8.9466e-05, 1.0880e-04, 9.1529e-05, 8.2465e-05, 7.2825e-05, + 6.8480e-05, 8.4461e-05], device='cuda:5') +2023-03-26 01:54:26,487 INFO [finetune.py:976] (5/7) Epoch 3, batch 3900, loss[loss=0.2579, simple_loss=0.3072, pruned_loss=0.1043, over 4930.00 frames. ], tot_loss[loss=0.2421, simple_loss=0.2949, pruned_loss=0.09467, over 954711.36 frames. ], batch size: 38, lr: 3.98e-03, grad_scale: 32.0 +2023-03-26 01:55:10,116 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=15396.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 01:55:16,820 INFO [finetune.py:976] (5/7) Epoch 3, batch 3950, loss[loss=0.1997, simple_loss=0.2635, pruned_loss=0.06791, over 4935.00 frames. 
], tot_loss[loss=0.2382, simple_loss=0.291, pruned_loss=0.09267, over 956646.07 frames. ], batch size: 33, lr: 3.98e-03, grad_scale: 32.0 +2023-03-26 01:55:23,457 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6264, 1.5296, 1.4043, 1.5998, 1.0757, 3.5419, 1.3974, 2.0994], + device='cuda:5'), covar=tensor([0.4115, 0.2955, 0.2407, 0.2737, 0.1991, 0.0254, 0.2670, 0.1297], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0111, 0.0116, 0.0119, 0.0116, 0.0097, 0.0101, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 01:55:25,255 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.128e+02 1.678e+02 2.164e+02 2.472e+02 4.231e+02, threshold=4.328e+02, percent-clipped=0.0 +2023-03-26 01:55:52,197 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=15444.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 01:55:59,827 INFO [finetune.py:976] (5/7) Epoch 3, batch 4000, loss[loss=0.2619, simple_loss=0.3214, pruned_loss=0.1012, over 4807.00 frames. ], tot_loss[loss=0.2395, simple_loss=0.2911, pruned_loss=0.09391, over 953381.66 frames. ], batch size: 45, lr: 3.98e-03, grad_scale: 32.0 +2023-03-26 01:57:05,458 INFO [finetune.py:976] (5/7) Epoch 3, batch 4050, loss[loss=0.2641, simple_loss=0.3243, pruned_loss=0.1019, over 4848.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.2949, pruned_loss=0.0949, over 954382.77 frames. ], batch size: 49, lr: 3.98e-03, grad_scale: 32.0 +2023-03-26 01:57:20,271 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.142e+02 1.798e+02 2.110e+02 2.647e+02 5.396e+02, threshold=4.219e+02, percent-clipped=2.0 +2023-03-26 01:57:40,718 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7913, 1.6343, 1.5376, 1.8516, 1.2162, 4.3699, 1.6694, 2.2888], + device='cuda:5'), covar=tensor([0.3285, 0.2363, 0.2010, 0.2103, 0.1827, 0.0099, 0.2406, 0.1310], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0111, 0.0116, 0.0119, 0.0116, 0.0097, 0.0102, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 01:57:58,328 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=15551.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 01:58:06,352 INFO [finetune.py:976] (5/7) Epoch 3, batch 4100, loss[loss=0.2593, simple_loss=0.3197, pruned_loss=0.09938, over 4821.00 frames. ], tot_loss[loss=0.2453, simple_loss=0.2984, pruned_loss=0.09608, over 953775.44 frames. ], batch size: 39, lr: 3.98e-03, grad_scale: 32.0 +2023-03-26 01:59:02,793 INFO [finetune.py:976] (5/7) Epoch 3, batch 4150, loss[loss=0.2576, simple_loss=0.3206, pruned_loss=0.09729, over 4811.00 frames. ], tot_loss[loss=0.2455, simple_loss=0.2992, pruned_loss=0.09591, over 954581.41 frames. ], batch size: 51, lr: 3.98e-03, grad_scale: 32.0 +2023-03-26 01:59:10,586 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.238e+02 1.791e+02 2.157e+02 2.467e+02 4.537e+02, threshold=4.313e+02, percent-clipped=1.0 +2023-03-26 01:59:51,469 INFO [finetune.py:976] (5/7) Epoch 3, batch 4200, loss[loss=0.2259, simple_loss=0.2774, pruned_loss=0.08718, over 4902.00 frames. ], tot_loss[loss=0.2458, simple_loss=0.2994, pruned_loss=0.09609, over 953713.80 frames. ], batch size: 36, lr: 3.98e-03, grad_scale: 32.0 +2023-03-26 02:00:44,069 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.30 vs. 
limit=5.0 +2023-03-26 02:00:53,255 INFO [finetune.py:976] (5/7) Epoch 3, batch 4250, loss[loss=0.2414, simple_loss=0.2946, pruned_loss=0.09408, over 4783.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.2968, pruned_loss=0.09511, over 954344.79 frames. ], batch size: 26, lr: 3.98e-03, grad_scale: 32.0 +2023-03-26 02:01:00,001 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.228e+02 1.731e+02 2.073e+02 2.469e+02 5.386e+02, threshold=4.147e+02, percent-clipped=2.0 +2023-03-26 02:01:16,427 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.3476, 2.6787, 2.2668, 1.4154, 2.5704, 2.4147, 1.8401, 2.0822], + device='cuda:5'), covar=tensor([0.0708, 0.1202, 0.2369, 0.2689, 0.2142, 0.2175, 0.2628, 0.1626], + device='cuda:5'), in_proj_covar=tensor([0.0166, 0.0198, 0.0205, 0.0190, 0.0218, 0.0212, 0.0218, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:01:22,082 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.8617, 3.3768, 3.4990, 3.7113, 3.6126, 3.4338, 3.9747, 1.2958], + device='cuda:5'), covar=tensor([0.0974, 0.0863, 0.0831, 0.1080, 0.1543, 0.1512, 0.0823, 0.5430], + device='cuda:5'), in_proj_covar=tensor([0.0367, 0.0244, 0.0277, 0.0292, 0.0341, 0.0287, 0.0311, 0.0302], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:01:43,528 INFO [finetune.py:976] (5/7) Epoch 3, batch 4300, loss[loss=0.236, simple_loss=0.2781, pruned_loss=0.09695, over 4794.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.2929, pruned_loss=0.0936, over 955063.85 frames. ], batch size: 25, lr: 3.98e-03, grad_scale: 32.0 +2023-03-26 02:02:00,653 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.95 vs. limit=5.0 +2023-03-26 02:02:43,903 INFO [finetune.py:976] (5/7) Epoch 3, batch 4350, loss[loss=0.2536, simple_loss=0.2878, pruned_loss=0.1097, over 4867.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.2895, pruned_loss=0.09212, over 955899.34 frames. ], batch size: 31, lr: 3.98e-03, grad_scale: 32.0 +2023-03-26 02:02:44,273 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-03-26 02:03:01,710 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.218e+02 1.712e+02 2.007e+02 2.460e+02 4.679e+02, threshold=4.015e+02, percent-clipped=1.0 +2023-03-26 02:03:33,460 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=15851.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:03:36,424 INFO [finetune.py:976] (5/7) Epoch 3, batch 4400, loss[loss=0.2538, simple_loss=0.3108, pruned_loss=0.09839, over 4894.00 frames. ], tot_loss[loss=0.2383, simple_loss=0.2902, pruned_loss=0.09318, over 951661.38 frames. ], batch size: 43, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:04:05,506 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=15883.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:04:26,297 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=15899.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:04:30,837 INFO [finetune.py:976] (5/7) Epoch 3, batch 4450, loss[loss=0.2513, simple_loss=0.3147, pruned_loss=0.09392, over 4839.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.2937, pruned_loss=0.09404, over 953345.23 frames. 
], batch size: 47, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:04:48,085 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.107e+02 1.872e+02 2.224e+02 2.526e+02 5.583e+02, threshold=4.448e+02, percent-clipped=1.0 +2023-03-26 02:05:15,999 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=15944.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:05:23,248 INFO [finetune.py:976] (5/7) Epoch 3, batch 4500, loss[loss=0.2217, simple_loss=0.2808, pruned_loss=0.08127, over 4742.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.2967, pruned_loss=0.09522, over 952299.58 frames. ], batch size: 27, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:06:21,353 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16001.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:06:23,489 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.35 vs. limit=5.0 +2023-03-26 02:06:24,312 INFO [finetune.py:976] (5/7) Epoch 3, batch 4550, loss[loss=0.2782, simple_loss=0.3224, pruned_loss=0.117, over 4860.00 frames. ], tot_loss[loss=0.2451, simple_loss=0.2982, pruned_loss=0.09603, over 953320.05 frames. ], batch size: 31, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:06:36,376 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4993, 1.3963, 1.4352, 1.5332, 1.0133, 2.8074, 1.1225, 1.5277], + device='cuda:5'), covar=tensor([0.3728, 0.2542, 0.2211, 0.2379, 0.2085, 0.0297, 0.2749, 0.1541], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0111, 0.0116, 0.0119, 0.0116, 0.0097, 0.0101, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 02:06:36,865 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.237e+02 1.760e+02 2.085e+02 2.487e+02 3.865e+02, threshold=4.170e+02, percent-clipped=0.0 +2023-03-26 02:07:03,514 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7721, 1.2655, 0.8056, 1.6073, 2.0710, 1.1829, 1.4546, 1.6289], + device='cuda:5'), covar=tensor([0.1603, 0.2159, 0.2338, 0.1331, 0.2085, 0.2224, 0.1481, 0.2176], + device='cuda:5'), in_proj_covar=tensor([0.0093, 0.0099, 0.0118, 0.0094, 0.0125, 0.0098, 0.0100, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 02:07:05,907 INFO [finetune.py:976] (5/7) Epoch 3, batch 4600, loss[loss=0.2679, simple_loss=0.311, pruned_loss=0.1124, over 4819.00 frames. ], tot_loss[loss=0.2452, simple_loss=0.2984, pruned_loss=0.09604, over 954582.61 frames. 
], batch size: 38, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:07:11,899 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16062.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:07:32,164 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0770, 1.9360, 1.7744, 2.0755, 1.2392, 4.5221, 1.7331, 2.4287], + device='cuda:5'), covar=tensor([0.3391, 0.2214, 0.1929, 0.1964, 0.1808, 0.0097, 0.2302, 0.1295], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0111, 0.0116, 0.0120, 0.0116, 0.0097, 0.0102, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 02:07:35,234 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16085.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:07:59,849 INFO [finetune.py:976] (5/7) Epoch 3, batch 4650, loss[loss=0.2342, simple_loss=0.2933, pruned_loss=0.08753, over 4779.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.2946, pruned_loss=0.09395, over 955925.55 frames. ], batch size: 29, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:08:03,619 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-03-26 02:08:07,260 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.007e+02 1.817e+02 2.201e+02 2.598e+02 3.850e+02, threshold=4.403e+02, percent-clipped=0.0 +2023-03-26 02:08:39,860 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16146.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:08:42,105 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16149.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:08:48,416 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.55 vs. limit=5.0 +2023-03-26 02:08:52,385 INFO [finetune.py:976] (5/7) Epoch 3, batch 4700, loss[loss=0.2036, simple_loss=0.2595, pruned_loss=0.07382, over 4875.00 frames. ], tot_loss[loss=0.2386, simple_loss=0.2912, pruned_loss=0.09303, over 955445.94 frames. ], batch size: 31, lr: 3.97e-03, grad_scale: 64.0 +2023-03-26 02:08:52,494 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8419, 1.6520, 1.7674, 1.8537, 1.2135, 3.5866, 1.4393, 1.9680], + device='cuda:5'), covar=tensor([0.3224, 0.2292, 0.1850, 0.2125, 0.1883, 0.0165, 0.2531, 0.1360], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0111, 0.0116, 0.0120, 0.0116, 0.0097, 0.0101, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 02:09:41,915 INFO [finetune.py:976] (5/7) Epoch 3, batch 4750, loss[loss=0.2721, simple_loss=0.3154, pruned_loss=0.1144, over 4844.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.289, pruned_loss=0.09232, over 955641.34 frames. 
], batch size: 44, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:09:44,996 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16210.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:09:49,754 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.299e+02 1.759e+02 2.065e+02 2.416e+02 4.123e+02, threshold=4.129e+02, percent-clipped=0.0 +2023-03-26 02:10:03,623 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16239.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:10:16,138 INFO [finetune.py:976] (5/7) Epoch 3, batch 4800, loss[loss=0.2486, simple_loss=0.3088, pruned_loss=0.09424, over 4755.00 frames. ], tot_loss[loss=0.2387, simple_loss=0.2912, pruned_loss=0.09309, over 954865.62 frames. ], batch size: 59, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:11:10,320 INFO [finetune.py:976] (5/7) Epoch 3, batch 4850, loss[loss=0.24, simple_loss=0.2959, pruned_loss=0.09209, over 4835.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.2951, pruned_loss=0.09414, over 954858.53 frames. ], batch size: 49, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:11:18,714 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.278e+02 1.868e+02 2.289e+02 2.628e+02 4.977e+02, threshold=4.577e+02, percent-clipped=4.0 +2023-03-26 02:11:53,000 INFO [finetune.py:976] (5/7) Epoch 3, batch 4900, loss[loss=0.2204, simple_loss=0.2835, pruned_loss=0.07865, over 4916.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.2971, pruned_loss=0.09535, over 952893.75 frames. ], batch size: 38, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:11:53,738 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16357.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:12:04,214 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16371.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:12:05,846 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.63 vs. limit=2.0 +2023-03-26 02:12:35,111 INFO [finetune.py:976] (5/7) Epoch 3, batch 4950, loss[loss=0.259, simple_loss=0.2886, pruned_loss=0.1147, over 3927.00 frames. ], tot_loss[loss=0.2442, simple_loss=0.2977, pruned_loss=0.09541, over 952756.28 frames. ], batch size: 16, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:12:43,743 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.165e+02 1.793e+02 2.170e+02 2.564e+02 4.726e+02, threshold=4.340e+02, percent-clipped=1.0 +2023-03-26 02:12:53,384 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16432.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:12:59,270 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16441.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:13:11,536 INFO [finetune.py:976] (5/7) Epoch 3, batch 5000, loss[loss=0.1954, simple_loss=0.2538, pruned_loss=0.06844, over 4794.00 frames. ], tot_loss[loss=0.2411, simple_loss=0.2946, pruned_loss=0.09382, over 953869.78 frames. ], batch size: 51, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:14:08,366 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16505.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:14:08,898 INFO [finetune.py:976] (5/7) Epoch 3, batch 5050, loss[loss=0.207, simple_loss=0.2714, pruned_loss=0.0713, over 4798.00 frames. ], tot_loss[loss=0.2381, simple_loss=0.2911, pruned_loss=0.09256, over 953835.92 frames. 
], batch size: 29, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:14:27,727 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.054e+02 1.680e+02 2.024e+02 2.446e+02 4.498e+02, threshold=4.048e+02, percent-clipped=1.0 +2023-03-26 02:14:49,245 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16539.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:15:09,830 INFO [finetune.py:976] (5/7) Epoch 3, batch 5100, loss[loss=0.1945, simple_loss=0.2547, pruned_loss=0.06714, over 4769.00 frames. ], tot_loss[loss=0.2344, simple_loss=0.288, pruned_loss=0.0904, over 956527.75 frames. ], batch size: 28, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:15:36,573 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=16587.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:15:40,461 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7767, 1.1878, 0.9064, 1.6503, 2.0769, 1.2999, 1.4577, 1.7207], + device='cuda:5'), covar=tensor([0.1388, 0.1976, 0.2166, 0.1077, 0.1853, 0.2171, 0.1295, 0.1676], + device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0098, 0.0117, 0.0093, 0.0124, 0.0097, 0.0100, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 02:15:52,951 INFO [finetune.py:976] (5/7) Epoch 3, batch 5150, loss[loss=0.3081, simple_loss=0.3397, pruned_loss=0.1383, over 4873.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.2882, pruned_loss=0.09067, over 954683.03 frames. ], batch size: 34, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:16:12,054 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.182e+02 1.762e+02 2.113e+02 2.590e+02 4.768e+02, threshold=4.226e+02, percent-clipped=2.0 +2023-03-26 02:16:22,623 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.8411, 4.1913, 4.3796, 4.6531, 4.5772, 4.2455, 4.9409, 1.5923], + device='cuda:5'), covar=tensor([0.0703, 0.0753, 0.0653, 0.0857, 0.1161, 0.1427, 0.0518, 0.5388], + device='cuda:5'), in_proj_covar=tensor([0.0366, 0.0246, 0.0279, 0.0296, 0.0344, 0.0290, 0.0313, 0.0303], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:16:41,152 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-03-26 02:16:47,969 INFO [finetune.py:976] (5/7) Epoch 3, batch 5200, loss[loss=0.2526, simple_loss=0.3089, pruned_loss=0.09819, over 4787.00 frames. ], tot_loss[loss=0.24, simple_loss=0.2936, pruned_loss=0.09325, over 954938.04 frames. ], batch size: 29, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:16:48,691 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16657.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:17:11,047 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. 
limit=2.0 +2023-03-26 02:17:28,183 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7099, 1.4346, 2.2373, 3.4601, 2.3526, 2.4830, 1.3116, 2.6591], + device='cuda:5'), covar=tensor([0.1965, 0.1724, 0.1370, 0.0590, 0.0895, 0.1448, 0.1881, 0.0699], + device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0119, 0.0138, 0.0165, 0.0104, 0.0143, 0.0129, 0.0106], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 02:17:40,754 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=16705.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:17:41,306 INFO [finetune.py:976] (5/7) Epoch 3, batch 5250, loss[loss=0.2731, simple_loss=0.326, pruned_loss=0.1101, over 4833.00 frames. ], tot_loss[loss=0.2428, simple_loss=0.2964, pruned_loss=0.0946, over 953269.37 frames. ], batch size: 47, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:17:53,303 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.211e+02 1.833e+02 2.103e+02 2.647e+02 4.683e+02, threshold=4.205e+02, percent-clipped=1.0 +2023-03-26 02:18:00,562 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16727.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:18:12,277 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16741.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:18:17,735 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3880, 1.9276, 2.3092, 1.1391, 2.4948, 2.6532, 2.1523, 2.1187], + device='cuda:5'), covar=tensor([0.0995, 0.0863, 0.0465, 0.0841, 0.0537, 0.0414, 0.0457, 0.0597], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0158, 0.0117, 0.0137, 0.0132, 0.0120, 0.0146, 0.0145], + device='cuda:5'), out_proj_covar=tensor([9.7846e-05, 1.1735e-04, 8.4854e-05, 1.0039e-04, 9.5606e-05, 8.9128e-05, + 1.0854e-04, 1.0710e-04], device='cuda:5') +2023-03-26 02:18:21,891 INFO [finetune.py:976] (5/7) Epoch 3, batch 5300, loss[loss=0.2075, simple_loss=0.2818, pruned_loss=0.06664, over 4747.00 frames. ], tot_loss[loss=0.2444, simple_loss=0.298, pruned_loss=0.0954, over 953245.53 frames. ], batch size: 27, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:18:23,950 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.23 vs. 
limit=5.0 +2023-03-26 02:18:24,534 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6001, 1.5539, 1.5232, 1.8230, 2.2594, 1.7727, 1.2524, 1.3459], + device='cuda:5'), covar=tensor([0.2945, 0.2793, 0.2436, 0.2111, 0.2150, 0.1457, 0.3487, 0.2393], + device='cuda:5'), in_proj_covar=tensor([0.0230, 0.0207, 0.0196, 0.0181, 0.0231, 0.0171, 0.0212, 0.0185], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:18:39,870 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1527, 3.6631, 3.7910, 4.0076, 3.8626, 3.6411, 4.2700, 1.3321], + device='cuda:5'), covar=tensor([0.0746, 0.0802, 0.0785, 0.0875, 0.1213, 0.1490, 0.0657, 0.5022], + device='cuda:5'), in_proj_covar=tensor([0.0366, 0.0246, 0.0279, 0.0297, 0.0344, 0.0290, 0.0312, 0.0303], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:18:49,198 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=16789.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:19:04,917 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.68 vs. limit=5.0 +2023-03-26 02:19:07,676 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16805.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:19:08,225 INFO [finetune.py:976] (5/7) Epoch 3, batch 5350, loss[loss=0.2655, simple_loss=0.3098, pruned_loss=0.1106, over 4741.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.2971, pruned_loss=0.09467, over 954010.06 frames. ], batch size: 54, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:19:21,976 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.184e+02 1.880e+02 2.227e+02 2.511e+02 3.677e+02, threshold=4.454e+02, percent-clipped=0.0 +2023-03-26 02:19:27,016 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3826, 1.9664, 2.4908, 1.1493, 2.5936, 2.6803, 2.1936, 2.2267], + device='cuda:5'), covar=tensor([0.1142, 0.0935, 0.0428, 0.0947, 0.0596, 0.1038, 0.0562, 0.0803], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0158, 0.0117, 0.0136, 0.0132, 0.0121, 0.0146, 0.0145], + device='cuda:5'), out_proj_covar=tensor([9.7570e-05, 1.1725e-04, 8.4978e-05, 1.0008e-04, 9.5715e-05, 8.9281e-05, + 1.0865e-04, 1.0729e-04], device='cuda:5') +2023-03-26 02:19:46,295 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=16853.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:19:48,085 INFO [finetune.py:976] (5/7) Epoch 3, batch 5400, loss[loss=0.2959, simple_loss=0.3242, pruned_loss=0.1338, over 4822.00 frames. ], tot_loss[loss=0.2389, simple_loss=0.2927, pruned_loss=0.09252, over 954755.79 frames. ], batch size: 39, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:19:55,544 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16868.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 02:20:03,973 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-03-26 02:20:03,978 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. 
limit=2.0 +2023-03-26 02:20:19,596 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8861, 1.7392, 1.4200, 1.7360, 1.6991, 1.5979, 1.6066, 2.4888], + device='cuda:5'), covar=tensor([1.0643, 1.0156, 0.8225, 1.0857, 0.8618, 0.6162, 1.0521, 0.3316], + device='cuda:5'), in_proj_covar=tensor([0.0272, 0.0248, 0.0218, 0.0283, 0.0234, 0.0195, 0.0238, 0.0186], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-26 02:20:31,378 INFO [finetune.py:976] (5/7) Epoch 3, batch 5450, loss[loss=0.1763, simple_loss=0.2439, pruned_loss=0.05432, over 4755.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.2884, pruned_loss=0.09057, over 954164.86 frames. ], batch size: 27, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:20:31,472 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16906.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:20:38,655 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.130e+02 1.665e+02 2.000e+02 2.450e+02 4.433e+02, threshold=4.000e+02, percent-clipped=0.0 +2023-03-26 02:20:46,041 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16929.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 02:21:14,238 INFO [finetune.py:976] (5/7) Epoch 3, batch 5500, loss[loss=0.2083, simple_loss=0.2817, pruned_loss=0.06742, over 4914.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.2855, pruned_loss=0.0895, over 953727.68 frames. ], batch size: 37, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:21:26,193 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16967.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 02:22:07,776 INFO [finetune.py:976] (5/7) Epoch 3, batch 5550, loss[loss=0.2139, simple_loss=0.2802, pruned_loss=0.07379, over 4843.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.2886, pruned_loss=0.09106, over 952551.44 frames. ], batch size: 47, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:22:10,644 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. limit=2.0 +2023-03-26 02:22:13,292 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5319, 1.4207, 1.2910, 1.2612, 1.6087, 1.3099, 1.7249, 1.5189], + device='cuda:5'), covar=tensor([0.2207, 0.3555, 0.4579, 0.3752, 0.3346, 0.2380, 0.3660, 0.2815], + device='cuda:5'), in_proj_covar=tensor([0.0165, 0.0195, 0.0239, 0.0255, 0.0221, 0.0187, 0.0210, 0.0189], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:22:15,687 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.144e+02 1.712e+02 2.015e+02 2.380e+02 4.122e+02, threshold=4.030e+02, percent-clipped=1.0 +2023-03-26 02:22:19,610 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-03-26 02:22:21,781 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17027.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:22:53,358 INFO [finetune.py:976] (5/7) Epoch 3, batch 5600, loss[loss=0.2516, simple_loss=0.3074, pruned_loss=0.09794, over 4256.00 frames. ], tot_loss[loss=0.2389, simple_loss=0.2931, pruned_loss=0.09231, over 953884.81 frames. 
], batch size: 65, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:23:10,684 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=17075.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:23:11,858 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6778, 3.6974, 3.5733, 1.8047, 3.9147, 2.7859, 0.9048, 2.6184], + device='cuda:5'), covar=tensor([0.2334, 0.2310, 0.1606, 0.3358, 0.1035, 0.1057, 0.4453, 0.1534], + device='cuda:5'), in_proj_covar=tensor([0.0154, 0.0168, 0.0162, 0.0128, 0.0154, 0.0120, 0.0145, 0.0120], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 02:23:38,819 INFO [finetune.py:976] (5/7) Epoch 3, batch 5650, loss[loss=0.2353, simple_loss=0.2951, pruned_loss=0.08781, over 4914.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.2968, pruned_loss=0.09376, over 954031.84 frames. ], batch size: 36, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:23:45,803 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.238e+02 1.755e+02 2.152e+02 2.681e+02 4.789e+02, threshold=4.305e+02, percent-clipped=1.0 +2023-03-26 02:24:07,761 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8372, 1.2958, 0.9927, 1.6525, 2.2265, 1.1360, 1.4817, 1.6831], + device='cuda:5'), covar=tensor([0.1327, 0.1946, 0.2078, 0.1136, 0.1742, 0.2117, 0.1370, 0.1741], + device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0099, 0.0118, 0.0095, 0.0124, 0.0098, 0.0101, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 02:24:15,374 INFO [finetune.py:976] (5/7) Epoch 3, batch 5700, loss[loss=0.2029, simple_loss=0.2542, pruned_loss=0.07579, over 4107.00 frames. ], tot_loss[loss=0.2403, simple_loss=0.293, pruned_loss=0.0938, over 934148.13 frames. ], batch size: 18, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:24:56,867 INFO [finetune.py:976] (5/7) Epoch 4, batch 0, loss[loss=0.3024, simple_loss=0.3394, pruned_loss=0.1327, over 4293.00 frames. ], tot_loss[loss=0.3024, simple_loss=0.3394, pruned_loss=0.1327, over 4293.00 frames. ], batch size: 19, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:24:56,867 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 02:25:05,691 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.2044, 1.3446, 1.3923, 0.8075, 1.2809, 1.4685, 1.6200, 1.3483], + device='cuda:5'), covar=tensor([0.1236, 0.0711, 0.0616, 0.0612, 0.0512, 0.0780, 0.0415, 0.0882], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0157, 0.0117, 0.0136, 0.0132, 0.0121, 0.0146, 0.0145], + device='cuda:5'), out_proj_covar=tensor([9.8181e-05, 1.1719e-04, 8.5409e-05, 1.0017e-04, 9.5355e-05, 9.0035e-05, + 1.0877e-04, 1.0736e-04], device='cuda:5') +2023-03-26 02:25:18,215 INFO [finetune.py:1010] (5/7) Epoch 4, validation: loss=0.1768, simple_loss=0.2473, pruned_loss=0.0532, over 2265189.00 frames. 
+2023-03-26 02:25:18,215 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6316MB +2023-03-26 02:25:22,724 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17189.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:25:55,799 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.188e+02 1.713e+02 2.128e+02 2.708e+02 4.853e+02, threshold=4.257e+02, percent-clipped=3.0 +2023-03-26 02:26:00,040 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17224.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 02:26:01,335 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0957, 1.3827, 1.8870, 1.7852, 1.6975, 1.6691, 1.6915, 1.7786], + device='cuda:5'), covar=tensor([0.6070, 1.0595, 0.8737, 0.9080, 1.0059, 0.7262, 1.1224, 0.7771], + device='cuda:5'), in_proj_covar=tensor([0.0230, 0.0254, 0.0256, 0.0266, 0.0245, 0.0219, 0.0280, 0.0221], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:26:05,891 INFO [finetune.py:976] (5/7) Epoch 4, batch 50, loss[loss=0.2619, simple_loss=0.3086, pruned_loss=0.1076, over 4775.00 frames. ], tot_loss[loss=0.2451, simple_loss=0.2983, pruned_loss=0.09597, over 215740.03 frames. ], batch size: 29, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:26:29,256 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17250.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:26:36,460 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17262.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 02:26:55,371 INFO [finetune.py:976] (5/7) Epoch 4, batch 100, loss[loss=0.2425, simple_loss=0.2835, pruned_loss=0.1008, over 4712.00 frames. ], tot_loss[loss=0.238, simple_loss=0.29, pruned_loss=0.09301, over 378721.95 frames. ], batch size: 23, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:27:26,864 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.228e+02 1.697e+02 1.982e+02 2.273e+02 3.827e+02, threshold=3.964e+02, percent-clipped=0.0 +2023-03-26 02:27:34,390 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8399, 1.6066, 1.4072, 1.4377, 1.5553, 1.5607, 1.4849, 2.3967], + device='cuda:5'), covar=tensor([0.9484, 1.0109, 0.7589, 1.0023, 0.8199, 0.5054, 0.9719, 0.3150], + device='cuda:5'), in_proj_covar=tensor([0.0275, 0.0251, 0.0219, 0.0284, 0.0236, 0.0196, 0.0240, 0.0188], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-26 02:27:36,644 INFO [finetune.py:976] (5/7) Epoch 4, batch 150, loss[loss=0.2349, simple_loss=0.2826, pruned_loss=0.0936, over 4840.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.2849, pruned_loss=0.09082, over 506719.33 frames. 
], batch size: 47, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:27:45,344 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1786, 1.2929, 1.0773, 1.3334, 1.4333, 2.4239, 1.1599, 1.4329], + device='cuda:5'), covar=tensor([0.0973, 0.1723, 0.1165, 0.0956, 0.1543, 0.0385, 0.1552, 0.1654], + device='cuda:5'), in_proj_covar=tensor([0.0078, 0.0081, 0.0077, 0.0079, 0.0092, 0.0082, 0.0085, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 02:28:01,970 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17362.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:28:16,162 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-03-26 02:28:25,903 INFO [finetune.py:976] (5/7) Epoch 4, batch 200, loss[loss=0.1934, simple_loss=0.2596, pruned_loss=0.06355, over 4745.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.2869, pruned_loss=0.09226, over 606604.21 frames. ], batch size: 27, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:28:26,036 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17383.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 02:28:55,776 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.314e+02 1.771e+02 2.098e+02 2.514e+02 4.657e+02, threshold=4.195e+02, percent-clipped=1.0 +2023-03-26 02:29:01,228 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17423.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:29:09,065 INFO [finetune.py:976] (5/7) Epoch 4, batch 250, loss[loss=0.3119, simple_loss=0.3479, pruned_loss=0.1379, over 4741.00 frames. ], tot_loss[loss=0.2376, simple_loss=0.2899, pruned_loss=0.09266, over 685568.77 frames. ], batch size: 54, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:29:15,506 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5643, 1.4245, 1.3049, 1.1618, 1.6245, 1.3159, 1.7303, 1.4996], + device='cuda:5'), covar=tensor([0.2033, 0.3643, 0.4460, 0.4102, 0.3294, 0.2306, 0.3869, 0.2859], + device='cuda:5'), in_proj_covar=tensor([0.0165, 0.0195, 0.0239, 0.0255, 0.0222, 0.0187, 0.0210, 0.0189], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:29:17,853 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17444.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 02:29:47,693 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.26 vs. limit=5.0 +2023-03-26 02:29:49,002 INFO [finetune.py:976] (5/7) Epoch 4, batch 300, loss[loss=0.2564, simple_loss=0.3088, pruned_loss=0.102, over 4896.00 frames. ], tot_loss[loss=0.2448, simple_loss=0.2975, pruned_loss=0.09599, over 746552.50 frames. ], batch size: 35, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:29:55,492 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8661, 1.7080, 1.4436, 1.7448, 1.8740, 1.5553, 2.2449, 1.8285], + device='cuda:5'), covar=tensor([0.1875, 0.3721, 0.4305, 0.3667, 0.2998, 0.2158, 0.3573, 0.2675], + device='cuda:5'), in_proj_covar=tensor([0.0165, 0.0195, 0.0238, 0.0254, 0.0222, 0.0186, 0.0209, 0.0189], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:30:24,393 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. 
limit=2.0 +2023-03-26 02:30:35,039 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.302e+02 1.964e+02 2.271e+02 2.699e+02 6.272e+02, threshold=4.542e+02, percent-clipped=2.0 +2023-03-26 02:30:39,316 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17524.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 02:30:44,608 INFO [finetune.py:976] (5/7) Epoch 4, batch 350, loss[loss=0.2461, simple_loss=0.2966, pruned_loss=0.09774, over 4881.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.2973, pruned_loss=0.09505, over 793507.91 frames. ], batch size: 32, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:30:53,140 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17545.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:30:54,399 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6926, 1.7007, 1.6723, 0.9984, 1.8174, 1.7630, 1.7790, 1.5531], + device='cuda:5'), covar=tensor([0.0674, 0.0672, 0.0833, 0.1139, 0.0622, 0.0785, 0.0712, 0.1146], + device='cuda:5'), in_proj_covar=tensor([0.0139, 0.0134, 0.0145, 0.0130, 0.0112, 0.0143, 0.0148, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:31:05,367 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.9722, 4.3015, 4.4238, 4.7674, 4.6095, 4.2659, 5.0319, 1.7322], + device='cuda:5'), covar=tensor([0.0668, 0.0757, 0.0715, 0.0833, 0.1254, 0.1431, 0.0541, 0.5150], + device='cuda:5'), in_proj_covar=tensor([0.0361, 0.0244, 0.0276, 0.0293, 0.0342, 0.0285, 0.0309, 0.0300], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:31:08,234 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-03-26 02:31:10,207 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17562.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 02:31:16,312 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=17572.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 02:31:23,350 INFO [finetune.py:976] (5/7) Epoch 4, batch 400, loss[loss=0.2038, simple_loss=0.2778, pruned_loss=0.06484, over 4781.00 frames. ], tot_loss[loss=0.2434, simple_loss=0.298, pruned_loss=0.09441, over 830043.78 frames. ], batch size: 29, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:31:46,313 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=17610.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:31:49,982 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6674, 1.5681, 1.5195, 1.5866, 1.1476, 3.1109, 1.3098, 1.7876], + device='cuda:5'), covar=tensor([0.3144, 0.2248, 0.1887, 0.2014, 0.1773, 0.0202, 0.2772, 0.1236], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0111, 0.0116, 0.0119, 0.0116, 0.0096, 0.0100, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 02:31:51,079 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.188e+02 1.790e+02 1.987e+02 2.567e+02 5.687e+02, threshold=3.975e+02, percent-clipped=1.0 +2023-03-26 02:32:10,369 INFO [finetune.py:976] (5/7) Epoch 4, batch 450, loss[loss=0.1843, simple_loss=0.2401, pruned_loss=0.06419, over 4790.00 frames. ], tot_loss[loss=0.2403, simple_loss=0.2947, pruned_loss=0.093, over 856567.90 frames. 
], batch size: 29, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:33:00,233 INFO [finetune.py:976] (5/7) Epoch 4, batch 500, loss[loss=0.2242, simple_loss=0.2673, pruned_loss=0.09053, over 4282.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.2905, pruned_loss=0.09112, over 879690.46 frames. ], batch size: 65, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:33:11,521 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17700.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:33:24,347 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.264e+02 1.808e+02 2.091e+02 2.485e+02 4.480e+02, threshold=4.181e+02, percent-clipped=1.0 +2023-03-26 02:33:24,435 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17718.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:33:31,866 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17730.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:33:34,061 INFO [finetune.py:976] (5/7) Epoch 4, batch 550, loss[loss=0.2538, simple_loss=0.2978, pruned_loss=0.1049, over 4935.00 frames. ], tot_loss[loss=0.2346, simple_loss=0.2879, pruned_loss=0.09062, over 897230.22 frames. ], batch size: 33, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:33:37,769 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17739.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 02:34:03,945 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17761.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:34:06,910 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7909, 1.2647, 0.8990, 1.7616, 2.1307, 1.3976, 1.5568, 1.7801], + device='cuda:5'), covar=tensor([0.1645, 0.2295, 0.2222, 0.1293, 0.2041, 0.2132, 0.1504, 0.1988], + device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0099, 0.0117, 0.0094, 0.0125, 0.0098, 0.0100, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 02:34:11,234 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-03-26 02:34:17,632 INFO [finetune.py:976] (5/7) Epoch 4, batch 600, loss[loss=0.2115, simple_loss=0.2663, pruned_loss=0.07835, over 4808.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.2867, pruned_loss=0.08985, over 910824.81 frames. ], batch size: 25, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:34:22,583 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17791.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:34:41,399 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.036e+02 1.729e+02 2.101e+02 2.480e+02 7.519e+02, threshold=4.202e+02, percent-clipped=3.0 +2023-03-26 02:34:50,418 INFO [finetune.py:976] (5/7) Epoch 4, batch 650, loss[loss=0.2463, simple_loss=0.2923, pruned_loss=0.1001, over 4752.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.2899, pruned_loss=0.09067, over 921780.10 frames. ], batch size: 28, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:34:59,979 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17845.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:35:31,912 INFO [finetune.py:976] (5/7) Epoch 4, batch 700, loss[loss=0.2589, simple_loss=0.3099, pruned_loss=0.104, over 4889.00 frames. ], tot_loss[loss=0.2377, simple_loss=0.292, pruned_loss=0.09174, over 930274.23 frames. 
], batch size: 32, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:35:46,369 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=17893.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:35:46,431 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9459, 1.7885, 2.3309, 1.4626, 1.9402, 2.1646, 1.7610, 2.4521], + device='cuda:5'), covar=tensor([0.1679, 0.2228, 0.1611, 0.2381, 0.0995, 0.1732, 0.2655, 0.0973], + device='cuda:5'), in_proj_covar=tensor([0.0208, 0.0209, 0.0206, 0.0199, 0.0184, 0.0228, 0.0218, 0.0206], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:35:47,688 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.1092, 2.4690, 2.3499, 1.4749, 2.5969, 2.2005, 1.9880, 2.2472], + device='cuda:5'), covar=tensor([0.0726, 0.1223, 0.2041, 0.2415, 0.1743, 0.2101, 0.2160, 0.1344], + device='cuda:5'), in_proj_covar=tensor([0.0167, 0.0200, 0.0204, 0.0190, 0.0218, 0.0210, 0.0218, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:36:18,031 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.240e+02 1.804e+02 2.027e+02 2.461e+02 4.855e+02, threshold=4.055e+02, percent-clipped=2.0 +2023-03-26 02:36:18,757 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-03-26 02:36:34,751 INFO [finetune.py:976] (5/7) Epoch 4, batch 750, loss[loss=0.2643, simple_loss=0.3246, pruned_loss=0.102, over 4792.00 frames. ], tot_loss[loss=0.2383, simple_loss=0.2932, pruned_loss=0.09168, over 936319.61 frames. ], batch size: 45, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:36:34,931 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8253, 1.2065, 1.5833, 1.5957, 1.4604, 1.4164, 1.4913, 1.5454], + device='cuda:5'), covar=tensor([0.8025, 1.2343, 0.9574, 1.0979, 1.2286, 0.8575, 1.4261, 0.8868], + device='cuda:5'), in_proj_covar=tensor([0.0230, 0.0254, 0.0256, 0.0266, 0.0244, 0.0218, 0.0279, 0.0221], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:37:08,579 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17960.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:37:09,017 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-03-26 02:37:29,043 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.82 vs. limit=5.0 +2023-03-26 02:37:30,393 INFO [finetune.py:976] (5/7) Epoch 4, batch 800, loss[loss=0.2311, simple_loss=0.2788, pruned_loss=0.09175, over 4788.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.2922, pruned_loss=0.09049, over 939734.73 frames. 
], batch size: 26, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:38:05,563 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.155e+02 1.784e+02 2.191e+02 2.808e+02 5.190e+02, threshold=4.382e+02, percent-clipped=3.0 +2023-03-26 02:38:05,662 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18018.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:38:08,536 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18021.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:38:20,834 INFO [finetune.py:976] (5/7) Epoch 4, batch 850, loss[loss=0.1836, simple_loss=0.2506, pruned_loss=0.05832, over 4756.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.2902, pruned_loss=0.09016, over 943408.39 frames. ], batch size: 54, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:38:23,814 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-03-26 02:38:26,576 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18039.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 02:38:33,260 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.6056, 1.5750, 1.6128, 0.9598, 1.5758, 1.8638, 1.8535, 1.4920], + device='cuda:5'), covar=tensor([0.1037, 0.0630, 0.0417, 0.0614, 0.0391, 0.0389, 0.0271, 0.0490], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0159, 0.0118, 0.0138, 0.0134, 0.0123, 0.0147, 0.0145], + device='cuda:5'), out_proj_covar=tensor([9.8557e-05, 1.1816e-04, 8.5457e-05, 1.0136e-04, 9.7346e-05, 9.1182e-05, + 1.0997e-04, 1.0781e-04], device='cuda:5') +2023-03-26 02:38:38,032 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6569, 2.3039, 2.8896, 1.8011, 2.4235, 2.7857, 2.2215, 2.8967], + device='cuda:5'), covar=tensor([0.1456, 0.2034, 0.1601, 0.2374, 0.1115, 0.1692, 0.2264, 0.1142], + device='cuda:5'), in_proj_covar=tensor([0.0207, 0.0207, 0.0204, 0.0199, 0.0183, 0.0228, 0.0217, 0.0205], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:38:39,174 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18056.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:38:45,706 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=18066.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:38:57,812 INFO [finetune.py:976] (5/7) Epoch 4, batch 900, loss[loss=0.2776, simple_loss=0.3068, pruned_loss=0.1242, over 4917.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.2865, pruned_loss=0.08848, over 945686.31 frames. ], batch size: 36, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:38:59,685 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18086.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:39:00,271 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=18087.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 02:39:19,669 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.218e+02 1.720e+02 1.921e+02 2.312e+02 4.297e+02, threshold=3.842e+02, percent-clipped=0.0 +2023-03-26 02:39:35,996 INFO [finetune.py:976] (5/7) Epoch 4, batch 950, loss[loss=0.2438, simple_loss=0.2946, pruned_loss=0.09652, over 4872.00 frames. ], tot_loss[loss=0.2305, simple_loss=0.2849, pruned_loss=0.08805, over 948942.85 frames. 
], batch size: 31, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:39:46,516 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0 +2023-03-26 02:39:58,197 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.6768, 1.7412, 1.8872, 0.8901, 1.8264, 2.0587, 2.0313, 1.6516], + device='cuda:5'), covar=tensor([0.0961, 0.0710, 0.0336, 0.0679, 0.0354, 0.0424, 0.0268, 0.0560], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0158, 0.0117, 0.0138, 0.0133, 0.0122, 0.0147, 0.0145], + device='cuda:5'), out_proj_covar=tensor([9.8175e-05, 1.1752e-04, 8.4900e-05, 1.0122e-04, 9.6453e-05, 9.0671e-05, + 1.0946e-04, 1.0701e-04], device='cuda:5') +2023-03-26 02:40:22,954 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7236, 1.6325, 1.5152, 1.6126, 1.3718, 3.9835, 1.6460, 2.1035], + device='cuda:5'), covar=tensor([0.3589, 0.2435, 0.2154, 0.2336, 0.1796, 0.0115, 0.2666, 0.1390], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0112, 0.0116, 0.0120, 0.0116, 0.0097, 0.0101, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 02:40:22,964 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18174.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:40:24,022 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18175.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:40:29,234 INFO [finetune.py:976] (5/7) Epoch 4, batch 1000, loss[loss=0.319, simple_loss=0.378, pruned_loss=0.13, over 4835.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.2901, pruned_loss=0.09047, over 951292.42 frames. ], batch size: 49, lr: 3.97e-03, grad_scale: 64.0 +2023-03-26 02:40:41,629 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1834, 3.5839, 3.7686, 3.9973, 3.9276, 3.7172, 4.2416, 1.6162], + device='cuda:5'), covar=tensor([0.0786, 0.0898, 0.0940, 0.0960, 0.1332, 0.1452, 0.0768, 0.5014], + device='cuda:5'), in_proj_covar=tensor([0.0364, 0.0246, 0.0282, 0.0296, 0.0344, 0.0288, 0.0312, 0.0303], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:41:07,309 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.077e+02 1.690e+02 2.099e+02 2.471e+02 3.966e+02, threshold=4.198e+02, percent-clipped=1.0 +2023-03-26 02:41:14,729 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2516, 2.0635, 2.2623, 1.0969, 2.2843, 2.5981, 2.1499, 2.1174], + device='cuda:5'), covar=tensor([0.1244, 0.0898, 0.0473, 0.0837, 0.0602, 0.0538, 0.0481, 0.0712], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0159, 0.0117, 0.0138, 0.0134, 0.0123, 0.0147, 0.0145], + device='cuda:5'), out_proj_covar=tensor([9.8591e-05, 1.1804e-04, 8.5311e-05, 1.0151e-04, 9.6769e-05, 9.0953e-05, + 1.0995e-04, 1.0752e-04], device='cuda:5') +2023-03-26 02:41:28,636 INFO [finetune.py:976] (5/7) Epoch 4, batch 1050, loss[loss=0.1993, simple_loss=0.2707, pruned_loss=0.06398, over 4880.00 frames. ], tot_loss[loss=0.2359, simple_loss=0.2914, pruned_loss=0.09025, over 953698.44 frames. 
], batch size: 43, lr: 3.97e-03, grad_scale: 64.0 +2023-03-26 02:41:29,970 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18235.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:41:30,598 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18236.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:41:33,074 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9836, 1.8891, 1.5315, 1.9447, 1.8550, 1.7927, 1.7793, 2.8056], + device='cuda:5'), covar=tensor([0.9264, 1.1148, 0.7625, 1.1198, 0.8665, 0.5333, 1.0441, 0.3121], + device='cuda:5'), in_proj_covar=tensor([0.0274, 0.0250, 0.0218, 0.0282, 0.0234, 0.0196, 0.0238, 0.0187], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-26 02:42:18,620 INFO [finetune.py:976] (5/7) Epoch 4, batch 1100, loss[loss=0.1858, simple_loss=0.2468, pruned_loss=0.06242, over 4758.00 frames. ], tot_loss[loss=0.2378, simple_loss=0.2931, pruned_loss=0.09126, over 951939.61 frames. ], batch size: 26, lr: 3.97e-03, grad_scale: 64.0 +2023-03-26 02:42:31,533 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6744, 1.5593, 1.5198, 1.5952, 1.1582, 3.6837, 1.3507, 1.8254], + device='cuda:5'), covar=tensor([0.3691, 0.2434, 0.2203, 0.2380, 0.2003, 0.0151, 0.2574, 0.1423], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0111, 0.0116, 0.0119, 0.0116, 0.0096, 0.0100, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 02:42:38,773 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6467, 1.6118, 1.5141, 1.7800, 2.1645, 1.7048, 1.4237, 1.3643], + device='cuda:5'), covar=tensor([0.2525, 0.2501, 0.2129, 0.1976, 0.2193, 0.1411, 0.3138, 0.2093], + device='cuda:5'), in_proj_covar=tensor([0.0232, 0.0210, 0.0198, 0.0184, 0.0233, 0.0174, 0.0214, 0.0187], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:42:49,595 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18316.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:42:50,759 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.170e+02 1.851e+02 2.220e+02 2.754e+02 4.687e+02, threshold=4.440e+02, percent-clipped=1.0 +2023-03-26 02:43:08,653 INFO [finetune.py:976] (5/7) Epoch 4, batch 1150, loss[loss=0.2139, simple_loss=0.2655, pruned_loss=0.08112, over 4744.00 frames. ], tot_loss[loss=0.2394, simple_loss=0.2947, pruned_loss=0.09209, over 953042.13 frames. 
], batch size: 23, lr: 3.97e-03, grad_scale: 64.0 +2023-03-26 02:43:19,814 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.8646, 3.3720, 3.5412, 3.7889, 3.5732, 3.3727, 3.9108, 1.1823], + device='cuda:5'), covar=tensor([0.0765, 0.0750, 0.0792, 0.0839, 0.1201, 0.1361, 0.0785, 0.4832], + device='cuda:5'), in_proj_covar=tensor([0.0362, 0.0245, 0.0279, 0.0295, 0.0342, 0.0286, 0.0310, 0.0302], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:43:21,097 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1312, 1.7678, 1.3317, 0.6125, 1.5890, 1.8151, 1.4216, 1.7426], + device='cuda:5'), covar=tensor([0.0821, 0.0827, 0.1311, 0.1837, 0.1140, 0.2081, 0.2073, 0.0774], + device='cuda:5'), in_proj_covar=tensor([0.0167, 0.0199, 0.0203, 0.0190, 0.0217, 0.0210, 0.0218, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:43:25,364 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18356.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:44:03,369 INFO [finetune.py:976] (5/7) Epoch 4, batch 1200, loss[loss=0.2218, simple_loss=0.261, pruned_loss=0.09128, over 4825.00 frames. ], tot_loss[loss=0.2389, simple_loss=0.2935, pruned_loss=0.09216, over 953044.14 frames. ], batch size: 25, lr: 3.97e-03, grad_scale: 64.0 +2023-03-26 02:44:05,828 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18386.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:44:27,164 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5148, 1.3179, 1.2685, 1.4655, 1.6824, 1.4607, 0.8455, 1.2783], + device='cuda:5'), covar=tensor([0.2598, 0.2505, 0.2199, 0.1968, 0.1785, 0.1335, 0.3169, 0.2071], + device='cuda:5'), in_proj_covar=tensor([0.0231, 0.0208, 0.0197, 0.0183, 0.0232, 0.0173, 0.0213, 0.0186], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:44:28,300 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=18404.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:44:28,785 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-03-26 02:44:36,088 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-03-26 02:44:47,474 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.754e+02 2.082e+02 2.492e+02 3.668e+02, threshold=4.164e+02, percent-clipped=0.0 +2023-03-26 02:45:07,910 INFO [finetune.py:976] (5/7) Epoch 4, batch 1250, loss[loss=0.1948, simple_loss=0.257, pruned_loss=0.06629, over 4827.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.2907, pruned_loss=0.09104, over 954725.99 frames. ], batch size: 33, lr: 3.97e-03, grad_scale: 64.0 +2023-03-26 02:45:09,084 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=18434.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:45:59,527 INFO [finetune.py:976] (5/7) Epoch 4, batch 1300, loss[loss=0.2087, simple_loss=0.2604, pruned_loss=0.07847, over 4754.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.2878, pruned_loss=0.08991, over 956356.35 frames. 
], batch size: 23, lr: 3.97e-03, grad_scale: 64.0 +2023-03-26 02:46:10,684 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18490.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:46:22,651 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18508.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 02:46:26,381 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-03-26 02:46:29,711 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.069e+02 1.658e+02 2.008e+02 2.632e+02 4.281e+02, threshold=4.017e+02, percent-clipped=1.0 +2023-03-26 02:46:36,934 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18530.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:46:37,494 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18531.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:46:41,948 INFO [finetune.py:976] (5/7) Epoch 4, batch 1350, loss[loss=0.2228, simple_loss=0.2786, pruned_loss=0.08346, over 4886.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.2893, pruned_loss=0.09098, over 957941.44 frames. ], batch size: 32, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:46:55,246 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18551.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:47:11,803 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18569.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 02:47:25,812 INFO [finetune.py:976] (5/7) Epoch 4, batch 1400, loss[loss=0.2901, simple_loss=0.3364, pruned_loss=0.1219, over 4754.00 frames. ], tot_loss[loss=0.2397, simple_loss=0.2936, pruned_loss=0.09292, over 957846.23 frames. ], batch size: 59, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:47:53,788 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18616.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:47:55,465 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.253e+02 1.863e+02 2.207e+02 2.712e+02 5.337e+02, threshold=4.415e+02, percent-clipped=2.0 +2023-03-26 02:48:10,032 INFO [finetune.py:976] (5/7) Epoch 4, batch 1450, loss[loss=0.2552, simple_loss=0.3021, pruned_loss=0.1041, over 4865.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.2955, pruned_loss=0.09403, over 955641.04 frames. ], batch size: 31, lr: 3.97e-03, grad_scale: 32.0 +2023-03-26 02:48:39,162 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5875, 1.5590, 2.1383, 3.2310, 2.2953, 2.3435, 1.1261, 2.5059], + device='cuda:5'), covar=tensor([0.2006, 0.1646, 0.1301, 0.0645, 0.0860, 0.1521, 0.1924, 0.0717], + device='cuda:5'), in_proj_covar=tensor([0.0103, 0.0119, 0.0137, 0.0166, 0.0104, 0.0143, 0.0129, 0.0106], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 02:48:43,321 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=18664.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:48:47,666 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.01 vs. limit=5.0 +2023-03-26 02:48:49,417 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18674.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:48:53,644 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.61 vs. 
limit=5.0 +2023-03-26 02:48:56,990 INFO [finetune.py:976] (5/7) Epoch 4, batch 1500, loss[loss=0.2564, simple_loss=0.2979, pruned_loss=0.1074, over 4879.00 frames. ], tot_loss[loss=0.2413, simple_loss=0.2957, pruned_loss=0.09351, over 957063.51 frames. ], batch size: 35, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 02:49:17,277 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4256, 1.2834, 1.3792, 1.3399, 0.7794, 2.2593, 0.7299, 1.3057], + device='cuda:5'), covar=tensor([0.3253, 0.2380, 0.2038, 0.2281, 0.2123, 0.0327, 0.2791, 0.1395], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0112, 0.0117, 0.0120, 0.0116, 0.0097, 0.0101, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 02:49:36,303 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 1.810e+02 2.122e+02 2.509e+02 6.153e+02, threshold=4.245e+02, percent-clipped=1.0 +2023-03-26 02:49:54,649 INFO [finetune.py:976] (5/7) Epoch 4, batch 1550, loss[loss=0.25, simple_loss=0.2875, pruned_loss=0.1063, over 4744.00 frames. ], tot_loss[loss=0.2403, simple_loss=0.2947, pruned_loss=0.09299, over 956643.31 frames. ], batch size: 23, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 02:49:55,983 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18735.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:50:35,885 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.57 vs. limit=2.0 +2023-03-26 02:50:39,061 INFO [finetune.py:976] (5/7) Epoch 4, batch 1600, loss[loss=0.2282, simple_loss=0.2831, pruned_loss=0.0866, over 4854.00 frames. ], tot_loss[loss=0.2388, simple_loss=0.2925, pruned_loss=0.09249, over 956381.45 frames. ], batch size: 49, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 02:51:19,524 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.176e+02 1.645e+02 2.011e+02 2.399e+02 5.772e+02, threshold=4.021e+02, percent-clipped=1.0 +2023-03-26 02:51:30,351 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18830.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:51:30,944 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18831.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:51:32,083 INFO [finetune.py:976] (5/7) Epoch 4, batch 1650, loss[loss=0.2051, simple_loss=0.2611, pruned_loss=0.0746, over 4826.00 frames. ], tot_loss[loss=0.2339, simple_loss=0.288, pruned_loss=0.08985, over 958081.31 frames. 
], batch size: 51, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 02:51:48,121 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18846.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:52:01,677 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9486, 1.2585, 1.0094, 1.8299, 2.2061, 1.5381, 1.6102, 1.7773], + device='cuda:5'), covar=tensor([0.1483, 0.2282, 0.2351, 0.1303, 0.2076, 0.2217, 0.1503, 0.2081], + device='cuda:5'), in_proj_covar=tensor([0.0093, 0.0100, 0.0117, 0.0095, 0.0126, 0.0098, 0.0101, 0.0096], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 02:52:05,196 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18864.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 02:52:23,038 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=18878.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:52:23,634 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=18879.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:52:25,972 INFO [finetune.py:976] (5/7) Epoch 4, batch 1700, loss[loss=0.267, simple_loss=0.3168, pruned_loss=0.1086, over 4847.00 frames. ], tot_loss[loss=0.232, simple_loss=0.2861, pruned_loss=0.089, over 958533.21 frames. ], batch size: 47, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 02:53:00,727 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.073e+02 1.767e+02 2.149e+02 2.599e+02 5.673e+02, threshold=4.299e+02, percent-clipped=2.0 +2023-03-26 02:53:07,515 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18930.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:53:09,240 INFO [finetune.py:976] (5/7) Epoch 4, batch 1750, loss[loss=0.2073, simple_loss=0.2657, pruned_loss=0.07446, over 4773.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.2891, pruned_loss=0.09105, over 957977.26 frames. ], batch size: 26, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 02:53:22,156 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7709, 1.0318, 1.5792, 1.5236, 1.3952, 1.3440, 1.4590, 1.3854], + device='cuda:5'), covar=tensor([0.7043, 1.0914, 0.8643, 1.0029, 1.0479, 0.8284, 1.2068, 0.8477], + device='cuda:5'), in_proj_covar=tensor([0.0229, 0.0254, 0.0256, 0.0264, 0.0243, 0.0219, 0.0278, 0.0222], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:53:52,360 INFO [finetune.py:976] (5/7) Epoch 4, batch 1800, loss[loss=0.2468, simple_loss=0.294, pruned_loss=0.09975, over 4861.00 frames. ], tot_loss[loss=0.2387, simple_loss=0.2929, pruned_loss=0.09226, over 955879.80 frames. ], batch size: 34, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 02:53:57,509 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18991.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:54:32,254 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.317e+02 1.869e+02 2.115e+02 2.590e+02 5.981e+02, threshold=4.230e+02, percent-clipped=1.0 +2023-03-26 02:54:50,226 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=19030.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:54:52,013 INFO [finetune.py:976] (5/7) Epoch 4, batch 1850, loss[loss=0.2767, simple_loss=0.33, pruned_loss=0.1117, over 4888.00 frames. 
], tot_loss[loss=0.2382, simple_loss=0.2927, pruned_loss=0.09182, over 954470.61 frames. ], batch size: 35, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 02:54:53,991 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5500, 2.2321, 1.8743, 1.0143, 2.1453, 1.9487, 1.6437, 1.9466], + device='cuda:5'), covar=tensor([0.0732, 0.0990, 0.1785, 0.2362, 0.1694, 0.2183, 0.2346, 0.1168], + device='cuda:5'), in_proj_covar=tensor([0.0167, 0.0199, 0.0203, 0.0189, 0.0216, 0.0208, 0.0218, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 02:55:39,895 INFO [finetune.py:976] (5/7) Epoch 4, batch 1900, loss[loss=0.2186, simple_loss=0.2853, pruned_loss=0.07591, over 4758.00 frames. ], tot_loss[loss=0.2375, simple_loss=0.293, pruned_loss=0.09099, over 955966.59 frames. ], batch size: 54, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 02:56:12,754 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 1.752e+02 2.082e+02 2.658e+02 3.786e+02, threshold=4.164e+02, percent-clipped=0.0 +2023-03-26 02:56:25,565 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19129.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 02:56:33,399 INFO [finetune.py:976] (5/7) Epoch 4, batch 1950, loss[loss=0.3411, simple_loss=0.3579, pruned_loss=0.1622, over 4217.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.2918, pruned_loss=0.09087, over 953443.84 frames. ], batch size: 65, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 02:56:41,418 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19146.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:56:53,387 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19164.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 02:57:09,559 INFO [finetune.py:976] (5/7) Epoch 4, batch 2000, loss[loss=0.2099, simple_loss=0.2687, pruned_loss=0.07552, over 4856.00 frames. ], tot_loss[loss=0.234, simple_loss=0.2883, pruned_loss=0.08982, over 953154.93 frames. ], batch size: 44, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 02:57:14,698 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19190.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 02:57:17,079 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=19194.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:57:35,166 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=19212.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 02:57:39,345 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.251e+02 1.681e+02 2.009e+02 2.396e+02 5.395e+02, threshold=4.017e+02, percent-clipped=3.0 +2023-03-26 02:57:49,437 INFO [finetune.py:976] (5/7) Epoch 4, batch 2050, loss[loss=0.1759, simple_loss=0.2407, pruned_loss=0.05549, over 4775.00 frames. ], tot_loss[loss=0.2291, simple_loss=0.2834, pruned_loss=0.08741, over 954685.69 frames. 
], batch size: 26, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 02:58:15,529 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.5149, 1.5716, 1.5907, 0.9747, 1.5872, 1.8564, 1.8257, 1.4337], + device='cuda:5'), covar=tensor([0.0844, 0.0508, 0.0495, 0.0519, 0.0433, 0.0437, 0.0308, 0.0542], + device='cuda:5'), in_proj_covar=tensor([0.0128, 0.0156, 0.0117, 0.0135, 0.0132, 0.0120, 0.0146, 0.0143], + device='cuda:5'), out_proj_covar=tensor([9.6425e-05, 1.1566e-04, 8.5399e-05, 9.9323e-05, 9.5645e-05, 8.8833e-05, + 1.0851e-04, 1.0637e-04], device='cuda:5') +2023-03-26 02:58:31,806 INFO [finetune.py:976] (5/7) Epoch 4, batch 2100, loss[loss=0.2412, simple_loss=0.3038, pruned_loss=0.08931, over 4322.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.2837, pruned_loss=0.08709, over 954347.76 frames. ], batch size: 65, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 02:58:34,246 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=19286.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:58:49,948 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-03-26 02:59:06,461 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9064, 1.7994, 1.5206, 1.8384, 1.7357, 1.7222, 1.6881, 2.5888], + device='cuda:5'), covar=tensor([0.9280, 1.0434, 0.7339, 0.9994, 0.8577, 0.4687, 0.9772, 0.3113], + device='cuda:5'), in_proj_covar=tensor([0.0276, 0.0252, 0.0219, 0.0283, 0.0234, 0.0195, 0.0239, 0.0188], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-26 02:59:09,862 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.136e+02 1.737e+02 2.057e+02 2.371e+02 3.601e+02, threshold=4.115e+02, percent-clipped=0.0 +2023-03-26 02:59:26,240 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19330.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 02:59:27,979 INFO [finetune.py:976] (5/7) Epoch 4, batch 2150, loss[loss=0.2789, simple_loss=0.324, pruned_loss=0.1169, over 4932.00 frames. ], tot_loss[loss=0.232, simple_loss=0.2871, pruned_loss=0.08848, over 953462.11 frames. ], batch size: 33, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 02:59:45,869 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6695, 1.4766, 2.0655, 3.4207, 2.3946, 2.4515, 0.7334, 2.6880], + device='cuda:5'), covar=tensor([0.1814, 0.1528, 0.1383, 0.0587, 0.0823, 0.1459, 0.2167, 0.0629], + device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0119, 0.0136, 0.0165, 0.0104, 0.0143, 0.0129, 0.0105], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 03:00:10,913 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=19378.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:00:13,947 INFO [finetune.py:976] (5/7) Epoch 4, batch 2200, loss[loss=0.2224, simple_loss=0.297, pruned_loss=0.07393, over 4912.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.2899, pruned_loss=0.08926, over 951141.87 frames. 
], batch size: 36, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:00:21,582 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5205, 1.4709, 1.4394, 1.4836, 1.0065, 3.0201, 1.0632, 1.5755], + device='cuda:5'), covar=tensor([0.3542, 0.2458, 0.2151, 0.2404, 0.2053, 0.0244, 0.2655, 0.1360], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0112, 0.0117, 0.0120, 0.0116, 0.0097, 0.0101, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 03:00:33,351 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19394.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:00:55,672 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5896, 1.3207, 1.2660, 1.4837, 1.9470, 1.5213, 1.1841, 1.2741], + device='cuda:5'), covar=tensor([0.2494, 0.2524, 0.2407, 0.2022, 0.1935, 0.1447, 0.2974, 0.2070], + device='cuda:5'), in_proj_covar=tensor([0.0231, 0.0208, 0.0197, 0.0183, 0.0232, 0.0173, 0.0213, 0.0186], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:01:05,357 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.065e+02 1.822e+02 2.092e+02 2.549e+02 4.918e+02, threshold=4.184e+02, percent-clipped=2.0 +2023-03-26 03:01:15,079 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8473, 1.1952, 1.6609, 1.6098, 1.4259, 1.4178, 1.4939, 1.4831], + device='cuda:5'), covar=tensor([0.7453, 1.1642, 0.8951, 1.0323, 1.1606, 0.8538, 1.3557, 0.8497], + device='cuda:5'), in_proj_covar=tensor([0.0228, 0.0252, 0.0256, 0.0263, 0.0242, 0.0218, 0.0278, 0.0221], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:01:16,385 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.5874, 1.6037, 1.8407, 0.9102, 1.7334, 1.9627, 1.9432, 1.5373], + device='cuda:5'), covar=tensor([0.1261, 0.0952, 0.0409, 0.0749, 0.0409, 0.0623, 0.0374, 0.0763], + device='cuda:5'), in_proj_covar=tensor([0.0129, 0.0156, 0.0117, 0.0136, 0.0131, 0.0120, 0.0145, 0.0144], + device='cuda:5'), out_proj_covar=tensor([9.6830e-05, 1.1606e-04, 8.5269e-05, 9.9551e-05, 9.5057e-05, 8.8845e-05, + 1.0837e-04, 1.0647e-04], device='cuda:5') +2023-03-26 03:01:19,367 INFO [finetune.py:976] (5/7) Epoch 4, batch 2250, loss[loss=0.2619, simple_loss=0.3201, pruned_loss=0.1019, over 4717.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.2922, pruned_loss=0.09095, over 950156.64 frames. ], batch size: 54, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:01:25,505 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.60 vs. limit=5.0 +2023-03-26 03:01:46,316 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19455.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:02:09,427 INFO [finetune.py:976] (5/7) Epoch 4, batch 2300, loss[loss=0.264, simple_loss=0.3108, pruned_loss=0.1086, over 4785.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.2921, pruned_loss=0.09016, over 949973.56 frames. 
], batch size: 51, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:02:12,905 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=19485.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 03:02:30,000 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0991, 1.7416, 2.4425, 1.6110, 2.0825, 2.2268, 1.8506, 2.4834], + device='cuda:5'), covar=tensor([0.1410, 0.2227, 0.1331, 0.2179, 0.0947, 0.1614, 0.2320, 0.0955], + device='cuda:5'), in_proj_covar=tensor([0.0208, 0.0208, 0.0204, 0.0198, 0.0183, 0.0227, 0.0216, 0.0206], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:02:37,770 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.125e+02 1.717e+02 2.025e+02 2.639e+02 4.089e+02, threshold=4.050e+02, percent-clipped=0.0 +2023-03-26 03:02:53,438 INFO [finetune.py:976] (5/7) Epoch 4, batch 2350, loss[loss=0.2115, simple_loss=0.2697, pruned_loss=0.07658, over 4863.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.2887, pruned_loss=0.08809, over 951353.72 frames. ], batch size: 44, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:02:55,428 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9342, 1.6567, 1.3857, 1.6287, 1.5737, 1.5695, 1.4865, 2.4345], + device='cuda:5'), covar=tensor([0.7772, 0.8273, 0.6698, 0.8369, 0.6847, 0.4495, 0.8546, 0.2548], + device='cuda:5'), in_proj_covar=tensor([0.0277, 0.0253, 0.0220, 0.0284, 0.0236, 0.0197, 0.0241, 0.0189], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-26 03:03:01,578 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-03-26 03:03:27,688 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-03-26 03:03:37,780 INFO [finetune.py:976] (5/7) Epoch 4, batch 2400, loss[loss=0.1975, simple_loss=0.2606, pruned_loss=0.06717, over 4754.00 frames. ], tot_loss[loss=0.23, simple_loss=0.2857, pruned_loss=0.08713, over 952872.55 frames. 
], batch size: 54, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:03:40,261 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19586.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:03:49,312 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0626, 0.9924, 1.0417, 0.3655, 0.7123, 1.1825, 1.2473, 1.0338], + device='cuda:5'), covar=tensor([0.0880, 0.0500, 0.0530, 0.0581, 0.0563, 0.0508, 0.0345, 0.0578], + device='cuda:5'), in_proj_covar=tensor([0.0129, 0.0157, 0.0118, 0.0136, 0.0132, 0.0121, 0.0146, 0.0145], + device='cuda:5'), out_proj_covar=tensor([9.7141e-05, 1.1677e-04, 8.5725e-05, 1.0019e-04, 9.5869e-05, 8.9570e-05, + 1.0914e-04, 1.0718e-04], device='cuda:5') +2023-03-26 03:04:10,058 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19618.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:04:10,539 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.076e+02 1.651e+02 1.936e+02 2.390e+02 3.810e+02, threshold=3.872e+02, percent-clipped=0.0 +2023-03-26 03:04:17,832 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7503, 4.0311, 3.9238, 1.9588, 4.1901, 3.0998, 0.9611, 2.8621], + device='cuda:5'), covar=tensor([0.3063, 0.1786, 0.1430, 0.3310, 0.0796, 0.0941, 0.4333, 0.1504], + device='cuda:5'), in_proj_covar=tensor([0.0154, 0.0170, 0.0163, 0.0129, 0.0155, 0.0122, 0.0145, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 03:04:19,621 INFO [finetune.py:976] (5/7) Epoch 4, batch 2450, loss[loss=0.2001, simple_loss=0.2671, pruned_loss=0.06651, over 4895.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.2818, pruned_loss=0.0856, over 952202.30 frames. ], batch size: 32, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:04:20,302 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=19634.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:04:28,977 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2265, 1.8681, 1.8588, 1.9147, 1.7639, 1.8416, 1.9165, 1.9729], + device='cuda:5'), covar=tensor([0.9759, 1.2095, 1.0085, 1.2457, 1.4066, 0.9710, 1.7054, 0.8855], + device='cuda:5'), in_proj_covar=tensor([0.0230, 0.0253, 0.0258, 0.0264, 0.0243, 0.0219, 0.0279, 0.0222], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:04:59,950 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19679.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:05:02,277 INFO [finetune.py:976] (5/7) Epoch 4, batch 2500, loss[loss=0.2109, simple_loss=0.2764, pruned_loss=0.07274, over 4903.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.2837, pruned_loss=0.08653, over 953257.36 frames. ], batch size: 37, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:05:30,654 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.296e+02 1.721e+02 2.075e+02 2.470e+02 4.533e+02, threshold=4.150e+02, percent-clipped=4.0 +2023-03-26 03:05:45,256 INFO [finetune.py:976] (5/7) Epoch 4, batch 2550, loss[loss=0.2961, simple_loss=0.3373, pruned_loss=0.1274, over 4815.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.2896, pruned_loss=0.08939, over 952818.67 frames. 
], batch size: 33, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:05:58,478 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=19750.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:06:31,493 INFO [finetune.py:976] (5/7) Epoch 4, batch 2600, loss[loss=0.2655, simple_loss=0.3219, pruned_loss=0.1045, over 4799.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.2914, pruned_loss=0.09007, over 951162.36 frames. ], batch size: 40, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:06:33,370 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19785.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 03:07:01,556 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1402, 2.0352, 1.7567, 1.4105, 2.2539, 2.5967, 2.2345, 1.9728], + device='cuda:5'), covar=tensor([0.0329, 0.0441, 0.0559, 0.0436, 0.0274, 0.0459, 0.0331, 0.0439], + device='cuda:5'), in_proj_covar=tensor([0.0085, 0.0115, 0.0138, 0.0118, 0.0104, 0.0100, 0.0092, 0.0110], + device='cuda:5'), out_proj_covar=tensor([6.6738e-05, 9.0550e-05, 1.1140e-04, 9.3475e-05, 8.2789e-05, 7.4092e-05, + 7.1048e-05, 8.5824e-05], device='cuda:5') +2023-03-26 03:07:15,921 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.335e+02 1.804e+02 2.222e+02 2.871e+02 4.406e+02, threshold=4.445e+02, percent-clipped=2.0 +2023-03-26 03:07:34,471 INFO [finetune.py:976] (5/7) Epoch 4, batch 2650, loss[loss=0.2148, simple_loss=0.2797, pruned_loss=0.07493, over 4887.00 frames. ], tot_loss[loss=0.2379, simple_loss=0.2933, pruned_loss=0.09129, over 952900.41 frames. ], batch size: 35, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:07:34,540 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=19833.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 03:08:28,510 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6372, 1.5732, 1.4986, 1.6576, 1.1526, 3.5804, 1.4094, 2.1225], + device='cuda:5'), covar=tensor([0.4446, 0.3141, 0.2473, 0.2928, 0.2129, 0.0232, 0.2629, 0.1288], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0112, 0.0116, 0.0120, 0.0117, 0.0097, 0.0101, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 03:08:34,281 INFO [finetune.py:976] (5/7) Epoch 4, batch 2700, loss[loss=0.222, simple_loss=0.279, pruned_loss=0.08245, over 4765.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.2915, pruned_loss=0.08977, over 953388.39 frames. ], batch size: 27, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:09:16,015 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.181e+02 1.716e+02 2.005e+02 2.489e+02 3.950e+02, threshold=4.009e+02, percent-clipped=0.0 +2023-03-26 03:09:26,961 INFO [finetune.py:976] (5/7) Epoch 4, batch 2750, loss[loss=0.1926, simple_loss=0.2524, pruned_loss=0.0664, over 4778.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.2883, pruned_loss=0.08902, over 952059.50 frames. 
], batch size: 26, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:09:53,591 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.2895, 2.9357, 3.0205, 3.2270, 3.0686, 2.9101, 3.3320, 0.9558], + device='cuda:5'), covar=tensor([0.1061, 0.0855, 0.0962, 0.0994, 0.1457, 0.1527, 0.1067, 0.4816], + device='cuda:5'), in_proj_covar=tensor([0.0361, 0.0246, 0.0278, 0.0295, 0.0341, 0.0287, 0.0310, 0.0302], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:09:54,786 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=19974.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:10:00,205 INFO [finetune.py:976] (5/7) Epoch 4, batch 2800, loss[loss=0.1475, simple_loss=0.2158, pruned_loss=0.03963, over 4745.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.2844, pruned_loss=0.08738, over 952972.97 frames. ], batch size: 27, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:10:00,802 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8505, 1.2023, 0.8226, 1.7505, 2.1989, 1.5154, 1.5644, 1.8412], + device='cuda:5'), covar=tensor([0.1605, 0.2357, 0.2468, 0.1300, 0.2076, 0.2112, 0.1553, 0.2055], + device='cuda:5'), in_proj_covar=tensor([0.0093, 0.0100, 0.0118, 0.0094, 0.0126, 0.0098, 0.0101, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 03:10:12,750 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-03-26 03:10:13,992 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-03-26 03:10:24,996 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.030e+02 1.781e+02 2.091e+02 2.518e+02 3.954e+02, threshold=4.183e+02, percent-clipped=0.0 +2023-03-26 03:10:26,213 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20020.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:10:34,541 INFO [finetune.py:976] (5/7) Epoch 4, batch 2850, loss[loss=0.1941, simple_loss=0.2535, pruned_loss=0.06736, over 4899.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.2823, pruned_loss=0.08624, over 952062.59 frames. ], batch size: 32, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:10:45,850 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20050.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:11:21,582 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20081.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:11:22,667 INFO [finetune.py:976] (5/7) Epoch 4, batch 2900, loss[loss=0.2591, simple_loss=0.3266, pruned_loss=0.09585, over 4846.00 frames. ], tot_loss[loss=0.2305, simple_loss=0.2857, pruned_loss=0.08766, over 951919.38 frames. 
], batch size: 49, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:11:31,855 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20089.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:11:42,698 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=20098.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:11:56,173 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20112.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:12:05,859 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.342e+02 1.862e+02 2.201e+02 2.748e+02 4.534e+02, threshold=4.402e+02, percent-clipped=1.0 +2023-03-26 03:12:27,410 INFO [finetune.py:976] (5/7) Epoch 4, batch 2950, loss[loss=0.2498, simple_loss=0.3125, pruned_loss=0.09356, over 4822.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.2892, pruned_loss=0.08857, over 952994.35 frames. ], batch size: 40, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:12:48,262 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20150.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:13:07,746 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4210, 2.8758, 2.2807, 1.7336, 2.9411, 2.9264, 2.7437, 2.4494], + device='cuda:5'), covar=tensor([0.0752, 0.0521, 0.0894, 0.1033, 0.0429, 0.0708, 0.0694, 0.0915], + device='cuda:5'), in_proj_covar=tensor([0.0141, 0.0134, 0.0147, 0.0130, 0.0112, 0.0145, 0.0149, 0.0164], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:13:10,776 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20173.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:13:15,356 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.2284, 1.1334, 1.3484, 0.5239, 1.0782, 1.4514, 1.5082, 1.2461], + device='cuda:5'), covar=tensor([0.0790, 0.0543, 0.0429, 0.0529, 0.0478, 0.0478, 0.0305, 0.0603], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0160, 0.0119, 0.0139, 0.0134, 0.0123, 0.0149, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.8936e-05, 1.1851e-04, 8.6769e-05, 1.0177e-04, 9.7435e-05, 9.0968e-05, + 1.1136e-04, 1.0857e-04], device='cuda:5') +2023-03-26 03:13:17,094 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7736, 1.6097, 2.1941, 1.5141, 2.0125, 2.0454, 1.5921, 2.2807], + device='cuda:5'), covar=tensor([0.1641, 0.2205, 0.1394, 0.2273, 0.0946, 0.1587, 0.2917, 0.0999], + device='cuda:5'), in_proj_covar=tensor([0.0209, 0.0209, 0.0206, 0.0199, 0.0184, 0.0227, 0.0218, 0.0206], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:13:18,825 INFO [finetune.py:976] (5/7) Epoch 4, batch 3000, loss[loss=0.2574, simple_loss=0.3177, pruned_loss=0.09856, over 4888.00 frames. ], tot_loss[loss=0.2346, simple_loss=0.2909, pruned_loss=0.08913, over 952992.18 frames. 
], batch size: 32, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:13:18,826 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 03:13:26,461 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8214, 0.9740, 1.7116, 1.5328, 1.4702, 1.4221, 1.3534, 1.5539], + device='cuda:5'), covar=tensor([0.6719, 0.9621, 0.8209, 0.8839, 0.9908, 0.7561, 1.1951, 0.7424], + device='cuda:5'), in_proj_covar=tensor([0.0229, 0.0252, 0.0256, 0.0262, 0.0241, 0.0218, 0.0277, 0.0222], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:13:34,007 INFO [finetune.py:1010] (5/7) Epoch 4, validation: loss=0.169, simple_loss=0.2409, pruned_loss=0.04857, over 2265189.00 frames. +2023-03-26 03:13:34,007 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6316MB +2023-03-26 03:13:58,705 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20204.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:14:18,152 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.180e+02 1.917e+02 2.134e+02 2.804e+02 4.274e+02, threshold=4.268e+02, percent-clipped=0.0 +2023-03-26 03:14:27,243 INFO [finetune.py:976] (5/7) Epoch 4, batch 3050, loss[loss=0.2269, simple_loss=0.2812, pruned_loss=0.08634, over 4725.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.2919, pruned_loss=0.08968, over 953373.66 frames. ], batch size: 54, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:15:06,973 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20265.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:15:16,647 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20274.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:15:24,347 INFO [finetune.py:976] (5/7) Epoch 4, batch 3100, loss[loss=0.2095, simple_loss=0.2716, pruned_loss=0.07364, over 4927.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.2883, pruned_loss=0.08801, over 954010.69 frames. ], batch size: 33, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:15:25,027 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7560, 4.0066, 3.9239, 1.7383, 4.2015, 3.1731, 1.0007, 2.9649], + device='cuda:5'), covar=tensor([0.2276, 0.2106, 0.1357, 0.3371, 0.0865, 0.0876, 0.4497, 0.1377], + device='cuda:5'), in_proj_covar=tensor([0.0155, 0.0171, 0.0163, 0.0129, 0.0156, 0.0122, 0.0146, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 03:16:01,240 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.111e+02 1.549e+02 1.969e+02 2.570e+02 5.632e+02, threshold=3.937e+02, percent-clipped=1.0 +2023-03-26 03:16:03,111 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=20322.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:16:14,159 INFO [finetune.py:976] (5/7) Epoch 4, batch 3150, loss[loss=0.2497, simple_loss=0.2872, pruned_loss=0.1061, over 4900.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.2865, pruned_loss=0.08841, over 953388.66 frames. ], batch size: 36, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:16:56,930 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20376.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:16:59,527 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. 
limit=2.0 +2023-03-26 03:17:01,161 INFO [finetune.py:976] (5/7) Epoch 4, batch 3200, loss[loss=0.1977, simple_loss=0.2481, pruned_loss=0.07362, over 4841.00 frames. ], tot_loss[loss=0.227, simple_loss=0.2817, pruned_loss=0.08612, over 954967.96 frames. ], batch size: 30, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:17:04,832 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20388.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:17:19,409 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5851, 1.4826, 1.4978, 1.5679, 0.9950, 3.0067, 1.0929, 1.5959], + device='cuda:5'), covar=tensor([0.3517, 0.2515, 0.2079, 0.2449, 0.2071, 0.0248, 0.2850, 0.1428], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0112, 0.0116, 0.0120, 0.0117, 0.0097, 0.0101, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 03:17:40,189 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20418.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:17:40,662 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.143e+02 1.625e+02 1.959e+02 2.342e+02 5.079e+02, threshold=3.919e+02, percent-clipped=3.0 +2023-03-26 03:17:41,213 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0 +2023-03-26 03:17:58,484 INFO [finetune.py:976] (5/7) Epoch 4, batch 3250, loss[loss=0.2244, simple_loss=0.286, pruned_loss=0.08141, over 4737.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.2832, pruned_loss=0.08729, over 950949.67 frames. ], batch size: 54, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:18:06,352 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20445.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:18:08,867 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20449.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:18:21,903 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20468.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:18:29,606 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20479.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:18:31,931 INFO [finetune.py:976] (5/7) Epoch 4, batch 3300, loss[loss=0.2134, simple_loss=0.2705, pruned_loss=0.07814, over 4859.00 frames. ], tot_loss[loss=0.23, simple_loss=0.2854, pruned_loss=0.0873, over 952019.12 frames. ], batch size: 31, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:18:36,768 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7156, 1.2027, 0.9914, 1.6827, 2.0161, 1.4193, 1.4744, 1.6850], + device='cuda:5'), covar=tensor([0.1549, 0.2223, 0.2129, 0.1315, 0.2191, 0.2255, 0.1412, 0.1963], + device='cuda:5'), in_proj_covar=tensor([0.0093, 0.0100, 0.0118, 0.0095, 0.0126, 0.0098, 0.0101, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 03:19:09,458 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.164e+02 1.750e+02 2.039e+02 2.534e+02 4.074e+02, threshold=4.078e+02, percent-clipped=2.0 +2023-03-26 03:19:29,493 INFO [finetune.py:976] (5/7) Epoch 4, batch 3350, loss[loss=0.2412, simple_loss=0.2941, pruned_loss=0.09414, over 4931.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.2887, pruned_loss=0.08897, over 950029.92 frames. 
], batch size: 33, lr: 3.96e-03, grad_scale: 64.0 +2023-03-26 03:19:59,035 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20560.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:20:15,684 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3818, 1.4134, 1.4316, 0.7312, 1.5878, 1.4925, 1.3848, 1.3197], + device='cuda:5'), covar=tensor([0.0745, 0.0755, 0.0753, 0.1075, 0.0744, 0.0811, 0.0730, 0.1235], + device='cuda:5'), in_proj_covar=tensor([0.0141, 0.0134, 0.0147, 0.0130, 0.0112, 0.0146, 0.0149, 0.0164], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:20:17,268 INFO [finetune.py:976] (5/7) Epoch 4, batch 3400, loss[loss=0.192, simple_loss=0.2487, pruned_loss=0.06767, over 4754.00 frames. ], tot_loss[loss=0.2363, simple_loss=0.2918, pruned_loss=0.09043, over 948727.10 frames. ], batch size: 23, lr: 3.96e-03, grad_scale: 64.0 +2023-03-26 03:20:20,437 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20588.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:20:46,763 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.150e+02 1.735e+02 2.048e+02 2.538e+02 3.974e+02, threshold=4.096e+02, percent-clipped=0.0 +2023-03-26 03:20:47,519 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8167, 1.6810, 1.6246, 1.8315, 2.1920, 1.7465, 1.5043, 1.6184], + device='cuda:5'), covar=tensor([0.2036, 0.2060, 0.1721, 0.1697, 0.1762, 0.1200, 0.2511, 0.1704], + device='cuda:5'), in_proj_covar=tensor([0.0231, 0.0209, 0.0197, 0.0184, 0.0234, 0.0174, 0.0214, 0.0187], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:21:05,097 INFO [finetune.py:976] (5/7) Epoch 4, batch 3450, loss[loss=0.2, simple_loss=0.2762, pruned_loss=0.06189, over 4814.00 frames. ], tot_loss[loss=0.2382, simple_loss=0.2932, pruned_loss=0.09157, over 948031.29 frames. ], batch size: 38, lr: 3.96e-03, grad_scale: 64.0 +2023-03-26 03:21:22,517 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20649.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:21:45,702 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20676.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:21:51,865 INFO [finetune.py:976] (5/7) Epoch 4, batch 3500, loss[loss=0.214, simple_loss=0.2802, pruned_loss=0.07395, over 4916.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.2906, pruned_loss=0.09085, over 949524.96 frames. ], batch size: 46, lr: 3.96e-03, grad_scale: 64.0 +2023-03-26 03:22:31,512 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.236e+02 1.688e+02 2.022e+02 2.523e+02 5.341e+02, threshold=4.043e+02, percent-clipped=2.0 +2023-03-26 03:22:35,129 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=20724.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:22:43,661 INFO [finetune.py:976] (5/7) Epoch 4, batch 3550, loss[loss=0.2113, simple_loss=0.2688, pruned_loss=0.07686, over 4857.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.286, pruned_loss=0.0884, over 951119.04 frames. 
], batch size: 44, lr: 3.96e-03, grad_scale: 64.0 +2023-03-26 03:22:56,120 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20744.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:22:56,765 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20745.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:23:24,106 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20768.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:23:33,932 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20774.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:23:41,429 INFO [finetune.py:976] (5/7) Epoch 4, batch 3600, loss[loss=0.2565, simple_loss=0.3142, pruned_loss=0.09943, over 4916.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.2817, pruned_loss=0.08638, over 952550.93 frames. ], batch size: 46, lr: 3.96e-03, grad_scale: 64.0 +2023-03-26 03:23:58,257 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=20793.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:24:24,362 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=20816.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:24:31,495 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.828e+01 1.831e+02 2.152e+02 2.506e+02 5.159e+02, threshold=4.304e+02, percent-clipped=1.0 +2023-03-26 03:24:35,572 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0564, 0.8990, 1.0282, 0.2083, 0.6723, 1.1627, 1.2310, 1.0279], + device='cuda:5'), covar=tensor([0.1038, 0.0689, 0.0525, 0.0766, 0.0653, 0.0497, 0.0449, 0.0688], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0157, 0.0119, 0.0137, 0.0133, 0.0122, 0.0147, 0.0145], + device='cuda:5'), out_proj_covar=tensor([9.7847e-05, 1.1693e-04, 8.6330e-05, 1.0028e-04, 9.5923e-05, 9.0335e-05, + 1.0995e-04, 1.0741e-04], device='cuda:5') +2023-03-26 03:24:50,038 INFO [finetune.py:976] (5/7) Epoch 4, batch 3650, loss[loss=0.2333, simple_loss=0.2871, pruned_loss=0.08974, over 4776.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.2831, pruned_loss=0.08657, over 954025.34 frames. ], batch size: 28, lr: 3.96e-03, grad_scale: 64.0 +2023-03-26 03:25:24,813 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20860.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:25:40,629 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0 +2023-03-26 03:25:52,758 INFO [finetune.py:976] (5/7) Epoch 4, batch 3700, loss[loss=0.2682, simple_loss=0.3079, pruned_loss=0.1142, over 4829.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.2865, pruned_loss=0.08736, over 952489.35 frames. ], batch size: 33, lr: 3.96e-03, grad_scale: 64.0 +2023-03-26 03:26:17,162 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=20908.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:26:24,269 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.096e+02 1.710e+02 2.022e+02 2.626e+02 5.956e+02, threshold=4.044e+02, percent-clipped=2.0 +2023-03-26 03:26:34,612 INFO [finetune.py:976] (5/7) Epoch 4, batch 3750, loss[loss=0.234, simple_loss=0.2892, pruned_loss=0.0894, over 4848.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.2887, pruned_loss=0.08801, over 954058.79 frames. 
], batch size: 44, lr: 3.96e-03, grad_scale: 64.0 +2023-03-26 03:26:46,570 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20944.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:27:33,087 INFO [finetune.py:976] (5/7) Epoch 4, batch 3800, loss[loss=0.244, simple_loss=0.3047, pruned_loss=0.09169, over 4893.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.2907, pruned_loss=0.0888, over 954808.61 frames. ], batch size: 43, lr: 3.96e-03, grad_scale: 64.0 +2023-03-26 03:28:14,227 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.257e+02 1.718e+02 1.983e+02 2.372e+02 3.493e+02, threshold=3.966e+02, percent-clipped=0.0 +2023-03-26 03:28:29,874 INFO [finetune.py:976] (5/7) Epoch 4, batch 3850, loss[loss=0.2757, simple_loss=0.3235, pruned_loss=0.114, over 4854.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.2887, pruned_loss=0.08792, over 955135.51 frames. ], batch size: 31, lr: 3.96e-03, grad_scale: 64.0 +2023-03-26 03:28:37,703 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21044.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:28:45,296 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21052.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:29:08,447 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21074.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:29:19,704 INFO [finetune.py:976] (5/7) Epoch 4, batch 3900, loss[loss=0.2252, simple_loss=0.2794, pruned_loss=0.08548, over 4903.00 frames. ], tot_loss[loss=0.23, simple_loss=0.2857, pruned_loss=0.08719, over 955220.40 frames. ], batch size: 43, lr: 3.96e-03, grad_scale: 64.0 +2023-03-26 03:29:20,418 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8324, 1.6766, 1.6337, 1.8178, 0.9795, 4.3191, 1.6501, 2.3352], + device='cuda:5'), covar=tensor([0.3191, 0.2282, 0.1959, 0.2089, 0.2002, 0.0097, 0.2377, 0.1218], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0113, 0.0117, 0.0120, 0.0117, 0.0098, 0.0101, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 03:29:26,285 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0686, 1.5637, 1.7397, 1.8346, 1.5781, 1.6803, 1.7836, 1.6220], + device='cuda:5'), covar=tensor([0.7847, 1.1080, 0.8661, 1.0157, 1.1368, 0.7978, 1.2656, 0.8191], + device='cuda:5'), in_proj_covar=tensor([0.0228, 0.0250, 0.0255, 0.0261, 0.0240, 0.0217, 0.0276, 0.0220], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:5') +2023-03-26 03:29:26,812 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=21092.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:29:43,705 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21113.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:29:46,787 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21118.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:29:47,847 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.038e+02 1.709e+02 1.984e+02 2.370e+02 6.134e+02, threshold=3.968e+02, percent-clipped=2.0 +2023-03-26 03:29:49,147 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=21122.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:29:50,438 INFO [zipformer.py:2441] (5/7) 
attn_weights_entropy = tensor([1.6165, 1.5126, 1.8743, 1.2843, 1.6188, 1.8028, 1.5153, 2.0310], + device='cuda:5'), covar=tensor([0.1534, 0.2322, 0.1309, 0.1937, 0.1205, 0.1588, 0.2825, 0.0950], + device='cuda:5'), in_proj_covar=tensor([0.0206, 0.0207, 0.0204, 0.0197, 0.0183, 0.0226, 0.0215, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:29:59,344 INFO [finetune.py:976] (5/7) Epoch 4, batch 3950, loss[loss=0.2185, simple_loss=0.271, pruned_loss=0.08297, over 4932.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.282, pruned_loss=0.08517, over 955402.06 frames. ], batch size: 38, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:30:47,947 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21179.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:30:50,288 INFO [finetune.py:976] (5/7) Epoch 4, batch 4000, loss[loss=0.2283, simple_loss=0.2925, pruned_loss=0.082, over 4900.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.2801, pruned_loss=0.08442, over 954156.97 frames. ], batch size: 35, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:31:19,053 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.151e+02 1.740e+02 2.148e+02 2.427e+02 4.357e+02, threshold=4.296e+02, percent-clipped=1.0 +2023-03-26 03:31:27,483 INFO [finetune.py:976] (5/7) Epoch 4, batch 4050, loss[loss=0.2548, simple_loss=0.3051, pruned_loss=0.1022, over 4855.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.284, pruned_loss=0.08639, over 953406.93 frames. ], batch size: 31, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:31:27,669 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.72 vs. limit=5.0 +2023-03-26 03:31:35,245 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21244.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:31:36,463 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-03-26 03:32:12,120 INFO [finetune.py:976] (5/7) Epoch 4, batch 4100, loss[loss=0.1962, simple_loss=0.2629, pruned_loss=0.06473, over 4889.00 frames. ], tot_loss[loss=0.2316, simple_loss=0.2875, pruned_loss=0.08786, over 953124.75 frames. 
], batch size: 32, lr: 3.96e-03, grad_scale: 32.0 +2023-03-26 03:32:14,057 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4022, 1.3180, 1.5849, 2.4696, 1.6781, 2.0741, 0.7803, 1.9930], + device='cuda:5'), covar=tensor([0.1936, 0.1555, 0.1250, 0.0690, 0.0983, 0.1155, 0.1738, 0.0823], + device='cuda:5'), in_proj_covar=tensor([0.0104, 0.0119, 0.0138, 0.0168, 0.0105, 0.0144, 0.0130, 0.0105], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 03:32:23,704 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=21292.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:32:44,676 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21312.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:32:53,586 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.825e+02 2.074e+02 2.571e+02 5.101e+02, threshold=4.147e+02, percent-clipped=2.0 +2023-03-26 03:33:04,204 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21328.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:33:07,022 INFO [finetune.py:976] (5/7) Epoch 4, batch 4150, loss[loss=0.202, simple_loss=0.2675, pruned_loss=0.06825, over 4758.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.2894, pruned_loss=0.08849, over 954285.07 frames. ], batch size: 26, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:33:45,530 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21373.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:33:52,598 INFO [finetune.py:976] (5/7) Epoch 4, batch 4200, loss[loss=0.2704, simple_loss=0.3238, pruned_loss=0.1085, over 4890.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.2895, pruned_loss=0.08769, over 955333.10 frames. 
], batch size: 35, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:34:02,011 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21389.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:34:12,107 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5767, 1.5329, 1.3147, 1.3367, 1.7601, 1.7723, 1.6206, 1.4426], + device='cuda:5'), covar=tensor([0.0282, 0.0310, 0.0491, 0.0360, 0.0254, 0.0483, 0.0329, 0.0351], + device='cuda:5'), in_proj_covar=tensor([0.0086, 0.0115, 0.0139, 0.0119, 0.0106, 0.0100, 0.0092, 0.0110], + device='cuda:5'), out_proj_covar=tensor([6.7522e-05, 9.0568e-05, 1.1209e-04, 9.3821e-05, 8.4031e-05, 7.4596e-05, + 7.0504e-05, 8.6157e-05], device='cuda:5') +2023-03-26 03:34:21,944 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21408.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:34:29,742 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9003, 1.8515, 1.6601, 1.3422, 1.9905, 2.2140, 2.0657, 1.8557], + device='cuda:5'), covar=tensor([0.0313, 0.0365, 0.0508, 0.0409, 0.0362, 0.0427, 0.0294, 0.0325], + device='cuda:5'), in_proj_covar=tensor([0.0086, 0.0115, 0.0139, 0.0119, 0.0106, 0.0100, 0.0092, 0.0110], + device='cuda:5'), out_proj_covar=tensor([6.7430e-05, 9.0452e-05, 1.1195e-04, 9.3719e-05, 8.3930e-05, 7.4492e-05, + 7.0402e-05, 8.6039e-05], device='cuda:5') +2023-03-26 03:34:39,318 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.013e+02 1.725e+02 2.024e+02 2.533e+02 3.913e+02, threshold=4.049e+02, percent-clipped=0.0 +2023-03-26 03:34:50,766 INFO [finetune.py:976] (5/7) Epoch 4, batch 4250, loss[loss=0.2154, simple_loss=0.2742, pruned_loss=0.0783, over 4854.00 frames. ], tot_loss[loss=0.229, simple_loss=0.2861, pruned_loss=0.08594, over 957315.80 frames. ], batch size: 47, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:35:18,394 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21474.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:35:24,284 INFO [finetune.py:976] (5/7) Epoch 4, batch 4300, loss[loss=0.196, simple_loss=0.2541, pruned_loss=0.06893, over 4848.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.2822, pruned_loss=0.08445, over 955101.15 frames. ], batch size: 49, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:35:53,226 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.244e+02 1.671e+02 2.065e+02 2.560e+02 4.445e+02, threshold=4.130e+02, percent-clipped=1.0 +2023-03-26 03:36:01,104 INFO [finetune.py:976] (5/7) Epoch 4, batch 4350, loss[loss=0.2191, simple_loss=0.2766, pruned_loss=0.08082, over 4819.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.2784, pruned_loss=0.08242, over 955723.43 frames. ], batch size: 39, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:36:40,603 INFO [finetune.py:976] (5/7) Epoch 4, batch 4400, loss[loss=0.1978, simple_loss=0.2467, pruned_loss=0.07445, over 4153.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.28, pruned_loss=0.08361, over 955785.98 frames. ], batch size: 18, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:36:49,003 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21595.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:36:49,354 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. 
limit=2.0 +2023-03-26 03:36:54,568 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9518, 1.8430, 1.7364, 2.0011, 2.4328, 1.9536, 1.7335, 1.4743], + device='cuda:5'), covar=tensor([0.2316, 0.2277, 0.2014, 0.1881, 0.2222, 0.1236, 0.2776, 0.1940], + device='cuda:5'), in_proj_covar=tensor([0.0232, 0.0208, 0.0198, 0.0184, 0.0235, 0.0174, 0.0214, 0.0187], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:37:05,925 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21613.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:37:15,013 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.167e+02 1.721e+02 2.036e+02 2.627e+02 4.967e+02, threshold=4.072e+02, percent-clipped=2.0 +2023-03-26 03:37:17,076 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.71 vs. limit=2.0 +2023-03-26 03:37:22,899 INFO [finetune.py:976] (5/7) Epoch 4, batch 4450, loss[loss=0.2212, simple_loss=0.2885, pruned_loss=0.07696, over 4750.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.2836, pruned_loss=0.08488, over 954848.12 frames. ], batch size: 28, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:37:38,566 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21656.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:37:51,492 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21668.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:38:01,169 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21674.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:38:12,008 INFO [finetune.py:976] (5/7) Epoch 4, batch 4500, loss[loss=0.2382, simple_loss=0.3026, pruned_loss=0.08688, over 4893.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.2859, pruned_loss=0.08594, over 953916.23 frames. ], batch size: 35, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:38:12,682 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21684.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:38:41,406 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21708.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:38:49,079 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.156e+02 1.721e+02 2.032e+02 2.543e+02 5.339e+02, threshold=4.063e+02, percent-clipped=3.0 +2023-03-26 03:38:58,599 INFO [finetune.py:976] (5/7) Epoch 4, batch 4550, loss[loss=0.2829, simple_loss=0.3315, pruned_loss=0.1172, over 4724.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.2883, pruned_loss=0.08759, over 953812.71 frames. ], batch size: 59, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:39:14,118 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=21756.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:39:18,455 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21763.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:39:30,535 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21774.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:39:41,974 INFO [finetune.py:976] (5/7) Epoch 4, batch 4600, loss[loss=0.2197, simple_loss=0.2732, pruned_loss=0.08309, over 4811.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.2873, pruned_loss=0.08697, over 953504.15 frames. 
], batch size: 25, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:40:04,761 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-03-26 03:40:25,682 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.219e+02 1.789e+02 2.167e+02 2.687e+02 4.147e+02, threshold=4.334e+02, percent-clipped=1.0 +2023-03-26 03:40:32,332 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=21822.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:40:33,583 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21824.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:40:44,877 INFO [finetune.py:976] (5/7) Epoch 4, batch 4650, loss[loss=0.2077, simple_loss=0.2643, pruned_loss=0.07561, over 4694.00 frames. ], tot_loss[loss=0.2292, simple_loss=0.2852, pruned_loss=0.08661, over 954358.97 frames. ], batch size: 23, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:41:06,208 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21850.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:41:28,342 INFO [finetune.py:976] (5/7) Epoch 4, batch 4700, loss[loss=0.2079, simple_loss=0.2642, pruned_loss=0.07581, over 4749.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.2813, pruned_loss=0.08511, over 954856.09 frames. ], batch size: 54, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:42:02,078 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21911.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:42:08,440 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.749e+02 2.081e+02 2.546e+02 7.973e+02, threshold=4.162e+02, percent-clipped=1.0 +2023-03-26 03:42:16,911 INFO [finetune.py:976] (5/7) Epoch 4, batch 4750, loss[loss=0.2204, simple_loss=0.2847, pruned_loss=0.07801, over 4834.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.2785, pruned_loss=0.08399, over 954475.67 frames. ], batch size: 47, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:42:20,036 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0867, 1.8943, 1.6399, 1.8500, 1.8613, 1.8212, 1.7590, 2.5907], + device='cuda:5'), covar=tensor([0.7259, 0.7111, 0.5774, 0.7069, 0.5838, 0.4172, 0.7442, 0.2564], + device='cuda:5'), in_proj_covar=tensor([0.0280, 0.0255, 0.0221, 0.0285, 0.0238, 0.0199, 0.0243, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:42:26,204 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.86 vs. 
limit=5.0 +2023-03-26 03:42:28,909 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5859, 1.5202, 1.8583, 1.7676, 1.6428, 3.5149, 1.3669, 1.6555], + device='cuda:5'), covar=tensor([0.0979, 0.1747, 0.1176, 0.1085, 0.1677, 0.0241, 0.1524, 0.1721], + device='cuda:5'), in_proj_covar=tensor([0.0079, 0.0082, 0.0078, 0.0081, 0.0093, 0.0084, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 03:42:29,509 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21951.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:42:46,671 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21968.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:42:53,032 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21969.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:42:53,721 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21970.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:43:02,050 INFO [finetune.py:976] (5/7) Epoch 4, batch 4800, loss[loss=0.2159, simple_loss=0.267, pruned_loss=0.0824, over 4785.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.2797, pruned_loss=0.08431, over 953653.99 frames. ], batch size: 26, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:43:02,808 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21984.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:43:34,456 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=22016.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:43:37,299 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.143e+02 1.769e+02 1.987e+02 2.631e+02 5.032e+02, threshold=3.974e+02, percent-clipped=2.0 +2023-03-26 03:43:41,847 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.63 vs. limit=5.0 +2023-03-26 03:43:44,133 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22031.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:43:44,660 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=22032.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:43:45,185 INFO [finetune.py:976] (5/7) Epoch 4, batch 4850, loss[loss=0.2469, simple_loss=0.2976, pruned_loss=0.09804, over 4762.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.2847, pruned_loss=0.08578, over 954151.81 frames. ], batch size: 54, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:44:08,421 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8827, 1.9767, 1.8777, 1.2747, 2.1438, 1.9978, 2.0114, 1.7084], + device='cuda:5'), covar=tensor([0.0629, 0.0634, 0.0729, 0.0978, 0.0499, 0.0729, 0.0653, 0.0971], + device='cuda:5'), in_proj_covar=tensor([0.0137, 0.0132, 0.0144, 0.0127, 0.0109, 0.0143, 0.0146, 0.0160], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:44:19,577 INFO [finetune.py:976] (5/7) Epoch 4, batch 4900, loss[loss=0.2458, simple_loss=0.3065, pruned_loss=0.09253, over 4745.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.2868, pruned_loss=0.08684, over 952939.11 frames. 
], batch size: 54, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:44:58,192 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1682, 2.2547, 2.2155, 1.4425, 2.4796, 2.3761, 2.3065, 1.9847], + device='cuda:5'), covar=tensor([0.0634, 0.0604, 0.0696, 0.1007, 0.0433, 0.0721, 0.0658, 0.0946], + device='cuda:5'), in_proj_covar=tensor([0.0138, 0.0133, 0.0145, 0.0128, 0.0110, 0.0144, 0.0148, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:44:58,780 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1196, 2.2545, 2.1083, 1.4780, 2.3228, 2.2743, 2.1980, 1.9125], + device='cuda:5'), covar=tensor([0.0696, 0.0569, 0.0805, 0.1027, 0.0467, 0.0819, 0.0700, 0.0998], + device='cuda:5'), in_proj_covar=tensor([0.0138, 0.0133, 0.0145, 0.0128, 0.0110, 0.0144, 0.0148, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:45:00,548 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22119.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:45:01,042 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.093e+02 1.764e+02 2.232e+02 2.515e+02 4.523e+02, threshold=4.464e+02, percent-clipped=3.0 +2023-03-26 03:45:09,149 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5066, 1.1571, 1.3020, 1.2247, 1.5902, 1.6408, 1.4797, 1.2705], + device='cuda:5'), covar=tensor([0.0273, 0.0424, 0.0625, 0.0365, 0.0266, 0.0422, 0.0317, 0.0438], + device='cuda:5'), in_proj_covar=tensor([0.0086, 0.0114, 0.0138, 0.0119, 0.0105, 0.0100, 0.0092, 0.0109], + device='cuda:5'), out_proj_covar=tensor([6.7291e-05, 9.0242e-05, 1.1125e-04, 9.3615e-05, 8.3571e-05, 7.4474e-05, + 7.0473e-05, 8.5513e-05], device='cuda:5') +2023-03-26 03:45:18,656 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22130.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:45:20,459 INFO [finetune.py:976] (5/7) Epoch 4, batch 4950, loss[loss=0.2183, simple_loss=0.2819, pruned_loss=0.07736, over 4844.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.2891, pruned_loss=0.08786, over 954000.15 frames. 
], batch size: 44, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:45:29,841 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.7591, 4.0780, 4.3078, 4.5965, 4.4647, 4.1993, 4.8489, 1.5971], + device='cuda:5'), covar=tensor([0.0681, 0.0838, 0.0742, 0.0796, 0.1154, 0.1292, 0.0512, 0.5123], + device='cuda:5'), in_proj_covar=tensor([0.0363, 0.0246, 0.0279, 0.0295, 0.0340, 0.0287, 0.0309, 0.0303], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:45:50,308 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4627, 1.9007, 1.6291, 0.6620, 1.8449, 1.9210, 1.4991, 1.8215], + device='cuda:5'), covar=tensor([0.0866, 0.1530, 0.2080, 0.2683, 0.1800, 0.2530, 0.3085, 0.1366], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0202, 0.0205, 0.0192, 0.0220, 0.0211, 0.0222, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:46:12,769 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22176.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:46:12,804 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8995, 1.0451, 1.6762, 1.6169, 1.4724, 1.4423, 1.5161, 1.5128], + device='cuda:5'), covar=tensor([0.5674, 0.8530, 0.6980, 0.7591, 0.8751, 0.6681, 0.9918, 0.6767], + device='cuda:5'), in_proj_covar=tensor([0.0230, 0.0252, 0.0257, 0.0262, 0.0243, 0.0219, 0.0279, 0.0223], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:46:16,747 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-03-26 03:46:19,510 INFO [finetune.py:976] (5/7) Epoch 4, batch 5000, loss[loss=0.218, simple_loss=0.2794, pruned_loss=0.07836, over 4880.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.2861, pruned_loss=0.08639, over 952564.83 frames. ], batch size: 32, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:46:24,494 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22191.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:46:35,050 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22206.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:46:40,550 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22215.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:46:43,478 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.598e+02 2.028e+02 2.482e+02 4.524e+02, threshold=4.056e+02, percent-clipped=1.0 +2023-03-26 03:46:59,735 INFO [finetune.py:976] (5/7) Epoch 4, batch 5050, loss[loss=0.2162, simple_loss=0.2762, pruned_loss=0.0781, over 4805.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.2842, pruned_loss=0.08601, over 955264.37 frames. 
], batch size: 45, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:47:02,285 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22237.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:47:02,294 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22237.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:47:16,634 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22251.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:47:28,160 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22269.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:47:32,944 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22276.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:47:40,347 INFO [finetune.py:976] (5/7) Epoch 4, batch 5100, loss[loss=0.1667, simple_loss=0.2261, pruned_loss=0.05363, over 4825.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.2815, pruned_loss=0.08519, over 954132.29 frames. ], batch size: 30, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:47:42,692 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6922, 3.3854, 3.3254, 1.4797, 3.5368, 2.4999, 0.6611, 2.2553], + device='cuda:5'), covar=tensor([0.2217, 0.1974, 0.1558, 0.3573, 0.1244, 0.1126, 0.4667, 0.1773], + device='cuda:5'), in_proj_covar=tensor([0.0157, 0.0173, 0.0164, 0.0130, 0.0157, 0.0124, 0.0148, 0.0125], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 03:47:49,920 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22298.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:47:50,458 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=22299.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:48:01,592 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-03-26 03:48:03,281 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=22317.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:48:05,002 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.872e+01 1.620e+02 1.853e+02 2.165e+02 3.345e+02, threshold=3.706e+02, percent-clipped=0.0 +2023-03-26 03:48:08,734 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22326.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:48:13,376 INFO [finetune.py:976] (5/7) Epoch 4, batch 5150, loss[loss=0.2828, simple_loss=0.3245, pruned_loss=0.1205, over 4911.00 frames. ], tot_loss[loss=0.226, simple_loss=0.2817, pruned_loss=0.08514, over 955060.12 frames. ], batch size: 35, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:48:51,907 INFO [finetune.py:976] (5/7) Epoch 4, batch 5200, loss[loss=0.2458, simple_loss=0.2972, pruned_loss=0.0972, over 4772.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.2845, pruned_loss=0.08614, over 954674.16 frames. ], batch size: 59, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:48:53,221 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22385.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:49:24,926 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. 
limit=2.0 +2023-03-26 03:49:26,404 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22419.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:49:26,883 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.279e+02 1.811e+02 2.155e+02 2.737e+02 4.498e+02, threshold=4.310e+02, percent-clipped=4.0 +2023-03-26 03:49:40,613 INFO [finetune.py:976] (5/7) Epoch 4, batch 5250, loss[loss=0.2766, simple_loss=0.3314, pruned_loss=0.1109, over 4844.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.2888, pruned_loss=0.08742, over 956800.44 frames. ], batch size: 47, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:49:54,914 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22446.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:50:18,145 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=22467.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:50:27,829 INFO [finetune.py:976] (5/7) Epoch 4, batch 5300, loss[loss=0.2923, simple_loss=0.327, pruned_loss=0.1288, over 4822.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.29, pruned_loss=0.08879, over 954945.55 frames. ], batch size: 38, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:50:30,466 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22486.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:50:49,038 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22506.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:51:10,350 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.196e+02 1.644e+02 2.088e+02 2.525e+02 4.526e+02, threshold=4.176e+02, percent-clipped=1.0 +2023-03-26 03:51:29,037 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22532.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:51:29,575 INFO [finetune.py:976] (5/7) Epoch 4, batch 5350, loss[loss=0.1717, simple_loss=0.2342, pruned_loss=0.05463, over 4748.00 frames. ], tot_loss[loss=0.2316, simple_loss=0.2886, pruned_loss=0.08728, over 952551.14 frames. 
], batch size: 27, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:51:41,628 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5653, 1.0481, 0.8185, 1.4450, 1.9930, 0.6952, 1.3180, 1.4370], + device='cuda:5'), covar=tensor([0.1676, 0.2358, 0.2029, 0.1304, 0.2069, 0.2113, 0.1579, 0.2262], + device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0099, 0.0117, 0.0093, 0.0125, 0.0097, 0.0101, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 03:51:43,432 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=22554.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:52:10,733 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22571.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:52:12,078 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9466, 2.5324, 2.4352, 1.2962, 2.6271, 2.1641, 2.0098, 2.3179], + device='cuda:5'), covar=tensor([0.0956, 0.0909, 0.1437, 0.2346, 0.1696, 0.2047, 0.1962, 0.1193], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0200, 0.0203, 0.0190, 0.0217, 0.0209, 0.0219, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:52:20,637 INFO [finetune.py:976] (5/7) Epoch 4, batch 5400, loss[loss=0.2295, simple_loss=0.2696, pruned_loss=0.09471, over 4757.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.2849, pruned_loss=0.08567, over 951926.59 frames. ], batch size: 26, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:52:27,237 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22593.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:52:29,117 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5674, 1.2718, 1.3702, 1.3062, 1.6541, 1.6586, 1.4895, 1.3482], + device='cuda:5'), covar=tensor([0.0293, 0.0333, 0.0502, 0.0361, 0.0258, 0.0415, 0.0358, 0.0413], + device='cuda:5'), in_proj_covar=tensor([0.0087, 0.0115, 0.0140, 0.0120, 0.0106, 0.0101, 0.0092, 0.0110], + device='cuda:5'), out_proj_covar=tensor([6.8026e-05, 9.0713e-05, 1.1229e-04, 9.4762e-05, 8.4258e-05, 7.5419e-05, + 7.0943e-05, 8.6342e-05], device='cuda:5') +2023-03-26 03:52:45,274 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.025e+02 1.598e+02 1.961e+02 2.261e+02 4.832e+02, threshold=3.922e+02, percent-clipped=2.0 +2023-03-26 03:52:50,431 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22626.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:52:53,547 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-26 03:52:54,547 INFO [finetune.py:976] (5/7) Epoch 4, batch 5450, loss[loss=0.207, simple_loss=0.2639, pruned_loss=0.07509, over 4763.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.2826, pruned_loss=0.08503, over 953379.39 frames. 
], batch size: 28, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:52:57,697 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3000, 1.4033, 0.9602, 2.1517, 2.6742, 1.9621, 1.9644, 2.2227], + device='cuda:5'), covar=tensor([0.1392, 0.2116, 0.2323, 0.1160, 0.1682, 0.1809, 0.1320, 0.1929], + device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0099, 0.0118, 0.0094, 0.0125, 0.0097, 0.0101, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 03:52:58,315 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22639.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:52:58,908 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7952, 1.2020, 0.9302, 1.6254, 2.1586, 1.2804, 1.4945, 1.7368], + device='cuda:5'), covar=tensor([0.1549, 0.2231, 0.2258, 0.1327, 0.1924, 0.2130, 0.1477, 0.1958], + device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0099, 0.0118, 0.0094, 0.0125, 0.0097, 0.0101, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 03:53:25,509 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8943, 1.7075, 1.4383, 1.5201, 1.5404, 1.5369, 1.5485, 2.2901], + device='cuda:5'), covar=tensor([0.7038, 0.7471, 0.5883, 0.7696, 0.6736, 0.4260, 0.7728, 0.2783], + device='cuda:5'), in_proj_covar=tensor([0.0279, 0.0254, 0.0220, 0.0284, 0.0237, 0.0198, 0.0242, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:53:28,475 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5937, 1.5970, 2.0832, 1.8939, 1.8194, 4.3939, 1.6008, 1.9372], + device='cuda:5'), covar=tensor([0.1005, 0.1801, 0.1150, 0.1081, 0.1635, 0.0165, 0.1434, 0.1689], + device='cuda:5'), in_proj_covar=tensor([0.0078, 0.0082, 0.0078, 0.0080, 0.0093, 0.0083, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 03:53:30,708 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=22674.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:53:37,513 INFO [finetune.py:976] (5/7) Epoch 4, batch 5500, loss[loss=0.227, simple_loss=0.2852, pruned_loss=0.0844, over 4833.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.2796, pruned_loss=0.08376, over 953498.98 frames. ], batch size: 41, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:53:48,424 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22700.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 03:53:49,684 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.10 vs. limit=5.0 +2023-03-26 03:54:00,993 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.350e+02 1.787e+02 2.088e+02 2.580e+02 6.017e+02, threshold=4.176e+02, percent-clipped=5.0 +2023-03-26 03:54:16,190 INFO [finetune.py:976] (5/7) Epoch 4, batch 5550, loss[loss=0.2364, simple_loss=0.2976, pruned_loss=0.08759, over 4756.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.2816, pruned_loss=0.08475, over 955192.19 frames. 
], batch size: 27, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:54:23,405 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22741.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:54:35,932 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0878, 1.2948, 1.0944, 1.3341, 1.3927, 2.4444, 1.2072, 1.4587], + device='cuda:5'), covar=tensor([0.1042, 0.1815, 0.1267, 0.1080, 0.1727, 0.0381, 0.1590, 0.1771], + device='cuda:5'), in_proj_covar=tensor([0.0079, 0.0082, 0.0078, 0.0081, 0.0094, 0.0084, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 03:54:55,507 INFO [finetune.py:976] (5/7) Epoch 4, batch 5600, loss[loss=0.263, simple_loss=0.3082, pruned_loss=0.1089, over 4245.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.2845, pruned_loss=0.085, over 954404.96 frames. ], batch size: 65, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:54:57,306 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22786.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:55:01,946 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22794.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:55:28,583 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.180e+02 1.750e+02 2.147e+02 2.459e+02 4.993e+02, threshold=4.295e+02, percent-clipped=1.0 +2023-03-26 03:55:35,791 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22825.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:55:39,566 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1332, 1.8810, 1.5407, 2.0221, 1.9234, 1.7543, 1.7861, 2.8145], + device='cuda:5'), covar=tensor([0.7915, 0.9624, 0.6735, 0.9094, 0.7522, 0.4944, 0.8983, 0.2708], + device='cuda:5'), in_proj_covar=tensor([0.0278, 0.0253, 0.0219, 0.0283, 0.0236, 0.0198, 0.0241, 0.0193], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:55:40,660 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22832.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:55:41,215 INFO [finetune.py:976] (5/7) Epoch 4, batch 5650, loss[loss=0.1982, simple_loss=0.2431, pruned_loss=0.07664, over 4022.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.2867, pruned_loss=0.08551, over 953850.05 frames. ], batch size: 17, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:55:41,979 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=22834.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:55:48,916 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. 
limit=2.0 +2023-03-26 03:56:06,180 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22855.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:56:13,291 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3314, 2.2361, 2.6909, 2.5926, 2.5550, 4.6803, 2.1568, 2.4998], + device='cuda:5'), covar=tensor([0.0789, 0.1392, 0.0919, 0.0810, 0.1211, 0.0267, 0.1155, 0.1284], + device='cuda:5'), in_proj_covar=tensor([0.0078, 0.0082, 0.0078, 0.0080, 0.0093, 0.0084, 0.0086, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 03:56:21,394 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22871.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:56:31,134 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=22880.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:56:33,111 INFO [finetune.py:976] (5/7) Epoch 4, batch 5700, loss[loss=0.2153, simple_loss=0.2705, pruned_loss=0.08, over 4493.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.2827, pruned_loss=0.08491, over 934168.47 frames. ], batch size: 19, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:56:34,951 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22886.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:56:41,775 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22893.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:57:01,479 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-03-26 03:57:20,682 INFO [finetune.py:976] (5/7) Epoch 5, batch 0, loss[loss=0.2594, simple_loss=0.3109, pruned_loss=0.104, over 4897.00 frames. ], tot_loss[loss=0.2594, simple_loss=0.3109, pruned_loss=0.104, over 4897.00 frames. ], batch size: 36, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:57:20,682 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 03:57:36,906 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5765, 1.2273, 1.4069, 1.3017, 1.6909, 1.6435, 1.5482, 1.3603], + device='cuda:5'), covar=tensor([0.0332, 0.0349, 0.0538, 0.0339, 0.0267, 0.0370, 0.0323, 0.0431], + device='cuda:5'), in_proj_covar=tensor([0.0086, 0.0115, 0.0138, 0.0120, 0.0105, 0.0101, 0.0092, 0.0110], + device='cuda:5'), out_proj_covar=tensor([6.7570e-05, 9.0440e-05, 1.1130e-04, 9.4555e-05, 8.3660e-05, 7.5062e-05, + 7.0309e-05, 8.6112e-05], device='cuda:5') +2023-03-26 03:57:37,526 INFO [finetune.py:1010] (5/7) Epoch 5, validation: loss=0.1701, simple_loss=0.2413, pruned_loss=0.0494, over 2265189.00 frames. 
+2023-03-26 03:57:37,527 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 03:57:47,691 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8417, 1.1981, 1.6739, 1.6003, 1.4712, 1.4965, 1.5135, 1.5757], + device='cuda:5'), covar=tensor([0.5908, 0.8905, 0.7129, 0.8244, 0.8503, 0.6419, 1.0267, 0.6632], + device='cuda:5'), in_proj_covar=tensor([0.0229, 0.0251, 0.0257, 0.0260, 0.0240, 0.0218, 0.0276, 0.0223], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 03:57:48,821 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=22919.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:57:49,348 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.844e+01 1.616e+02 1.840e+02 2.309e+02 3.969e+02, threshold=3.680e+02, percent-clipped=0.0 +2023-03-26 03:58:02,584 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=22941.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:58:03,887 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.4679, 1.3179, 1.3322, 0.7717, 1.3794, 1.5945, 1.5676, 1.2728], + device='cuda:5'), covar=tensor([0.0937, 0.0666, 0.0665, 0.0639, 0.0499, 0.0395, 0.0366, 0.0628], + device='cuda:5'), in_proj_covar=tensor([0.0129, 0.0156, 0.0117, 0.0135, 0.0131, 0.0121, 0.0147, 0.0144], + device='cuda:5'), out_proj_covar=tensor([9.6854e-05, 1.1627e-04, 8.5080e-05, 9.8605e-05, 9.5036e-05, 8.9545e-05, + 1.0973e-04, 1.0650e-04], device='cuda:5') +2023-03-26 03:58:29,015 INFO [finetune.py:976] (5/7) Epoch 5, batch 50, loss[loss=0.1978, simple_loss=0.2475, pruned_loss=0.07407, over 4801.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.2898, pruned_loss=0.08822, over 216261.11 frames. ], batch size: 25, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:59:04,517 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6385, 1.4642, 1.9230, 1.9822, 1.6187, 3.4825, 1.3896, 1.5881], + device='cuda:5'), covar=tensor([0.0934, 0.1819, 0.1152, 0.1065, 0.1689, 0.0266, 0.1532, 0.1707], + device='cuda:5'), in_proj_covar=tensor([0.0078, 0.0082, 0.0078, 0.0080, 0.0093, 0.0084, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 03:59:05,723 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22995.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 03:59:17,858 INFO [finetune.py:976] (5/7) Epoch 5, batch 100, loss[loss=0.2056, simple_loss=0.2586, pruned_loss=0.07628, over 4819.00 frames. ], tot_loss[loss=0.23, simple_loss=0.2841, pruned_loss=0.08796, over 381257.81 frames. ], batch size: 25, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 03:59:23,734 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.085e+02 1.764e+02 2.029e+02 2.456e+02 6.922e+02, threshold=4.057e+02, percent-clipped=5.0 +2023-03-26 03:59:36,985 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23041.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 03:59:51,326 INFO [finetune.py:976] (5/7) Epoch 5, batch 150, loss[loss=0.2594, simple_loss=0.2984, pruned_loss=0.1102, over 4838.00 frames. ], tot_loss[loss=0.227, simple_loss=0.2798, pruned_loss=0.0871, over 508666.80 frames. 
], batch size: 30, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 04:00:08,778 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=23089.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:00:28,336 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23108.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:00:30,574 INFO [finetune.py:976] (5/7) Epoch 5, batch 200, loss[loss=0.1978, simple_loss=0.2528, pruned_loss=0.07135, over 4775.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.2756, pruned_loss=0.08379, over 606164.98 frames. ], batch size: 29, lr: 3.95e-03, grad_scale: 64.0 +2023-03-26 04:00:42,592 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.047e+02 1.662e+02 1.994e+02 2.595e+02 4.858e+02, threshold=3.989e+02, percent-clipped=3.0 +2023-03-26 04:01:01,382 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23150.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:01:09,514 INFO [finetune.py:976] (5/7) Epoch 5, batch 250, loss[loss=0.2348, simple_loss=0.2962, pruned_loss=0.08667, over 4914.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.2795, pruned_loss=0.08411, over 685783.88 frames. ], batch size: 42, lr: 3.95e-03, grad_scale: 64.0 +2023-03-26 04:01:23,298 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23169.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:01:31,047 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23181.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:01:49,657 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9906, 1.1766, 1.5739, 1.6430, 1.5080, 1.5566, 1.5663, 1.6244], + device='cuda:5'), covar=tensor([0.7941, 1.1160, 0.9346, 0.9885, 1.1014, 0.8280, 1.3090, 0.8875], + device='cuda:5'), in_proj_covar=tensor([0.0230, 0.0251, 0.0257, 0.0261, 0.0242, 0.0219, 0.0278, 0.0224], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:01:57,782 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3772, 1.4657, 1.5691, 1.7721, 1.5441, 3.3598, 1.3150, 1.5780], + device='cuda:5'), covar=tensor([0.1008, 0.1746, 0.1313, 0.1084, 0.1607, 0.0230, 0.1443, 0.1631], + device='cuda:5'), in_proj_covar=tensor([0.0078, 0.0082, 0.0078, 0.0080, 0.0093, 0.0084, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 04:02:00,760 INFO [finetune.py:976] (5/7) Epoch 5, batch 300, loss[loss=0.2566, simple_loss=0.3226, pruned_loss=0.09524, over 4818.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.2814, pruned_loss=0.08404, over 745512.72 frames. 
], batch size: 33, lr: 3.95e-03, grad_scale: 64.0 +2023-03-26 04:02:11,903 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23217.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:02:20,057 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.235e+02 1.736e+02 2.142e+02 2.597e+02 5.294e+02, threshold=4.284e+02, percent-clipped=3.0 +2023-03-26 04:02:21,975 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5495, 1.4149, 2.1875, 3.2657, 2.2448, 2.2617, 0.8855, 2.5227], + device='cuda:5'), covar=tensor([0.1988, 0.1618, 0.1263, 0.0618, 0.0889, 0.1662, 0.1992, 0.0754], + device='cuda:5'), in_proj_covar=tensor([0.0104, 0.0119, 0.0136, 0.0167, 0.0103, 0.0143, 0.0128, 0.0105], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 04:02:31,213 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23229.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:02:47,930 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.10 vs. limit=5.0 +2023-03-26 04:03:03,005 INFO [finetune.py:976] (5/7) Epoch 5, batch 350, loss[loss=0.2037, simple_loss=0.2709, pruned_loss=0.06829, over 4742.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.2864, pruned_loss=0.08628, over 792449.61 frames. ], batch size: 27, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 04:03:16,137 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5222, 1.4045, 1.2751, 1.5538, 1.4169, 1.6052, 0.7830, 1.2992], + device='cuda:5'), covar=tensor([0.2520, 0.2387, 0.2045, 0.1906, 0.2107, 0.1274, 0.3252, 0.2081], + device='cuda:5'), in_proj_covar=tensor([0.0232, 0.0209, 0.0198, 0.0184, 0.0235, 0.0174, 0.0214, 0.0186], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:03:20,914 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23278.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:03:34,247 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23290.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:03:41,703 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23295.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:03:51,625 INFO [finetune.py:976] (5/7) Epoch 5, batch 400, loss[loss=0.2165, simple_loss=0.2801, pruned_loss=0.0764, over 4822.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.2889, pruned_loss=0.08733, over 829290.23 frames. 
], batch size: 30, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 04:03:58,185 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.903e+01 1.731e+02 2.116e+02 2.565e+02 5.981e+02, threshold=4.232e+02, percent-clipped=1.0 +2023-03-26 04:04:13,600 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=23343.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:04:16,622 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9277, 4.1948, 4.1090, 2.1254, 4.3250, 3.0965, 0.7165, 2.8320], + device='cuda:5'), covar=tensor([0.2840, 0.1565, 0.1301, 0.3061, 0.0750, 0.1057, 0.4690, 0.1486], + device='cuda:5'), in_proj_covar=tensor([0.0155, 0.0171, 0.0163, 0.0129, 0.0156, 0.0123, 0.0147, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 04:04:24,906 INFO [finetune.py:976] (5/7) Epoch 5, batch 450, loss[loss=0.2148, simple_loss=0.2788, pruned_loss=0.07543, over 4911.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.2882, pruned_loss=0.08724, over 857772.61 frames. ], batch size: 37, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 04:04:26,948 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-03-26 04:05:10,552 INFO [finetune.py:976] (5/7) Epoch 5, batch 500, loss[loss=0.3189, simple_loss=0.3573, pruned_loss=0.1402, over 4204.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.2847, pruned_loss=0.08616, over 876884.80 frames. ], batch size: 65, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 04:05:11,378 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-03-26 04:05:16,626 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.006e+02 1.775e+02 2.029e+02 2.615e+02 5.539e+02, threshold=4.057e+02, percent-clipped=1.0 +2023-03-26 04:05:42,946 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23450.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:05:51,804 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9696, 1.8452, 1.5359, 1.9512, 2.0354, 1.7135, 2.3156, 1.9585], + device='cuda:5'), covar=tensor([0.1665, 0.3192, 0.3894, 0.3199, 0.2719, 0.1870, 0.3616, 0.2292], + device='cuda:5'), in_proj_covar=tensor([0.0165, 0.0193, 0.0237, 0.0254, 0.0226, 0.0187, 0.0210, 0.0188], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:05:52,276 INFO [finetune.py:976] (5/7) Epoch 5, batch 550, loss[loss=0.2189, simple_loss=0.2712, pruned_loss=0.08329, over 4819.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.2796, pruned_loss=0.08336, over 893304.32 frames. 
], batch size: 51, lr: 3.95e-03, grad_scale: 32.0 +2023-03-26 04:05:54,213 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23464.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:06:15,722 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23481.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:06:23,747 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23488.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:06:26,096 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6754, 1.5469, 1.5546, 1.6827, 1.2061, 3.6233, 1.4165, 1.9123], + device='cuda:5'), covar=tensor([0.3446, 0.2495, 0.2121, 0.2349, 0.1952, 0.0182, 0.2660, 0.1355], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0112, 0.0116, 0.0120, 0.0116, 0.0097, 0.0101, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 04:06:30,292 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=23498.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:06:35,206 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3485, 1.5230, 1.6894, 1.8357, 1.5855, 3.3589, 1.3103, 1.5957], + device='cuda:5'), covar=tensor([0.0970, 0.1603, 0.1333, 0.1016, 0.1525, 0.0264, 0.1333, 0.1591], + device='cuda:5'), in_proj_covar=tensor([0.0078, 0.0082, 0.0078, 0.0080, 0.0093, 0.0084, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 04:06:38,164 INFO [finetune.py:976] (5/7) Epoch 5, batch 600, loss[loss=0.1499, simple_loss=0.227, pruned_loss=0.03641, over 4762.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.2786, pruned_loss=0.08361, over 906934.73 frames. ], batch size: 28, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:06:44,765 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.135e+02 1.759e+02 2.043e+02 2.434e+02 4.744e+02, threshold=4.086e+02, percent-clipped=2.0 +2023-03-26 04:06:51,177 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=23529.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:07:18,729 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23549.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 04:07:25,881 INFO [finetune.py:976] (5/7) Epoch 5, batch 650, loss[loss=0.2201, simple_loss=0.2838, pruned_loss=0.07824, over 4904.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.2817, pruned_loss=0.08429, over 915102.84 frames. 
], batch size: 37, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:07:33,765 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23573.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:07:43,053 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23585.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:08:01,902 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.2813, 2.8429, 2.6288, 1.4669, 2.7366, 2.4484, 2.3311, 2.3727], + device='cuda:5'), covar=tensor([0.0922, 0.0955, 0.1786, 0.2416, 0.1822, 0.2028, 0.1894, 0.1379], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0200, 0.0202, 0.0190, 0.0216, 0.0209, 0.0220, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:08:14,419 INFO [finetune.py:976] (5/7) Epoch 5, batch 700, loss[loss=0.2103, simple_loss=0.2733, pruned_loss=0.0736, over 4818.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.2834, pruned_loss=0.08523, over 922523.87 frames. ], batch size: 33, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:08:30,908 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.053e+02 1.701e+02 2.127e+02 2.576e+02 5.648e+02, threshold=4.253e+02, percent-clipped=2.0 +2023-03-26 04:09:04,042 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0497, 0.9802, 1.0910, 0.2894, 0.8444, 1.1524, 1.1934, 1.0942], + device='cuda:5'), covar=tensor([0.0985, 0.0584, 0.0511, 0.0646, 0.0586, 0.0626, 0.0425, 0.0700], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0158, 0.0118, 0.0137, 0.0133, 0.0122, 0.0148, 0.0145], + device='cuda:5'), out_proj_covar=tensor([9.8256e-05, 1.1722e-04, 8.5898e-05, 9.9929e-05, 9.5671e-05, 9.0316e-05, + 1.1025e-04, 1.0767e-04], device='cuda:5') +2023-03-26 04:09:25,209 INFO [finetune.py:976] (5/7) Epoch 5, batch 750, loss[loss=0.2378, simple_loss=0.2986, pruned_loss=0.08851, over 4820.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.285, pruned_loss=0.08591, over 929651.05 frames. ], batch size: 40, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:09:46,468 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-03-26 04:10:02,096 INFO [finetune.py:976] (5/7) Epoch 5, batch 800, loss[loss=0.1912, simple_loss=0.2634, pruned_loss=0.05954, over 4902.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.2851, pruned_loss=0.08522, over 936993.39 frames. ], batch size: 43, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:10:07,720 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8510, 1.2423, 0.7097, 1.7026, 2.1458, 1.3551, 1.6717, 1.8082], + device='cuda:5'), covar=tensor([0.1496, 0.2213, 0.2504, 0.1187, 0.2077, 0.2274, 0.1465, 0.1882], + device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0100, 0.0118, 0.0094, 0.0125, 0.0098, 0.0102, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 04:10:08,708 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.149e+02 1.635e+02 1.954e+02 2.429e+02 4.773e+02, threshold=3.908e+02, percent-clipped=1.0 +2023-03-26 04:10:56,825 INFO [finetune.py:976] (5/7) Epoch 5, batch 850, loss[loss=0.2242, simple_loss=0.281, pruned_loss=0.08368, over 4868.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.2844, pruned_loss=0.08552, over 940994.32 frames. 
], batch size: 31, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:11:04,267 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23764.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:11:47,447 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3407, 1.2614, 1.6050, 2.4391, 1.6826, 2.2175, 0.8563, 2.0087], + device='cuda:5'), covar=tensor([0.1812, 0.1639, 0.1103, 0.0663, 0.0906, 0.1148, 0.1700, 0.0711], + device='cuda:5'), in_proj_covar=tensor([0.0104, 0.0119, 0.0136, 0.0167, 0.0103, 0.0143, 0.0129, 0.0105], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 04:11:54,656 INFO [finetune.py:976] (5/7) Epoch 5, batch 900, loss[loss=0.2115, simple_loss=0.2638, pruned_loss=0.07958, over 4841.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.2796, pruned_loss=0.08294, over 942311.85 frames. ], batch size: 30, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:11:55,341 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=23812.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:12:00,785 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.157e+02 1.642e+02 1.957e+02 2.389e+02 4.840e+02, threshold=3.913e+02, percent-clipped=2.0 +2023-03-26 04:12:21,823 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23844.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 04:12:21,861 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3320, 1.1548, 1.2062, 1.1703, 1.5033, 1.4733, 1.2841, 1.1733], + device='cuda:5'), covar=tensor([0.0281, 0.0298, 0.0468, 0.0283, 0.0189, 0.0298, 0.0290, 0.0364], + device='cuda:5'), in_proj_covar=tensor([0.0087, 0.0114, 0.0138, 0.0119, 0.0105, 0.0101, 0.0091, 0.0110], + device='cuda:5'), out_proj_covar=tensor([6.7849e-05, 8.9462e-05, 1.1138e-04, 9.4098e-05, 8.2882e-05, 7.5384e-05, + 6.9843e-05, 8.5622e-05], device='cuda:5') +2023-03-26 04:12:37,546 INFO [finetune.py:976] (5/7) Epoch 5, batch 950, loss[loss=0.2277, simple_loss=0.2866, pruned_loss=0.08438, over 4916.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2772, pruned_loss=0.08224, over 944304.30 frames. ], batch size: 36, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:12:44,930 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23873.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:12:52,630 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23885.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:13:27,671 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1185, 1.3751, 1.0452, 1.3414, 1.4766, 2.5298, 1.1929, 1.5195], + device='cuda:5'), covar=tensor([0.1014, 0.1735, 0.1189, 0.1008, 0.1694, 0.0383, 0.1567, 0.1682], + device='cuda:5'), in_proj_covar=tensor([0.0078, 0.0082, 0.0078, 0.0080, 0.0093, 0.0084, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 04:13:28,833 INFO [finetune.py:976] (5/7) Epoch 5, batch 1000, loss[loss=0.2285, simple_loss=0.2926, pruned_loss=0.08217, over 4748.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.2797, pruned_loss=0.08408, over 946141.46 frames. 
], batch size: 27, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:13:30,100 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9378, 2.5507, 2.2176, 1.3205, 2.3362, 2.4040, 2.2139, 2.3994], + device='cuda:5'), covar=tensor([0.0697, 0.0731, 0.1371, 0.1824, 0.1330, 0.1543, 0.1547, 0.0811], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0200, 0.0202, 0.0190, 0.0216, 0.0208, 0.0220, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:13:38,584 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.702e+02 2.066e+02 2.385e+02 5.722e+02, threshold=4.131e+02, percent-clipped=3.0 +2023-03-26 04:13:38,657 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=23921.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:13:47,709 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=23933.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:14:14,856 INFO [finetune.py:976] (5/7) Epoch 5, batch 1050, loss[loss=0.2739, simple_loss=0.3262, pruned_loss=0.1108, over 4840.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.284, pruned_loss=0.08539, over 948576.54 frames. ], batch size: 44, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:15:11,174 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1471, 3.5780, 3.7486, 4.0482, 3.8738, 3.5832, 4.2316, 1.3728], + device='cuda:5'), covar=tensor([0.0767, 0.0823, 0.0788, 0.0831, 0.1300, 0.1506, 0.0638, 0.4906], + device='cuda:5'), in_proj_covar=tensor([0.0357, 0.0243, 0.0273, 0.0290, 0.0337, 0.0284, 0.0304, 0.0297], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:15:19,194 INFO [finetune.py:976] (5/7) Epoch 5, batch 1100, loss[loss=0.2126, simple_loss=0.2782, pruned_loss=0.07345, over 4896.00 frames. ], tot_loss[loss=0.229, simple_loss=0.2862, pruned_loss=0.08593, over 950444.43 frames. ], batch size: 37, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:15:28,416 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.244e+02 1.825e+02 2.109e+02 2.589e+02 5.024e+02, threshold=4.219e+02, percent-clipped=4.0 +2023-03-26 04:15:38,235 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=24037.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:15:54,485 INFO [finetune.py:976] (5/7) Epoch 5, batch 1150, loss[loss=0.2008, simple_loss=0.269, pruned_loss=0.06632, over 4764.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.2872, pruned_loss=0.08588, over 952911.51 frames. ], batch size: 28, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:16:18,936 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=24098.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:16:28,016 INFO [finetune.py:976] (5/7) Epoch 5, batch 1200, loss[loss=0.241, simple_loss=0.2988, pruned_loss=0.09156, over 4741.00 frames. ], tot_loss[loss=0.2287, simple_loss=0.2863, pruned_loss=0.08553, over 951923.06 frames. ], batch size: 54, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:16:35,107 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.11 vs. 
limit=5.0 +2023-03-26 04:16:37,230 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.193e+02 1.721e+02 2.129e+02 2.606e+02 7.150e+02, threshold=4.257e+02, percent-clipped=3.0 +2023-03-26 04:16:46,556 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1155, 1.2900, 0.9770, 1.2967, 1.4166, 2.5133, 1.1193, 1.4570], + device='cuda:5'), covar=tensor([0.1010, 0.1808, 0.1208, 0.1010, 0.1617, 0.0371, 0.1558, 0.1691], + device='cuda:5'), in_proj_covar=tensor([0.0078, 0.0081, 0.0077, 0.0079, 0.0092, 0.0083, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 04:16:51,889 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=24144.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 04:16:59,643 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-03-26 04:17:03,455 INFO [finetune.py:976] (5/7) Epoch 5, batch 1250, loss[loss=0.2647, simple_loss=0.3144, pruned_loss=0.1075, over 4816.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.2841, pruned_loss=0.08532, over 953138.19 frames. ], batch size: 40, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:17:28,510 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=24192.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:17:42,425 INFO [finetune.py:976] (5/7) Epoch 5, batch 1300, loss[loss=0.2065, simple_loss=0.2536, pruned_loss=0.07968, over 3992.00 frames. ], tot_loss[loss=0.223, simple_loss=0.2797, pruned_loss=0.0832, over 953930.07 frames. ], batch size: 17, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:17:56,618 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.098e+02 1.607e+02 1.851e+02 2.364e+02 3.844e+02, threshold=3.702e+02, percent-clipped=0.0 +2023-03-26 04:18:19,565 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1003, 1.7574, 2.8011, 1.5638, 2.3827, 2.5807, 1.8100, 2.5352], + device='cuda:5'), covar=tensor([0.1712, 0.2270, 0.1396, 0.2561, 0.0969, 0.1564, 0.2572, 0.1041], + device='cuda:5'), in_proj_covar=tensor([0.0204, 0.0204, 0.0200, 0.0195, 0.0182, 0.0221, 0.0214, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:18:34,750 INFO [finetune.py:976] (5/7) Epoch 5, batch 1350, loss[loss=0.2441, simple_loss=0.3046, pruned_loss=0.09177, over 4724.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.2796, pruned_loss=0.08309, over 955252.88 frames. ], batch size: 59, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:18:37,164 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7797, 1.8205, 1.8145, 1.0727, 2.0231, 1.9634, 1.8232, 1.6465], + device='cuda:5'), covar=tensor([0.0662, 0.0633, 0.0685, 0.0959, 0.0507, 0.0698, 0.0663, 0.1098], + device='cuda:5'), in_proj_covar=tensor([0.0139, 0.0134, 0.0145, 0.0128, 0.0112, 0.0144, 0.0147, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:19:12,806 INFO [finetune.py:976] (5/7) Epoch 5, batch 1400, loss[loss=0.2803, simple_loss=0.3346, pruned_loss=0.113, over 4769.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.2828, pruned_loss=0.08399, over 953974.58 frames. 
], batch size: 54, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:19:21,621 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.173e+02 1.714e+02 2.138e+02 2.571e+02 4.877e+02, threshold=4.276e+02, percent-clipped=6.0 +2023-03-26 04:19:44,456 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-03-26 04:19:52,155 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8793, 1.7432, 1.4866, 1.6897, 1.8841, 1.5688, 2.1210, 1.8852], + device='cuda:5'), covar=tensor([0.1812, 0.3297, 0.4258, 0.3418, 0.3123, 0.2097, 0.3791, 0.2387], + device='cuda:5'), in_proj_covar=tensor([0.0166, 0.0194, 0.0238, 0.0255, 0.0228, 0.0189, 0.0211, 0.0190], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:19:56,902 INFO [finetune.py:976] (5/7) Epoch 5, batch 1450, loss[loss=0.2595, simple_loss=0.307, pruned_loss=0.106, over 4754.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.2845, pruned_loss=0.0844, over 953450.83 frames. ], batch size: 26, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:20:20,192 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24393.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:20:31,768 INFO [finetune.py:976] (5/7) Epoch 5, batch 1500, loss[loss=0.2007, simple_loss=0.271, pruned_loss=0.06522, over 4823.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.2845, pruned_loss=0.08431, over 954792.95 frames. ], batch size: 30, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:20:38,324 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.023e+02 1.776e+02 2.138e+02 2.564e+02 4.291e+02, threshold=4.276e+02, percent-clipped=1.0 +2023-03-26 04:21:13,454 INFO [finetune.py:976] (5/7) Epoch 5, batch 1550, loss[loss=0.2144, simple_loss=0.2841, pruned_loss=0.07233, over 4860.00 frames. ], tot_loss[loss=0.226, simple_loss=0.2844, pruned_loss=0.08381, over 956940.41 frames. ], batch size: 34, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:21:47,122 INFO [finetune.py:976] (5/7) Epoch 5, batch 1600, loss[loss=0.1875, simple_loss=0.2517, pruned_loss=0.06162, over 4820.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.282, pruned_loss=0.08275, over 956590.41 frames. ], batch size: 38, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:21:58,799 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.205e+02 1.770e+02 2.018e+02 2.552e+02 5.194e+02, threshold=4.037e+02, percent-clipped=4.0 +2023-03-26 04:22:21,141 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=24547.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:22:33,857 INFO [finetune.py:976] (5/7) Epoch 5, batch 1650, loss[loss=0.1907, simple_loss=0.2512, pruned_loss=0.06514, over 4679.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2786, pruned_loss=0.0814, over 956915.43 frames. 
], batch size: 23, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:22:43,199 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=24569.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:23:14,932 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.7571, 4.1041, 4.2458, 4.5583, 4.4573, 4.2714, 4.8520, 1.4832], + device='cuda:5'), covar=tensor([0.0750, 0.0922, 0.0805, 0.0974, 0.1315, 0.1577, 0.0570, 0.5613], + device='cuda:5'), in_proj_covar=tensor([0.0356, 0.0241, 0.0272, 0.0290, 0.0336, 0.0283, 0.0302, 0.0297], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:23:17,424 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=24608.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:23:22,440 INFO [finetune.py:976] (5/7) Epoch 5, batch 1700, loss[loss=0.198, simple_loss=0.2574, pruned_loss=0.06932, over 4763.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2749, pruned_loss=0.07992, over 957189.45 frames. ], batch size: 54, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:23:31,254 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.092e+02 1.673e+02 1.915e+02 2.251e+02 4.027e+02, threshold=3.830e+02, percent-clipped=0.0 +2023-03-26 04:23:42,118 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=24630.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:24:06,429 INFO [finetune.py:976] (5/7) Epoch 5, batch 1750, loss[loss=0.2724, simple_loss=0.3271, pruned_loss=0.1088, over 4922.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2786, pruned_loss=0.08161, over 958326.82 frames. ], batch size: 42, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:24:27,999 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=24693.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:24:39,307 INFO [finetune.py:976] (5/7) Epoch 5, batch 1800, loss[loss=0.2843, simple_loss=0.3384, pruned_loss=0.1152, over 4735.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.2823, pruned_loss=0.08297, over 957334.96 frames. ], batch size: 54, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:24:45,830 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.243e+02 1.800e+02 2.166e+02 2.491e+02 4.201e+02, threshold=4.331e+02, percent-clipped=2.0 +2023-03-26 04:24:59,913 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=24741.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:25:12,942 INFO [finetune.py:976] (5/7) Epoch 5, batch 1850, loss[loss=0.2784, simple_loss=0.3301, pruned_loss=0.1134, over 4830.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.2853, pruned_loss=0.08445, over 959138.58 frames. ], batch size: 49, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:25:13,276 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.70 vs. limit=5.0 +2023-03-26 04:25:15,493 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=24765.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:25:46,396 INFO [finetune.py:976] (5/7) Epoch 5, batch 1900, loss[loss=0.2383, simple_loss=0.2957, pruned_loss=0.09047, over 4878.00 frames. ], tot_loss[loss=0.228, simple_loss=0.2861, pruned_loss=0.08489, over 958034.87 frames. 
], batch size: 32, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:25:52,459 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.150e+02 1.802e+02 2.061e+02 2.489e+02 6.200e+02, threshold=4.122e+02, percent-clipped=1.0 +2023-03-26 04:25:57,963 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=24826.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:26:20,846 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=24848.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:26:29,633 INFO [finetune.py:976] (5/7) Epoch 5, batch 1950, loss[loss=0.1745, simple_loss=0.2282, pruned_loss=0.06036, over 4801.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.2841, pruned_loss=0.08419, over 956566.54 frames. ], batch size: 45, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:26:45,928 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5252, 1.5392, 1.3879, 1.3695, 1.8363, 1.7457, 1.6345, 1.5304], + device='cuda:5'), covar=tensor([0.0322, 0.0361, 0.0509, 0.0345, 0.0248, 0.0486, 0.0341, 0.0363], + device='cuda:5'), in_proj_covar=tensor([0.0088, 0.0114, 0.0139, 0.0119, 0.0106, 0.0101, 0.0092, 0.0110], + device='cuda:5'), out_proj_covar=tensor([6.8754e-05, 8.9727e-05, 1.1203e-04, 9.3938e-05, 8.3920e-05, 7.5348e-05, + 7.0512e-05, 8.6165e-05], device='cuda:5') +2023-03-26 04:26:57,668 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24903.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:27:01,815 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=24909.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:27:02,458 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1103, 1.8782, 1.8111, 2.0890, 2.5739, 1.9959, 1.8614, 1.5749], + device='cuda:5'), covar=tensor([0.2413, 0.2591, 0.2082, 0.1931, 0.2202, 0.1342, 0.2741, 0.2051], + device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0211, 0.0201, 0.0186, 0.0238, 0.0175, 0.0217, 0.0188], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:27:02,944 INFO [finetune.py:976] (5/7) Epoch 5, batch 2000, loss[loss=0.1901, simple_loss=0.2593, pruned_loss=0.06044, over 4918.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.2808, pruned_loss=0.08317, over 957179.19 frames. ], batch size: 36, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:27:13,002 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.094e+02 1.611e+02 2.012e+02 2.424e+02 3.709e+02, threshold=4.024e+02, percent-clipped=0.0 +2023-03-26 04:27:15,540 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24925.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:27:50,326 INFO [finetune.py:976] (5/7) Epoch 5, batch 2050, loss[loss=0.1808, simple_loss=0.2555, pruned_loss=0.053, over 4914.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2769, pruned_loss=0.08124, over 958066.06 frames. 
], batch size: 36, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:27:57,766 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6425, 1.4386, 1.4878, 1.5855, 1.1593, 3.4099, 1.4460, 1.9760], + device='cuda:5'), covar=tensor([0.3410, 0.2407, 0.2165, 0.2288, 0.1921, 0.0181, 0.2636, 0.1249], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0114, 0.0118, 0.0122, 0.0118, 0.0098, 0.0101, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 04:28:08,391 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.82 vs. limit=5.0 +2023-03-26 04:28:12,195 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=24995.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:28:23,340 INFO [finetune.py:976] (5/7) Epoch 5, batch 2100, loss[loss=0.2485, simple_loss=0.3047, pruned_loss=0.09609, over 4738.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2752, pruned_loss=0.08034, over 954341.41 frames. ], batch size: 59, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:28:39,037 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.247e+01 1.663e+02 1.989e+02 2.457e+02 4.446e+02, threshold=3.978e+02, percent-clipped=1.0 +2023-03-26 04:28:42,874 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1766, 2.1423, 1.6850, 0.9048, 1.8402, 1.7478, 1.5531, 1.8837], + device='cuda:5'), covar=tensor([0.0955, 0.0650, 0.1464, 0.1972, 0.1338, 0.2180, 0.2161, 0.0911], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0201, 0.0203, 0.0190, 0.0216, 0.0209, 0.0221, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:28:52,640 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-03-26 04:28:57,713 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9808, 1.7434, 1.6463, 1.8730, 2.5021, 1.8474, 1.8538, 1.4072], + device='cuda:5'), covar=tensor([0.2406, 0.2430, 0.2140, 0.1991, 0.2220, 0.1306, 0.2708, 0.2033], + device='cuda:5'), in_proj_covar=tensor([0.0233, 0.0208, 0.0199, 0.0184, 0.0235, 0.0174, 0.0214, 0.0187], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:29:08,276 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25056.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 04:29:11,162 INFO [finetune.py:976] (5/7) Epoch 5, batch 2150, loss[loss=0.2425, simple_loss=0.3058, pruned_loss=0.08959, over 4912.00 frames. ], tot_loss[loss=0.2223, simple_loss=0.2802, pruned_loss=0.0822, over 952668.95 frames. ], batch size: 43, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:29:25,377 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-03-26 04:29:45,152 INFO [finetune.py:976] (5/7) Epoch 5, batch 2200, loss[loss=0.244, simple_loss=0.3018, pruned_loss=0.09311, over 4838.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.2822, pruned_loss=0.08305, over 952298.48 frames. 
], batch size: 30, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:29:52,261 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.068e+02 1.691e+02 1.983e+02 2.301e+02 4.176e+02, threshold=3.967e+02, percent-clipped=1.0 +2023-03-26 04:29:52,345 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=25121.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:29:54,189 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8608, 1.2830, 0.9061, 1.6768, 2.1414, 1.3974, 1.6528, 1.7823], + device='cuda:5'), covar=tensor([0.1389, 0.1929, 0.2056, 0.1197, 0.1901, 0.1973, 0.1356, 0.1822], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0098, 0.0116, 0.0093, 0.0123, 0.0096, 0.0100, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 04:29:56,636 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6106, 1.4656, 1.4365, 1.5671, 0.9414, 3.3092, 1.2961, 1.8023], + device='cuda:5'), covar=tensor([0.3488, 0.2512, 0.2166, 0.2399, 0.2205, 0.0193, 0.2937, 0.1422], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0114, 0.0117, 0.0121, 0.0117, 0.0098, 0.0101, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 04:30:00,394 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9332, 1.1978, 1.7465, 1.7022, 1.5644, 1.5756, 1.5901, 1.6044], + device='cuda:5'), covar=tensor([0.5220, 0.7769, 0.6174, 0.6958, 0.8012, 0.5730, 0.8864, 0.5976], + device='cuda:5'), in_proj_covar=tensor([0.0228, 0.0249, 0.0255, 0.0259, 0.0241, 0.0218, 0.0274, 0.0222], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:30:18,607 INFO [finetune.py:976] (5/7) Epoch 5, batch 2250, loss[loss=0.1785, simple_loss=0.2482, pruned_loss=0.05439, over 4749.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.2827, pruned_loss=0.08302, over 950983.17 frames. ], batch size: 54, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:30:29,283 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6195, 1.1782, 0.8985, 1.4351, 2.0490, 1.0016, 1.3858, 1.6608], + device='cuda:5'), covar=tensor([0.1502, 0.2112, 0.2071, 0.1311, 0.2043, 0.2216, 0.1460, 0.1962], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0098, 0.0117, 0.0094, 0.0124, 0.0097, 0.0101, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 04:30:32,846 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2089, 3.6562, 3.8377, 4.0445, 3.9604, 3.6778, 4.3091, 1.4542], + device='cuda:5'), covar=tensor([0.0760, 0.0829, 0.0702, 0.0815, 0.1196, 0.1465, 0.0605, 0.4985], + device='cuda:5'), in_proj_covar=tensor([0.0359, 0.0244, 0.0274, 0.0292, 0.0338, 0.0283, 0.0304, 0.0299], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:30:43,740 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. 
limit=2.0 +2023-03-26 04:30:46,376 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25203.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:30:47,788 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=25204.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:30:53,044 INFO [finetune.py:976] (5/7) Epoch 5, batch 2300, loss[loss=0.1815, simple_loss=0.2439, pruned_loss=0.05953, over 4902.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.284, pruned_loss=0.08348, over 952540.45 frames. ], batch size: 37, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:31:05,176 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.234e+02 1.840e+02 2.117e+02 2.638e+02 5.911e+02, threshold=4.234e+02, percent-clipped=5.0 +2023-03-26 04:31:11,682 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4113, 1.3316, 1.4946, 2.4655, 1.7333, 2.0827, 0.8858, 2.0303], + device='cuda:5'), covar=tensor([0.1799, 0.1629, 0.1267, 0.0709, 0.0900, 0.1160, 0.1693, 0.0761], + device='cuda:5'), in_proj_covar=tensor([0.0103, 0.0118, 0.0135, 0.0165, 0.0102, 0.0142, 0.0128, 0.0104], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 04:31:14,049 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25225.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:31:35,991 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=25251.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:31:42,890 INFO [finetune.py:976] (5/7) Epoch 5, batch 2350, loss[loss=0.1965, simple_loss=0.2543, pruned_loss=0.06937, over 4832.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.2819, pruned_loss=0.08284, over 951307.50 frames. ], batch size: 47, lr: 3.94e-03, grad_scale: 64.0 +2023-03-26 04:31:51,257 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=25273.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:32:16,769 INFO [finetune.py:976] (5/7) Epoch 5, batch 2400, loss[loss=0.2629, simple_loss=0.3014, pruned_loss=0.1121, over 4896.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2782, pruned_loss=0.08154, over 951447.57 frames. ], batch size: 35, lr: 3.94e-03, grad_scale: 64.0 +2023-03-26 04:32:23,862 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.140e+02 1.631e+02 1.900e+02 2.318e+02 5.058e+02, threshold=3.799e+02, percent-clipped=1.0 +2023-03-26 04:32:58,174 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=25351.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 04:33:04,184 INFO [finetune.py:976] (5/7) Epoch 5, batch 2450, loss[loss=0.2785, simple_loss=0.3142, pruned_loss=0.1214, over 4905.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2742, pruned_loss=0.07999, over 953148.91 frames. ], batch size: 35, lr: 3.94e-03, grad_scale: 64.0 +2023-03-26 04:34:02,098 INFO [finetune.py:976] (5/7) Epoch 5, batch 2500, loss[loss=0.2115, simple_loss=0.2773, pruned_loss=0.07291, over 4830.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2753, pruned_loss=0.0804, over 951711.22 frames. 
], batch size: 49, lr: 3.94e-03, grad_scale: 64.0 +2023-03-26 04:34:18,830 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.793e+02 2.115e+02 2.620e+02 5.379e+02, threshold=4.229e+02, percent-clipped=6.0 +2023-03-26 04:34:18,937 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25421.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:34:24,896 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7649, 1.6412, 1.6427, 1.6572, 1.1707, 2.9155, 1.2985, 1.7689], + device='cuda:5'), covar=tensor([0.2930, 0.2082, 0.1763, 0.2159, 0.1761, 0.0284, 0.2231, 0.1110], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0114, 0.0117, 0.0122, 0.0117, 0.0098, 0.0101, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 04:34:26,150 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25428.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 04:34:46,578 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7373, 1.6327, 1.4228, 1.4915, 1.9585, 1.9082, 1.7192, 1.3993], + device='cuda:5'), covar=tensor([0.0311, 0.0273, 0.0586, 0.0319, 0.0210, 0.0412, 0.0297, 0.0393], + device='cuda:5'), in_proj_covar=tensor([0.0088, 0.0113, 0.0139, 0.0119, 0.0106, 0.0102, 0.0092, 0.0110], + device='cuda:5'), out_proj_covar=tensor([6.8490e-05, 8.9277e-05, 1.1205e-04, 9.4044e-05, 8.3925e-05, 7.5613e-05, + 7.0241e-05, 8.5939e-05], device='cuda:5') +2023-03-26 04:34:47,669 INFO [finetune.py:976] (5/7) Epoch 5, batch 2550, loss[loss=0.2253, simple_loss=0.2805, pruned_loss=0.0851, over 4892.00 frames. ], tot_loss[loss=0.2223, simple_loss=0.28, pruned_loss=0.08234, over 951410.80 frames. ], batch size: 32, lr: 3.94e-03, grad_scale: 64.0 +2023-03-26 04:34:53,577 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=25469.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:35:07,251 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25489.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 04:35:16,176 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25504.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:35:20,834 INFO [finetune.py:976] (5/7) Epoch 5, batch 2600, loss[loss=0.1923, simple_loss=0.2674, pruned_loss=0.0586, over 4873.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.2825, pruned_loss=0.08333, over 953964.83 frames. ], batch size: 34, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:35:28,040 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.057e+02 1.693e+02 2.088e+02 2.425e+02 4.415e+02, threshold=4.177e+02, percent-clipped=1.0 +2023-03-26 04:35:48,653 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=25552.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:35:52,017 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-03-26 04:35:54,566 INFO [finetune.py:976] (5/7) Epoch 5, batch 2650, loss[loss=0.1908, simple_loss=0.2632, pruned_loss=0.0592, over 4814.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.2831, pruned_loss=0.08322, over 952108.69 frames. ], batch size: 33, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:36:33,955 INFO [finetune.py:976] (5/7) Epoch 5, batch 2700, loss[loss=0.2185, simple_loss=0.2779, pruned_loss=0.07957, over 4781.00 frames. 
], tot_loss[loss=0.2231, simple_loss=0.2819, pruned_loss=0.08219, over 951361.23 frames. ], batch size: 28, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:36:50,929 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.205e+02 1.711e+02 2.002e+02 2.331e+02 3.948e+02, threshold=4.004e+02, percent-clipped=0.0 +2023-03-26 04:37:23,204 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9056, 2.4639, 2.1729, 1.1148, 2.3226, 2.2844, 1.8655, 2.1271], + device='cuda:5'), covar=tensor([0.0677, 0.0827, 0.1385, 0.1925, 0.1446, 0.1716, 0.1943, 0.1000], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0200, 0.0203, 0.0189, 0.0215, 0.0208, 0.0219, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:37:26,221 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25651.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 04:37:28,105 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7611, 1.5684, 2.2566, 3.3679, 2.3129, 2.5298, 1.1508, 2.6399], + device='cuda:5'), covar=tensor([0.1773, 0.1431, 0.1195, 0.0565, 0.0779, 0.1216, 0.1890, 0.0685], + device='cuda:5'), in_proj_covar=tensor([0.0103, 0.0118, 0.0136, 0.0166, 0.0102, 0.0142, 0.0128, 0.0104], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 04:37:32,346 INFO [finetune.py:976] (5/7) Epoch 5, batch 2750, loss[loss=0.192, simple_loss=0.2545, pruned_loss=0.0648, over 4815.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2788, pruned_loss=0.0812, over 949289.23 frames. ], batch size: 39, lr: 3.94e-03, grad_scale: 32.0 +2023-03-26 04:37:42,158 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-03-26 04:37:52,007 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.6405, 1.5544, 1.4048, 0.9358, 1.4406, 1.6377, 1.6794, 1.3372], + device='cuda:5'), covar=tensor([0.0706, 0.0349, 0.0498, 0.0515, 0.0410, 0.0392, 0.0219, 0.0535], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0158, 0.0119, 0.0137, 0.0133, 0.0123, 0.0148, 0.0145], + device='cuda:5'), out_proj_covar=tensor([9.8422e-05, 1.1748e-04, 8.6007e-05, 9.9784e-05, 9.6119e-05, 9.0767e-05, + 1.0979e-04, 1.0779e-04], device='cuda:5') +2023-03-26 04:37:58,592 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=25699.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:38:05,335 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7249, 1.5100, 1.3717, 1.0927, 1.4750, 1.4481, 1.4199, 2.0758], + device='cuda:5'), covar=tensor([0.7155, 0.6860, 0.5304, 0.6676, 0.5819, 0.3822, 0.6342, 0.2692], + device='cuda:5'), in_proj_covar=tensor([0.0280, 0.0255, 0.0220, 0.0283, 0.0237, 0.0199, 0.0242, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:38:07,651 INFO [finetune.py:976] (5/7) Epoch 5, batch 2800, loss[loss=0.2272, simple_loss=0.2712, pruned_loss=0.09165, over 4897.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2755, pruned_loss=0.07964, over 949965.09 frames. 
], batch size: 36, lr: 3.93e-03, grad_scale: 32.0 +2023-03-26 04:38:23,891 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.133e+02 1.611e+02 1.888e+02 2.301e+02 3.388e+02, threshold=3.776e+02, percent-clipped=0.0 +2023-03-26 04:38:25,428 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.86 vs. limit=5.0 +2023-03-26 04:38:28,100 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-03-26 04:39:01,114 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.98 vs. limit=5.0 +2023-03-26 04:39:03,402 INFO [finetune.py:976] (5/7) Epoch 5, batch 2850, loss[loss=0.2091, simple_loss=0.2689, pruned_loss=0.07472, over 4836.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2733, pruned_loss=0.07866, over 949685.35 frames. ], batch size: 30, lr: 3.93e-03, grad_scale: 32.0 +2023-03-26 04:39:07,825 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1251, 1.9563, 2.0595, 0.8509, 2.2135, 2.4480, 2.0376, 1.9371], + device='cuda:5'), covar=tensor([0.0921, 0.0815, 0.0493, 0.0816, 0.0488, 0.0519, 0.0452, 0.0691], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0158, 0.0118, 0.0137, 0.0133, 0.0123, 0.0148, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.8223e-05, 1.1727e-04, 8.5798e-05, 9.9764e-05, 9.6029e-05, 9.0680e-05, + 1.0974e-04, 1.0791e-04], device='cuda:5') +2023-03-26 04:39:18,522 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=25784.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 04:39:37,535 INFO [finetune.py:976] (5/7) Epoch 5, batch 2900, loss[loss=0.266, simple_loss=0.3338, pruned_loss=0.09906, over 4752.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2786, pruned_loss=0.08104, over 949712.87 frames. ], batch size: 54, lr: 3.93e-03, grad_scale: 32.0 +2023-03-26 04:39:44,762 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.053e+02 1.812e+02 2.065e+02 2.463e+02 5.082e+02, threshold=4.130e+02, percent-clipped=4.0 +2023-03-26 04:40:09,029 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25858.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 04:40:10,731 INFO [finetune.py:976] (5/7) Epoch 5, batch 2950, loss[loss=0.22, simple_loss=0.266, pruned_loss=0.08703, over 4763.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.2816, pruned_loss=0.08226, over 950756.46 frames. ], batch size: 26, lr: 3.93e-03, grad_scale: 32.0 +2023-03-26 04:40:29,913 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-03-26 04:40:43,949 INFO [finetune.py:976] (5/7) Epoch 5, batch 3000, loss[loss=0.2438, simple_loss=0.3116, pruned_loss=0.08794, over 4733.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.2836, pruned_loss=0.08305, over 951843.34 frames. ], batch size: 54, lr: 3.93e-03, grad_scale: 32.0 +2023-03-26 04:40:43,949 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 04:40:54,557 INFO [finetune.py:1010] (5/7) Epoch 5, validation: loss=0.1652, simple_loss=0.2371, pruned_loss=0.04667, over 2265189.00 frames. 
+2023-03-26 04:40:54,557 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 04:41:00,087 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25919.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 04:41:01,808 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.188e+02 1.739e+02 2.096e+02 2.435e+02 4.160e+02, threshold=4.193e+02, percent-clipped=2.0 +2023-03-26 04:41:05,191 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.62 vs. limit=2.0 +2023-03-26 04:41:27,995 INFO [finetune.py:976] (5/7) Epoch 5, batch 3050, loss[loss=0.2493, simple_loss=0.31, pruned_loss=0.09429, over 4905.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.2849, pruned_loss=0.08371, over 952551.31 frames. ], batch size: 36, lr: 3.93e-03, grad_scale: 32.0 +2023-03-26 04:41:43,405 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-03-26 04:42:08,230 INFO [finetune.py:976] (5/7) Epoch 5, batch 3100, loss[loss=0.2073, simple_loss=0.2606, pruned_loss=0.07705, over 4374.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.2833, pruned_loss=0.08246, over 953417.08 frames. ], batch size: 19, lr: 3.93e-03, grad_scale: 32.0 +2023-03-26 04:42:25,412 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.176e+02 1.603e+02 1.879e+02 2.413e+02 4.411e+02, threshold=3.758e+02, percent-clipped=2.0 +2023-03-26 04:43:10,314 INFO [finetune.py:976] (5/7) Epoch 5, batch 3150, loss[loss=0.1879, simple_loss=0.2442, pruned_loss=0.06583, over 4721.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2795, pruned_loss=0.08133, over 954405.99 frames. ], batch size: 23, lr: 3.93e-03, grad_scale: 32.0 +2023-03-26 04:43:27,259 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0997, 1.9342, 1.4569, 1.8624, 1.9774, 1.6743, 2.7151, 1.9828], + device='cuda:5'), covar=tensor([0.1469, 0.2729, 0.3729, 0.3441, 0.2870, 0.1739, 0.2559, 0.2266], + device='cuda:5'), in_proj_covar=tensor([0.0165, 0.0192, 0.0234, 0.0253, 0.0227, 0.0187, 0.0209, 0.0189], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:43:30,918 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26084.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 04:43:42,330 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26102.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:43:49,554 INFO [finetune.py:976] (5/7) Epoch 5, batch 3200, loss[loss=0.1889, simple_loss=0.2552, pruned_loss=0.06133, over 4823.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2746, pruned_loss=0.07902, over 953919.30 frames. ], batch size: 33, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:43:58,349 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.040e+02 1.599e+02 1.999e+02 2.453e+02 4.323e+02, threshold=3.997e+02, percent-clipped=1.0 +2023-03-26 04:44:04,860 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=26132.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 04:44:37,844 INFO [finetune.py:976] (5/7) Epoch 5, batch 3250, loss[loss=0.2663, simple_loss=0.3193, pruned_loss=0.1067, over 4740.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2751, pruned_loss=0.07932, over 952790.96 frames. 
], batch size: 54, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:44:43,930 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26163.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:44:47,964 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26168.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:44:59,142 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9319, 1.8706, 1.5932, 2.0314, 1.8841, 1.8128, 1.7872, 2.5232], + device='cuda:5'), covar=tensor([0.6831, 0.7992, 0.5564, 0.7327, 0.6841, 0.3881, 0.7932, 0.2462], + device='cuda:5'), in_proj_covar=tensor([0.0279, 0.0254, 0.0219, 0.0281, 0.0237, 0.0198, 0.0241, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:45:29,259 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.41 vs. limit=5.0 +2023-03-26 04:45:40,773 INFO [finetune.py:976] (5/7) Epoch 5, batch 3300, loss[loss=0.2111, simple_loss=0.2774, pruned_loss=0.07243, over 4736.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2794, pruned_loss=0.08137, over 952761.22 frames. ], batch size: 59, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:45:47,649 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26214.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 04:46:00,062 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.165e+02 1.721e+02 2.149e+02 2.490e+02 4.939e+02, threshold=4.298e+02, percent-clipped=1.0 +2023-03-26 04:46:06,009 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26229.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:46:19,856 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26250.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:46:29,542 INFO [finetune.py:976] (5/7) Epoch 5, batch 3350, loss[loss=0.2401, simple_loss=0.2818, pruned_loss=0.09924, over 4749.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.2817, pruned_loss=0.08189, over 953907.68 frames. ], batch size: 23, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:46:30,318 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.80 vs. limit=5.0 +2023-03-26 04:47:12,172 INFO [finetune.py:976] (5/7) Epoch 5, batch 3400, loss[loss=0.2222, simple_loss=0.2877, pruned_loss=0.07841, over 4748.00 frames. ], tot_loss[loss=0.224, simple_loss=0.2831, pruned_loss=0.0824, over 954030.02 frames. ], batch size: 54, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:47:12,306 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26311.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:47:19,914 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.097e+02 1.624e+02 1.904e+02 2.361e+02 4.543e+02, threshold=3.807e+02, percent-clipped=1.0 +2023-03-26 04:47:36,623 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-03-26 04:47:53,150 INFO [finetune.py:976] (5/7) Epoch 5, batch 3450, loss[loss=0.2082, simple_loss=0.2816, pruned_loss=0.06741, over 4914.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.2829, pruned_loss=0.08195, over 955210.75 frames. 
], batch size: 38, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:48:27,143 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7751, 1.4351, 0.8924, 1.6856, 2.1242, 1.2475, 1.6179, 1.7084], + device='cuda:5'), covar=tensor([0.1491, 0.2026, 0.2156, 0.1180, 0.1985, 0.2147, 0.1405, 0.1960], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0098, 0.0116, 0.0094, 0.0124, 0.0097, 0.0101, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 04:48:35,620 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4447, 1.2230, 1.2574, 1.3355, 1.6588, 1.5733, 1.4180, 1.2519], + device='cuda:5'), covar=tensor([0.0305, 0.0315, 0.0507, 0.0266, 0.0176, 0.0418, 0.0259, 0.0342], + device='cuda:5'), in_proj_covar=tensor([0.0086, 0.0111, 0.0136, 0.0116, 0.0104, 0.0100, 0.0090, 0.0108], + device='cuda:5'), out_proj_covar=tensor([6.7152e-05, 8.7641e-05, 1.0969e-04, 9.2133e-05, 8.2060e-05, 7.4241e-05, + 6.8915e-05, 8.4308e-05], device='cuda:5') +2023-03-26 04:48:37,961 INFO [finetune.py:976] (5/7) Epoch 5, batch 3500, loss[loss=0.2151, simple_loss=0.2753, pruned_loss=0.07741, over 4912.00 frames. ], tot_loss[loss=0.222, simple_loss=0.2807, pruned_loss=0.08165, over 956413.53 frames. ], batch size: 37, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:48:43,277 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9480, 1.7410, 2.4525, 1.5167, 2.1365, 2.2570, 1.6754, 2.3769], + device='cuda:5'), covar=tensor([0.1581, 0.2187, 0.1403, 0.2328, 0.1085, 0.1679, 0.2913, 0.0938], + device='cuda:5'), in_proj_covar=tensor([0.0206, 0.0205, 0.0202, 0.0196, 0.0185, 0.0224, 0.0216, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:48:43,566 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-03-26 04:48:43,908 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26414.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:48:50,234 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1650, 1.9793, 1.4620, 0.6522, 1.6476, 1.8292, 1.6109, 1.7584], + device='cuda:5'), covar=tensor([0.0834, 0.0680, 0.1391, 0.2001, 0.1365, 0.2278, 0.2222, 0.0938], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0202, 0.0204, 0.0191, 0.0217, 0.0210, 0.0222, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:48:51,311 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.234e+02 1.586e+02 1.962e+02 2.229e+02 4.326e+02, threshold=3.925e+02, percent-clipped=1.0 +2023-03-26 04:49:01,596 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-03-26 04:49:23,264 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0 +2023-03-26 04:49:23,750 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26458.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:49:25,516 INFO [finetune.py:976] (5/7) Epoch 5, batch 3550, loss[loss=0.179, simple_loss=0.2414, pruned_loss=0.0583, over 4762.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2769, pruned_loss=0.08011, over 958848.30 frames. 
], batch size: 27, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:49:34,660 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26475.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:50:11,928 INFO [finetune.py:976] (5/7) Epoch 5, batch 3600, loss[loss=0.1632, simple_loss=0.2279, pruned_loss=0.04927, over 4790.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2731, pruned_loss=0.07883, over 956793.66 frames. ], batch size: 29, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:50:13,893 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26514.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 04:50:19,779 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.118e+02 1.588e+02 1.920e+02 2.290e+02 3.397e+02, threshold=3.841e+02, percent-clipped=0.0 +2023-03-26 04:50:20,513 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26524.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:50:31,348 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6693, 1.1720, 0.8824, 1.6196, 1.9550, 1.4916, 1.4198, 1.7193], + device='cuda:5'), covar=tensor([0.1478, 0.2126, 0.2088, 0.1186, 0.2083, 0.2162, 0.1397, 0.1766], + device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0099, 0.0116, 0.0094, 0.0124, 0.0097, 0.0101, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 04:50:55,028 INFO [finetune.py:976] (5/7) Epoch 5, batch 3650, loss[loss=0.2355, simple_loss=0.2849, pruned_loss=0.09302, over 4892.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2736, pruned_loss=0.07907, over 954625.21 frames. ], batch size: 32, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:50:55,702 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=26562.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 04:51:05,930 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-03-26 04:51:31,369 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26606.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:51:34,855 INFO [finetune.py:976] (5/7) Epoch 5, batch 3700, loss[loss=0.2934, simple_loss=0.3125, pruned_loss=0.1372, over 4371.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2789, pruned_loss=0.08177, over 954814.85 frames. ], batch size: 19, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:51:42,594 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.101e+02 1.866e+02 2.222e+02 2.784e+02 4.852e+02, threshold=4.444e+02, percent-clipped=6.0 +2023-03-26 04:51:46,383 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5729, 1.5528, 1.3020, 1.4531, 1.8993, 1.9309, 1.6749, 1.3870], + device='cuda:5'), covar=tensor([0.0370, 0.0369, 0.0620, 0.0347, 0.0220, 0.0431, 0.0297, 0.0428], + device='cuda:5'), in_proj_covar=tensor([0.0087, 0.0113, 0.0138, 0.0118, 0.0105, 0.0101, 0.0091, 0.0110], + device='cuda:5'), out_proj_covar=tensor([6.8151e-05, 8.8869e-05, 1.1098e-04, 9.3381e-05, 8.3232e-05, 7.5277e-05, + 6.9946e-05, 8.5689e-05], device='cuda:5') +2023-03-26 04:51:50,246 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. 
limit=2.0 +2023-03-26 04:51:54,802 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0149, 1.4446, 1.7638, 1.7868, 1.5358, 1.6036, 1.6802, 1.6871], + device='cuda:5'), covar=tensor([0.6560, 0.9096, 0.7254, 0.8625, 0.9681, 0.6957, 1.1009, 0.6638], + device='cuda:5'), in_proj_covar=tensor([0.0229, 0.0247, 0.0254, 0.0258, 0.0241, 0.0218, 0.0274, 0.0223], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:52:07,680 INFO [finetune.py:976] (5/7) Epoch 5, batch 3750, loss[loss=0.2229, simple_loss=0.2924, pruned_loss=0.07671, over 4903.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.28, pruned_loss=0.0819, over 955044.60 frames. ], batch size: 35, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:53:00,962 INFO [finetune.py:976] (5/7) Epoch 5, batch 3800, loss[loss=0.214, simple_loss=0.2651, pruned_loss=0.08151, over 4142.00 frames. ], tot_loss[loss=0.224, simple_loss=0.2822, pruned_loss=0.08294, over 955429.14 frames. ], batch size: 65, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:53:08,704 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.110e+02 1.710e+02 2.076e+02 2.649e+02 5.488e+02, threshold=4.152e+02, percent-clipped=2.0 +2023-03-26 04:53:26,510 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-03-26 04:53:39,802 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7755, 1.6067, 1.6436, 1.7212, 1.2342, 3.3920, 1.4257, 1.8723], + device='cuda:5'), covar=tensor([0.3343, 0.2504, 0.2000, 0.2235, 0.1916, 0.0205, 0.2505, 0.1301], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0114, 0.0117, 0.0121, 0.0117, 0.0097, 0.0101, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 04:53:57,420 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26758.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:53:58,070 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3247, 1.3548, 1.5723, 1.1034, 1.2880, 1.5151, 1.3436, 1.5940], + device='cuda:5'), covar=tensor([0.1127, 0.1973, 0.1257, 0.1464, 0.0976, 0.1285, 0.2866, 0.0868], + device='cuda:5'), in_proj_covar=tensor([0.0204, 0.0204, 0.0201, 0.0196, 0.0184, 0.0223, 0.0216, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:53:59,697 INFO [finetune.py:976] (5/7) Epoch 5, batch 3850, loss[loss=0.1799, simple_loss=0.2586, pruned_loss=0.05056, over 4919.00 frames. ], tot_loss[loss=0.222, simple_loss=0.2802, pruned_loss=0.08189, over 954748.86 frames. 
], batch size: 38, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:54:11,278 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26770.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:54:41,331 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4615, 1.4793, 1.4832, 0.8125, 1.6292, 1.5048, 1.5372, 1.3542], + device='cuda:5'), covar=tensor([0.0665, 0.0655, 0.0618, 0.0997, 0.0645, 0.0722, 0.0594, 0.1108], + device='cuda:5'), in_proj_covar=tensor([0.0138, 0.0133, 0.0143, 0.0128, 0.0111, 0.0143, 0.0145, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 04:55:00,184 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=26806.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:55:03,129 INFO [finetune.py:976] (5/7) Epoch 5, batch 3900, loss[loss=0.1848, simple_loss=0.258, pruned_loss=0.05586, over 4800.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2785, pruned_loss=0.08183, over 954638.48 frames. ], batch size: 29, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:55:21,758 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.223e+02 1.797e+02 2.146e+02 2.516e+02 4.177e+02, threshold=4.292e+02, percent-clipped=1.0 +2023-03-26 04:55:22,493 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26824.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:55:57,247 INFO [finetune.py:976] (5/7) Epoch 5, batch 3950, loss[loss=0.2034, simple_loss=0.2631, pruned_loss=0.07183, over 4902.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2751, pruned_loss=0.08015, over 956671.43 frames. ], batch size: 35, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:56:06,712 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=26872.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:56:14,712 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6297, 1.8426, 1.4346, 1.5020, 1.9559, 2.0856, 1.9219, 1.7947], + device='cuda:5'), covar=tensor([0.0429, 0.0334, 0.0489, 0.0331, 0.0329, 0.0523, 0.0268, 0.0332], + device='cuda:5'), in_proj_covar=tensor([0.0086, 0.0112, 0.0137, 0.0117, 0.0104, 0.0100, 0.0091, 0.0108], + device='cuda:5'), out_proj_covar=tensor([6.7233e-05, 8.7888e-05, 1.0995e-04, 9.2520e-05, 8.2591e-05, 7.4707e-05, + 6.9348e-05, 8.4675e-05], device='cuda:5') +2023-03-26 04:56:29,754 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26904.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 04:56:30,924 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26906.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:56:34,937 INFO [finetune.py:976] (5/7) Epoch 5, batch 4000, loss[loss=0.1719, simple_loss=0.2223, pruned_loss=0.06075, over 4244.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.273, pruned_loss=0.07934, over 955690.71 frames. 
], batch size: 18, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:56:43,176 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.227e+02 1.662e+02 2.009e+02 2.453e+02 3.802e+02, threshold=4.018e+02, percent-clipped=0.0 +2023-03-26 04:56:57,556 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7889, 1.6776, 1.6923, 1.8348, 1.2891, 3.7249, 1.5063, 2.0717], + device='cuda:5'), covar=tensor([0.3525, 0.2332, 0.2011, 0.2242, 0.1775, 0.0157, 0.2533, 0.1222], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0114, 0.0117, 0.0122, 0.0118, 0.0098, 0.0101, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 04:57:05,880 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26950.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:57:08,737 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=26954.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:57:18,757 INFO [finetune.py:976] (5/7) Epoch 5, batch 4050, loss[loss=0.2126, simple_loss=0.2757, pruned_loss=0.07472, over 4926.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2778, pruned_loss=0.08147, over 955433.64 frames. ], batch size: 38, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:57:26,846 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26965.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 04:58:10,823 INFO [finetune.py:976] (5/7) Epoch 5, batch 4100, loss[loss=0.227, simple_loss=0.2941, pruned_loss=0.07998, over 4878.00 frames. ], tot_loss[loss=0.2225, simple_loss=0.2808, pruned_loss=0.08214, over 957317.16 frames. ], batch size: 31, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:58:11,446 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27011.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:58:19,549 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.223e+02 1.740e+02 2.111e+02 2.577e+02 5.326e+02, threshold=4.223e+02, percent-clipped=3.0 +2023-03-26 04:58:26,575 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2523, 2.9162, 2.7186, 1.2606, 2.9792, 2.0802, 0.6867, 1.7591], + device='cuda:5'), covar=tensor([0.2539, 0.1888, 0.1888, 0.3511, 0.1270, 0.1246, 0.4225, 0.1692], + device='cuda:5'), in_proj_covar=tensor([0.0156, 0.0173, 0.0164, 0.0129, 0.0155, 0.0123, 0.0146, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 04:58:58,626 INFO [finetune.py:976] (5/7) Epoch 5, batch 4150, loss[loss=0.2328, simple_loss=0.2935, pruned_loss=0.08604, over 4820.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.2826, pruned_loss=0.08292, over 956288.97 frames. ], batch size: 33, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:59:05,648 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=27070.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:59:32,487 INFO [finetune.py:976] (5/7) Epoch 5, batch 4200, loss[loss=0.1893, simple_loss=0.2586, pruned_loss=0.06001, over 4817.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.2825, pruned_loss=0.08195, over 955110.44 frames. 
], batch size: 39, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 04:59:37,724 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=27118.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 04:59:41,617 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.001e+02 1.682e+02 1.955e+02 2.295e+02 5.538e+02, threshold=3.911e+02, percent-clipped=3.0 +2023-03-26 04:59:57,807 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27148.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:00:10,427 INFO [finetune.py:976] (5/7) Epoch 5, batch 4250, loss[loss=0.237, simple_loss=0.2849, pruned_loss=0.09452, over 4869.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2801, pruned_loss=0.08109, over 956223.63 frames. ], batch size: 31, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 05:00:10,544 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9599, 1.8689, 1.8128, 1.9112, 1.5686, 3.5827, 1.8710, 2.1698], + device='cuda:5'), covar=tensor([0.3541, 0.2722, 0.2103, 0.2479, 0.1720, 0.0336, 0.2185, 0.1144], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0114, 0.0118, 0.0122, 0.0118, 0.0098, 0.0101, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 05:01:08,500 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27209.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:01:09,578 INFO [finetune.py:976] (5/7) Epoch 5, batch 4300, loss[loss=0.1982, simple_loss=0.2578, pruned_loss=0.06932, over 4857.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2767, pruned_loss=0.08002, over 956930.88 frames. ], batch size: 44, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 05:01:26,944 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.146e+02 1.762e+02 2.023e+02 2.453e+02 1.035e+03, threshold=4.046e+02, percent-clipped=2.0 +2023-03-26 05:01:35,880 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-03-26 05:01:59,785 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=27260.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 05:02:00,299 INFO [finetune.py:976] (5/7) Epoch 5, batch 4350, loss[loss=0.3001, simple_loss=0.3385, pruned_loss=0.1308, over 4851.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2729, pruned_loss=0.07818, over 956323.00 frames. ], batch size: 44, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 05:02:30,557 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=27306.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:02:33,995 INFO [finetune.py:976] (5/7) Epoch 5, batch 4400, loss[loss=0.2155, simple_loss=0.2726, pruned_loss=0.07921, over 4787.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2743, pruned_loss=0.07895, over 957327.50 frames. 
], batch size: 29, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 05:02:41,214 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.108e+02 1.601e+02 1.888e+02 2.389e+02 3.644e+02, threshold=3.775e+02, percent-clipped=0.0 +2023-03-26 05:02:51,662 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6653, 3.6619, 3.5348, 1.6704, 3.6943, 2.7701, 0.7880, 2.6057], + device='cuda:5'), covar=tensor([0.2534, 0.2317, 0.1562, 0.3779, 0.1271, 0.1064, 0.5061, 0.1599], + device='cuda:5'), in_proj_covar=tensor([0.0156, 0.0172, 0.0164, 0.0128, 0.0156, 0.0122, 0.0146, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 05:03:07,516 INFO [finetune.py:976] (5/7) Epoch 5, batch 4450, loss[loss=0.2149, simple_loss=0.2864, pruned_loss=0.07166, over 4796.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.2808, pruned_loss=0.08198, over 957389.85 frames. ], batch size: 29, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 05:03:11,348 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-03-26 05:03:40,731 INFO [finetune.py:976] (5/7) Epoch 5, batch 4500, loss[loss=0.2533, simple_loss=0.3161, pruned_loss=0.09525, over 4832.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.2821, pruned_loss=0.08216, over 955404.89 frames. ], batch size: 49, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 05:03:48,443 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.291e+02 1.723e+02 2.077e+02 2.543e+02 6.449e+02, threshold=4.154e+02, percent-clipped=4.0 +2023-03-26 05:04:14,226 INFO [finetune.py:976] (5/7) Epoch 5, batch 4550, loss[loss=0.2161, simple_loss=0.2805, pruned_loss=0.07587, over 4915.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.2838, pruned_loss=0.0828, over 957565.38 frames. ], batch size: 37, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 05:04:42,683 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=27504.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:04:47,382 INFO [finetune.py:976] (5/7) Epoch 5, batch 4600, loss[loss=0.1968, simple_loss=0.2474, pruned_loss=0.07309, over 4692.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.2819, pruned_loss=0.08188, over 957148.50 frames. ], batch size: 23, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 05:04:55,105 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.119e+02 1.596e+02 2.009e+02 2.524e+02 8.514e+02, threshold=4.018e+02, percent-clipped=5.0 +2023-03-26 05:05:07,119 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0937, 1.8818, 1.4800, 0.5840, 1.6216, 1.7522, 1.5291, 1.8178], + device='cuda:5'), covar=tensor([0.0887, 0.0770, 0.1331, 0.2059, 0.1370, 0.2288, 0.2354, 0.0776], + device='cuda:5'), in_proj_covar=tensor([0.0172, 0.0203, 0.0205, 0.0192, 0.0220, 0.0212, 0.0225, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:05:20,022 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=27560.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 05:05:20,539 INFO [finetune.py:976] (5/7) Epoch 5, batch 4650, loss[loss=0.2193, simple_loss=0.2712, pruned_loss=0.08373, over 4910.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2791, pruned_loss=0.08093, over 956331.24 frames. 
], batch size: 46, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 05:05:20,662 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6814, 2.4758, 2.1596, 1.0671, 2.3048, 2.0068, 1.8612, 2.2219], + device='cuda:5'), covar=tensor([0.0816, 0.0982, 0.1596, 0.2336, 0.1673, 0.2473, 0.2316, 0.1081], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0202, 0.0205, 0.0192, 0.0220, 0.0212, 0.0225, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:06:07,100 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=27606.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:06:13,640 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=27608.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 05:06:15,418 INFO [finetune.py:976] (5/7) Epoch 5, batch 4700, loss[loss=0.2224, simple_loss=0.2721, pruned_loss=0.0863, over 4913.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2749, pruned_loss=0.07892, over 955134.38 frames. ], batch size: 43, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 05:06:27,907 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.051e+02 1.549e+02 1.903e+02 2.293e+02 3.137e+02, threshold=3.806e+02, percent-clipped=0.0 +2023-03-26 05:06:28,031 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7436, 1.5587, 1.6063, 1.7059, 1.1130, 3.5372, 1.4050, 1.7742], + device='cuda:5'), covar=tensor([0.3315, 0.2429, 0.1998, 0.2182, 0.1836, 0.0166, 0.2442, 0.1352], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0114, 0.0118, 0.0122, 0.0117, 0.0098, 0.0101, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 05:06:28,064 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8175, 1.7206, 1.6139, 1.9337, 2.2404, 1.8586, 1.4348, 1.4603], + device='cuda:5'), covar=tensor([0.2275, 0.2199, 0.1959, 0.1700, 0.1871, 0.1220, 0.2787, 0.2016], + device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0211, 0.0203, 0.0186, 0.0237, 0.0177, 0.0216, 0.0190], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:07:05,705 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=27654.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:07:14,260 INFO [finetune.py:976] (5/7) Epoch 5, batch 4750, loss[loss=0.2739, simple_loss=0.3191, pruned_loss=0.1143, over 4864.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2732, pruned_loss=0.07855, over 954716.53 frames. ], batch size: 44, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 05:07:44,969 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-03-26 05:07:56,926 INFO [finetune.py:976] (5/7) Epoch 5, batch 4800, loss[loss=0.2653, simple_loss=0.3207, pruned_loss=0.1049, over 4758.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2766, pruned_loss=0.07989, over 956100.13 frames. 
], batch size: 54, lr: 3.93e-03, grad_scale: 16.0 +2023-03-26 05:07:59,335 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27714.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 05:08:04,702 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.165e+02 1.708e+02 2.066e+02 2.363e+02 4.852e+02, threshold=4.133e+02, percent-clipped=3.0 +2023-03-26 05:08:30,289 INFO [finetune.py:976] (5/7) Epoch 5, batch 4850, loss[loss=0.1709, simple_loss=0.2471, pruned_loss=0.04738, over 4937.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2795, pruned_loss=0.08027, over 956192.59 frames. ], batch size: 33, lr: 3.92e-03, grad_scale: 16.0 +2023-03-26 05:08:39,499 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27775.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 05:08:59,126 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=27804.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:09:04,314 INFO [finetune.py:976] (5/7) Epoch 5, batch 4900, loss[loss=0.2549, simple_loss=0.3054, pruned_loss=0.1022, over 4809.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.281, pruned_loss=0.08113, over 953574.97 frames. ], batch size: 40, lr: 3.92e-03, grad_scale: 16.0 +2023-03-26 05:09:12,048 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.126e+02 1.628e+02 1.864e+02 2.335e+02 3.818e+02, threshold=3.728e+02, percent-clipped=0.0 +2023-03-26 05:09:30,675 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=27852.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:09:37,157 INFO [finetune.py:976] (5/7) Epoch 5, batch 4950, loss[loss=0.2657, simple_loss=0.3162, pruned_loss=0.1076, over 4763.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2811, pruned_loss=0.08064, over 953245.62 frames. ], batch size: 59, lr: 3.92e-03, grad_scale: 16.0 +2023-03-26 05:10:10,462 INFO [finetune.py:976] (5/7) Epoch 5, batch 5000, loss[loss=0.2116, simple_loss=0.2621, pruned_loss=0.08057, over 4811.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2786, pruned_loss=0.07976, over 955156.17 frames. ], batch size: 25, lr: 3.92e-03, grad_scale: 16.0 +2023-03-26 05:10:19,080 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.230e+02 1.618e+02 1.837e+02 2.301e+02 4.829e+02, threshold=3.674e+02, percent-clipped=1.0 +2023-03-26 05:10:33,864 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-03-26 05:10:43,561 INFO [finetune.py:976] (5/7) Epoch 5, batch 5050, loss[loss=0.169, simple_loss=0.2387, pruned_loss=0.04966, over 4798.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2755, pruned_loss=0.07837, over 956001.62 frames. ], batch size: 25, lr: 3.92e-03, grad_scale: 16.0 +2023-03-26 05:11:48,644 INFO [finetune.py:976] (5/7) Epoch 5, batch 5100, loss[loss=0.247, simple_loss=0.2953, pruned_loss=0.09935, over 4934.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2724, pruned_loss=0.07732, over 956372.16 frames. 
], batch size: 38, lr: 3.92e-03, grad_scale: 16.0 +2023-03-26 05:12:02,949 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.177e+02 1.639e+02 1.875e+02 2.408e+02 3.954e+02, threshold=3.749e+02, percent-clipped=2.0 +2023-03-26 05:12:28,079 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([5.3402, 4.5923, 4.8143, 5.1142, 4.9991, 4.7744, 5.4293, 1.5920], + device='cuda:5'), covar=tensor([0.0655, 0.0812, 0.0770, 0.1033, 0.1145, 0.1488, 0.0485, 0.5589], + device='cuda:5'), in_proj_covar=tensor([0.0355, 0.0244, 0.0276, 0.0293, 0.0337, 0.0286, 0.0304, 0.0298], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:12:30,312 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-03-26 05:12:32,817 INFO [finetune.py:976] (5/7) Epoch 5, batch 5150, loss[loss=0.2478, simple_loss=0.3023, pruned_loss=0.09664, over 4807.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2736, pruned_loss=0.07825, over 957445.04 frames. ], batch size: 51, lr: 3.92e-03, grad_scale: 16.0 +2023-03-26 05:12:38,916 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28070.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 05:13:06,310 INFO [finetune.py:976] (5/7) Epoch 5, batch 5200, loss[loss=0.2072, simple_loss=0.2853, pruned_loss=0.06454, over 4818.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2772, pruned_loss=0.07932, over 956750.28 frames. ], batch size: 45, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:13:12,682 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-03-26 05:13:14,539 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.308e+02 1.748e+02 1.996e+02 2.342e+02 5.311e+02, threshold=3.992e+02, percent-clipped=1.0 +2023-03-26 05:13:39,346 INFO [finetune.py:976] (5/7) Epoch 5, batch 5250, loss[loss=0.201, simple_loss=0.2471, pruned_loss=0.0774, over 4012.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2798, pruned_loss=0.08086, over 954842.62 frames. 
], batch size: 17, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:13:53,261 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9305, 1.4088, 0.7423, 1.8067, 2.2941, 1.4932, 1.8799, 1.9217], + device='cuda:5'), covar=tensor([0.1465, 0.2078, 0.2311, 0.1152, 0.1877, 0.1913, 0.1336, 0.1833], + device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0098, 0.0116, 0.0094, 0.0125, 0.0097, 0.0101, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 05:13:56,190 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4543, 1.4260, 1.5173, 0.8314, 1.5834, 1.5025, 1.4989, 1.2957], + device='cuda:5'), covar=tensor([0.0670, 0.0739, 0.0705, 0.0977, 0.0654, 0.0777, 0.0668, 0.1250], + device='cuda:5'), in_proj_covar=tensor([0.0139, 0.0134, 0.0144, 0.0129, 0.0112, 0.0143, 0.0147, 0.0164], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:14:04,868 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.7624, 1.6059, 1.6225, 0.7811, 1.6156, 1.8479, 1.8738, 1.4650], + device='cuda:5'), covar=tensor([0.0936, 0.0588, 0.0526, 0.0661, 0.0505, 0.0476, 0.0352, 0.0670], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0159, 0.0121, 0.0138, 0.0134, 0.0124, 0.0148, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.8399e-05, 1.1788e-04, 8.7799e-05, 1.0076e-04, 9.6214e-05, 9.1979e-05, + 1.1010e-04, 1.0857e-04], device='cuda:5') +2023-03-26 05:14:10,148 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-03-26 05:14:12,318 INFO [finetune.py:976] (5/7) Epoch 5, batch 5300, loss[loss=0.2437, simple_loss=0.2873, pruned_loss=0.1001, over 4752.00 frames. ], tot_loss[loss=0.2225, simple_loss=0.2815, pruned_loss=0.08175, over 953761.26 frames. ], batch size: 27, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:14:19,562 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.181e+02 1.725e+02 1.957e+02 2.435e+02 6.444e+02, threshold=3.915e+02, percent-clipped=2.0 +2023-03-26 05:14:23,136 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8930, 1.7271, 1.4196, 1.5580, 1.6337, 1.6060, 1.5707, 2.4147], + device='cuda:5'), covar=tensor([0.7079, 0.7438, 0.5601, 0.7391, 0.6147, 0.4033, 0.6946, 0.2596], + device='cuda:5'), in_proj_covar=tensor([0.0283, 0.0258, 0.0220, 0.0284, 0.0239, 0.0202, 0.0244, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:14:24,327 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=28229.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:14:36,307 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3649, 1.4133, 1.5012, 0.8540, 1.3589, 1.6975, 1.7722, 1.3505], + device='cuda:5'), covar=tensor([0.0844, 0.0495, 0.0419, 0.0506, 0.0452, 0.0423, 0.0271, 0.0536], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0159, 0.0121, 0.0138, 0.0133, 0.0124, 0.0148, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.8065e-05, 1.1765e-04, 8.7583e-05, 1.0040e-04, 9.5965e-05, 9.1572e-05, + 1.0984e-04, 1.0835e-04], device='cuda:5') +2023-03-26 05:14:49,710 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.06 vs. 
limit=2.0 +2023-03-26 05:14:51,800 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=28255.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 05:14:56,277 INFO [finetune.py:976] (5/7) Epoch 5, batch 5350, loss[loss=0.1935, simple_loss=0.2473, pruned_loss=0.06982, over 4702.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2803, pruned_loss=0.08053, over 954399.86 frames. ], batch size: 23, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:15:04,664 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-03-26 05:15:13,546 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=28287.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:15:15,754 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=28290.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:15:28,417 INFO [finetune.py:976] (5/7) Epoch 5, batch 5400, loss[loss=0.198, simple_loss=0.2556, pruned_loss=0.07017, over 4904.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2761, pruned_loss=0.07857, over 953570.41 frames. ], batch size: 43, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:15:31,980 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=28316.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 05:15:36,019 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.900e+01 1.568e+02 1.878e+02 2.260e+02 3.573e+02, threshold=3.756e+02, percent-clipped=0.0 +2023-03-26 05:15:53,766 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=28348.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:16:00,148 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-03-26 05:16:01,448 INFO [finetune.py:976] (5/7) Epoch 5, batch 5450, loss[loss=0.2079, simple_loss=0.2583, pruned_loss=0.07871, over 4282.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.273, pruned_loss=0.07763, over 952823.62 frames. ], batch size: 18, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:16:18,293 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28370.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 05:17:02,778 INFO [finetune.py:976] (5/7) Epoch 5, batch 5500, loss[loss=0.1823, simple_loss=0.2423, pruned_loss=0.06117, over 4900.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2713, pruned_loss=0.07705, over 955633.24 frames. ], batch size: 32, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:17:12,778 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=28418.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 05:17:21,151 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.059e+02 1.651e+02 2.036e+02 2.478e+02 5.642e+02, threshold=4.072e+02, percent-clipped=3.0 +2023-03-26 05:18:07,649 INFO [finetune.py:976] (5/7) Epoch 5, batch 5550, loss[loss=0.2034, simple_loss=0.2556, pruned_loss=0.07556, over 4717.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2732, pruned_loss=0.07823, over 954680.40 frames. 
], batch size: 23, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:18:18,839 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9826, 1.9668, 1.9263, 1.2385, 2.1056, 2.0712, 1.9538, 1.6282], + device='cuda:5'), covar=tensor([0.0696, 0.0649, 0.0734, 0.0987, 0.0490, 0.0705, 0.0699, 0.1074], + device='cuda:5'), in_proj_covar=tensor([0.0141, 0.0136, 0.0146, 0.0130, 0.0113, 0.0145, 0.0148, 0.0165], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:18:36,711 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=28486.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:18:45,485 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=28492.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:18:59,297 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5084, 1.4405, 2.1764, 3.1456, 2.1933, 2.2191, 1.2974, 2.3935], + device='cuda:5'), covar=tensor([0.1933, 0.1590, 0.1182, 0.0658, 0.0870, 0.1460, 0.1697, 0.0753], + device='cuda:5'), in_proj_covar=tensor([0.0103, 0.0118, 0.0136, 0.0165, 0.0103, 0.0141, 0.0128, 0.0103], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 05:19:02,058 INFO [finetune.py:976] (5/7) Epoch 5, batch 5600, loss[loss=0.2541, simple_loss=0.3191, pruned_loss=0.09458, over 4904.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2775, pruned_loss=0.08011, over 952393.17 frames. ], batch size: 37, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:19:14,572 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.120e+02 1.753e+02 2.098e+02 2.591e+02 4.684e+02, threshold=4.196e+02, percent-clipped=2.0 +2023-03-26 05:19:40,194 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=28547.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:19:47,576 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=28553.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:19:52,218 INFO [finetune.py:976] (5/7) Epoch 5, batch 5650, loss[loss=0.267, simple_loss=0.3227, pruned_loss=0.1056, over 4873.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2791, pruned_loss=0.08019, over 952145.57 frames. ], batch size: 34, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:19:58,223 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9832, 1.8635, 1.4741, 1.9428, 1.9607, 1.6546, 2.3973, 1.9852], + device='cuda:5'), covar=tensor([0.1644, 0.3195, 0.3927, 0.3707, 0.3106, 0.2053, 0.3601, 0.2246], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0193, 0.0236, 0.0254, 0.0230, 0.0189, 0.0211, 0.0190], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:20:06,328 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28585.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:20:21,892 INFO [finetune.py:976] (5/7) Epoch 5, batch 5700, loss[loss=0.1813, simple_loss=0.2338, pruned_loss=0.06435, over 4221.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2755, pruned_loss=0.07958, over 937117.35 frames. 
], batch size: 18, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:20:21,935 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28611.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 05:20:29,013 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.136e+02 1.580e+02 1.894e+02 2.210e+02 5.665e+02, threshold=3.789e+02, percent-clipped=1.0 +2023-03-26 05:20:53,189 INFO [finetune.py:976] (5/7) Epoch 6, batch 0, loss[loss=0.2209, simple_loss=0.2864, pruned_loss=0.07769, over 4819.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2864, pruned_loss=0.07769, over 4819.00 frames. ], batch size: 30, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:20:53,189 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 05:20:56,558 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4546, 1.5341, 1.5172, 1.6419, 1.6537, 2.9257, 1.4130, 1.6308], + device='cuda:5'), covar=tensor([0.1012, 0.1711, 0.1102, 0.0989, 0.1502, 0.0361, 0.1438, 0.1695], + device='cuda:5'), in_proj_covar=tensor([0.0079, 0.0083, 0.0078, 0.0081, 0.0094, 0.0084, 0.0087, 0.0081], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 05:21:08,971 INFO [finetune.py:1010] (5/7) Epoch 6, validation: loss=0.1659, simple_loss=0.2379, pruned_loss=0.04693, over 2265189.00 frames. +2023-03-26 05:21:08,971 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 05:21:15,235 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28643.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:21:28,194 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.11 vs. limit=5.0 +2023-03-26 05:21:56,250 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5927, 2.9565, 2.4668, 1.8402, 2.9198, 2.9642, 2.9279, 2.4343], + device='cuda:5'), covar=tensor([0.0744, 0.0547, 0.0948, 0.1079, 0.0415, 0.0855, 0.0681, 0.1076], + device='cuda:5'), in_proj_covar=tensor([0.0142, 0.0136, 0.0147, 0.0132, 0.0114, 0.0146, 0.0149, 0.0167], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:21:59,713 INFO [finetune.py:976] (5/7) Epoch 6, batch 50, loss[loss=0.1946, simple_loss=0.2646, pruned_loss=0.06231, over 4830.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.2861, pruned_loss=0.08319, over 218045.35 frames. ], batch size: 47, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:22:19,626 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6032, 1.6975, 2.0868, 1.7920, 1.7984, 4.0701, 1.4118, 1.8682], + device='cuda:5'), covar=tensor([0.0934, 0.1618, 0.1198, 0.1047, 0.1499, 0.0190, 0.1452, 0.1617], + device='cuda:5'), in_proj_covar=tensor([0.0078, 0.0082, 0.0078, 0.0080, 0.0093, 0.0084, 0.0087, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 05:22:30,502 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.152e+02 1.667e+02 1.978e+02 2.446e+02 6.098e+02, threshold=3.955e+02, percent-clipped=3.0 +2023-03-26 05:22:41,772 INFO [finetune.py:976] (5/7) Epoch 6, batch 100, loss[loss=0.207, simple_loss=0.2724, pruned_loss=0.07079, over 4918.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2769, pruned_loss=0.07919, over 382166.74 frames. 
], batch size: 46, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:23:15,381 INFO [finetune.py:976] (5/7) Epoch 6, batch 150, loss[loss=0.1981, simple_loss=0.2513, pruned_loss=0.07251, over 4919.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2711, pruned_loss=0.07737, over 508068.30 frames. ], batch size: 37, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:23:37,613 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.084e+02 1.593e+02 1.877e+02 2.260e+02 4.734e+02, threshold=3.755e+02, percent-clipped=1.0 +2023-03-26 05:23:48,135 INFO [finetune.py:976] (5/7) Epoch 6, batch 200, loss[loss=0.2125, simple_loss=0.2815, pruned_loss=0.07173, over 4832.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2695, pruned_loss=0.07768, over 607477.64 frames. ], batch size: 49, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:23:50,546 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28842.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:23:55,125 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28848.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:24:03,983 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=28861.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:24:07,038 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6329, 1.1519, 0.7885, 1.6384, 1.8986, 1.4195, 1.3994, 1.7253], + device='cuda:5'), covar=tensor([0.2061, 0.2891, 0.2862, 0.1471, 0.2574, 0.2954, 0.2067, 0.2513], + device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0099, 0.0116, 0.0093, 0.0124, 0.0097, 0.0100, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 05:24:19,107 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28885.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:24:26,390 INFO [finetune.py:976] (5/7) Epoch 6, batch 250, loss[loss=0.2062, simple_loss=0.265, pruned_loss=0.07367, over 4830.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2722, pruned_loss=0.07819, over 685381.65 frames. ], batch size: 30, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:24:47,340 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28911.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 05:25:02,682 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=28922.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:25:03,160 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.192e+02 1.635e+02 1.950e+02 2.422e+02 4.878e+02, threshold=3.900e+02, percent-clipped=5.0 +2023-03-26 05:25:03,888 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5244, 1.3826, 2.0857, 3.2961, 2.2043, 2.2265, 0.8378, 2.4310], + device='cuda:5'), covar=tensor([0.2026, 0.1660, 0.1431, 0.0506, 0.0879, 0.1711, 0.2177, 0.0738], + device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0119, 0.0136, 0.0165, 0.0103, 0.0142, 0.0128, 0.0103], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 05:25:09,280 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=28933.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:25:13,898 INFO [finetune.py:976] (5/7) Epoch 6, batch 300, loss[loss=0.2399, simple_loss=0.298, pruned_loss=0.09088, over 4923.00 frames. 
], tot_loss[loss=0.2178, simple_loss=0.2773, pruned_loss=0.07916, over 747246.08 frames. ], batch size: 38, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:25:16,444 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28943.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:25:28,663 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=28959.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 05:25:47,462 INFO [finetune.py:976] (5/7) Epoch 6, batch 350, loss[loss=0.2302, simple_loss=0.2898, pruned_loss=0.08535, over 4895.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2799, pruned_loss=0.08042, over 792361.07 frames. ], batch size: 36, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:25:49,492 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=28991.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:26:03,148 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29002.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:26:28,284 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 1.836e+02 2.201e+02 2.620e+02 4.241e+02, threshold=4.402e+02, percent-clipped=2.0 +2023-03-26 05:26:38,007 INFO [finetune.py:976] (5/7) Epoch 6, batch 400, loss[loss=0.2032, simple_loss=0.2557, pruned_loss=0.07533, over 4740.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2798, pruned_loss=0.07966, over 830446.38 frames. ], batch size: 23, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:26:45,495 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-03-26 05:27:01,631 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29063.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:27:16,376 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.17 vs. limit=5.0 +2023-03-26 05:27:17,312 INFO [finetune.py:976] (5/7) Epoch 6, batch 450, loss[loss=0.2226, simple_loss=0.2791, pruned_loss=0.08308, over 4891.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.279, pruned_loss=0.07984, over 859400.20 frames. ], batch size: 43, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:27:45,347 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.158e+02 1.708e+02 1.996e+02 2.284e+02 5.200e+02, threshold=3.993e+02, percent-clipped=1.0 +2023-03-26 05:27:55,105 INFO [finetune.py:976] (5/7) Epoch 6, batch 500, loss[loss=0.2135, simple_loss=0.2751, pruned_loss=0.07598, over 4782.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.276, pruned_loss=0.07911, over 881923.89 frames. 
], batch size: 26, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:27:57,546 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=29142.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:28:01,660 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=29148.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:28:26,096 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.2948, 1.5257, 1.5427, 0.7848, 1.3379, 1.7017, 1.7742, 1.3983], + device='cuda:5'), covar=tensor([0.0868, 0.0556, 0.0490, 0.0541, 0.0560, 0.0457, 0.0327, 0.0568], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0159, 0.0121, 0.0137, 0.0133, 0.0124, 0.0147, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.8294e-05, 1.1760e-04, 8.7695e-05, 1.0028e-04, 9.5812e-05, 9.1600e-05, + 1.0880e-04, 1.0783e-04], device='cuda:5') +2023-03-26 05:28:28,349 INFO [finetune.py:976] (5/7) Epoch 6, batch 550, loss[loss=0.2631, simple_loss=0.3158, pruned_loss=0.1051, over 4901.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2725, pruned_loss=0.07745, over 900261.02 frames. ], batch size: 35, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:28:28,997 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=29190.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:28:34,888 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=29196.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:29:00,099 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29217.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:29:04,678 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.109e+02 1.638e+02 2.076e+02 2.525e+02 4.090e+02, threshold=4.153e+02, percent-clipped=1.0 +2023-03-26 05:29:18,509 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29236.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:29:20,236 INFO [finetune.py:976] (5/7) Epoch 6, batch 600, loss[loss=0.2632, simple_loss=0.3107, pruned_loss=0.1079, over 4861.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2744, pruned_loss=0.0786, over 912440.53 frames. ], batch size: 31, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:30:24,096 INFO [finetune.py:976] (5/7) Epoch 6, batch 650, loss[loss=0.2396, simple_loss=0.3123, pruned_loss=0.08346, over 4767.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2763, pruned_loss=0.07862, over 917829.44 frames. ], batch size: 54, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:30:34,102 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29297.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:31:03,944 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9849, 4.2048, 4.1113, 2.4615, 4.2685, 3.1990, 1.3968, 3.1977], + device='cuda:5'), covar=tensor([0.2206, 0.1837, 0.1281, 0.2469, 0.0822, 0.0876, 0.3848, 0.1091], + device='cuda:5'), in_proj_covar=tensor([0.0155, 0.0172, 0.0163, 0.0128, 0.0156, 0.0122, 0.0145, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 05:31:13,222 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.225e+02 1.735e+02 1.982e+02 2.438e+02 4.583e+02, threshold=3.965e+02, percent-clipped=2.0 +2023-03-26 05:31:33,679 INFO [finetune.py:976] (5/7) Epoch 6, batch 700, loss[loss=0.2752, simple_loss=0.3345, pruned_loss=0.108, over 4363.00 frames. 
], tot_loss[loss=0.2184, simple_loss=0.2781, pruned_loss=0.07931, over 922942.09 frames. ], batch size: 65, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:31:46,290 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29358.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:32:17,024 INFO [finetune.py:976] (5/7) Epoch 6, batch 750, loss[loss=0.3082, simple_loss=0.3415, pruned_loss=0.1375, over 4342.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2796, pruned_loss=0.07971, over 932023.33 frames. ], batch size: 65, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:32:59,102 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.656e+01 1.814e+02 2.109e+02 2.501e+02 5.044e+02, threshold=4.217e+02, percent-clipped=3.0 +2023-03-26 05:33:26,485 INFO [finetune.py:976] (5/7) Epoch 6, batch 800, loss[loss=0.2555, simple_loss=0.3048, pruned_loss=0.1031, over 4230.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2807, pruned_loss=0.0801, over 935335.76 frames. ], batch size: 66, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:34:10,552 INFO [finetune.py:976] (5/7) Epoch 6, batch 850, loss[loss=0.1949, simple_loss=0.2548, pruned_loss=0.06753, over 4330.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2776, pruned_loss=0.07844, over 940731.59 frames. ], batch size: 65, lr: 3.92e-03, grad_scale: 32.0 +2023-03-26 05:34:43,550 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=29517.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:34:52,273 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.241e+02 1.650e+02 2.021e+02 2.394e+02 5.702e+02, threshold=4.042e+02, percent-clipped=1.0 +2023-03-26 05:35:14,737 INFO [finetune.py:976] (5/7) Epoch 6, batch 900, loss[loss=0.2126, simple_loss=0.2747, pruned_loss=0.0752, over 4934.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.274, pruned_loss=0.07675, over 945510.93 frames. ], batch size: 33, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:35:32,176 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29551.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:35:46,204 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=29565.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:36:14,404 INFO [finetune.py:976] (5/7) Epoch 6, batch 950, loss[loss=0.1898, simple_loss=0.2545, pruned_loss=0.06252, over 4800.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2734, pruned_loss=0.07712, over 947217.66 frames. ], batch size: 29, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:36:21,392 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29592.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:36:43,997 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29612.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:36:53,350 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-03-26 05:36:56,225 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.661e+02 1.977e+02 2.359e+02 4.237e+02, threshold=3.954e+02, percent-clipped=2.0 +2023-03-26 05:37:17,827 INFO [finetune.py:976] (5/7) Epoch 6, batch 1000, loss[loss=0.2743, simple_loss=0.3264, pruned_loss=0.1111, over 4891.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.275, pruned_loss=0.07769, over 950296.41 frames. 
], batch size: 32, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:37:45,062 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=29658.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:37:47,498 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29662.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:38:20,623 INFO [finetune.py:976] (5/7) Epoch 6, batch 1050, loss[loss=0.2256, simple_loss=0.2735, pruned_loss=0.08889, over 4164.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2798, pruned_loss=0.08022, over 950872.65 frames. ], batch size: 65, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:38:41,612 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=29706.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:39:01,053 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-03-26 05:39:02,905 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.157e+02 1.795e+02 2.095e+02 2.533e+02 7.754e+02, threshold=4.191e+02, percent-clipped=4.0 +2023-03-26 05:39:03,036 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29723.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 05:39:23,373 INFO [finetune.py:976] (5/7) Epoch 6, batch 1100, loss[loss=0.1859, simple_loss=0.2581, pruned_loss=0.05685, over 4748.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2807, pruned_loss=0.08061, over 949999.05 frames. ], batch size: 54, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:40:13,698 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.67 vs. limit=2.0 +2023-03-26 05:40:24,916 INFO [finetune.py:976] (5/7) Epoch 6, batch 1150, loss[loss=0.2557, simple_loss=0.3056, pruned_loss=0.1029, over 4818.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.2812, pruned_loss=0.08124, over 950657.09 frames. ], batch size: 33, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:40:37,981 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29802.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:41:01,070 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.060e+02 1.742e+02 2.057e+02 2.377e+02 6.600e+02, threshold=4.115e+02, percent-clipped=1.0 +2023-03-26 05:41:03,420 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0962, 2.3751, 2.4252, 1.2677, 2.5292, 2.1624, 1.6730, 2.0636], + device='cuda:5'), covar=tensor([0.0874, 0.1298, 0.1868, 0.2597, 0.2012, 0.2218, 0.2859, 0.1607], + device='cuda:5'), in_proj_covar=tensor([0.0167, 0.0200, 0.0201, 0.0189, 0.0216, 0.0208, 0.0219, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:41:11,693 INFO [finetune.py:976] (5/7) Epoch 6, batch 1200, loss[loss=0.2032, simple_loss=0.252, pruned_loss=0.0772, over 4768.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2789, pruned_loss=0.08025, over 953167.65 frames. ], batch size: 27, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:41:28,771 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29863.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:41:45,292 INFO [finetune.py:976] (5/7) Epoch 6, batch 1250, loss[loss=0.2243, simple_loss=0.2765, pruned_loss=0.08599, over 4763.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2753, pruned_loss=0.07853, over 955172.58 frames. 
], batch size: 26, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:41:47,166 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=29892.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:41:57,710 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29907.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:42:07,774 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.170e+02 1.638e+02 1.953e+02 2.201e+02 4.150e+02, threshold=3.906e+02, percent-clipped=1.0 +2023-03-26 05:42:18,519 INFO [finetune.py:976] (5/7) Epoch 6, batch 1300, loss[loss=0.2224, simple_loss=0.2778, pruned_loss=0.0835, over 4899.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2724, pruned_loss=0.07757, over 953894.35 frames. ], batch size: 32, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:42:19,141 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=29940.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:42:21,946 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-03-26 05:42:53,738 INFO [finetune.py:976] (5/7) Epoch 6, batch 1350, loss[loss=0.2231, simple_loss=0.2849, pruned_loss=0.08061, over 4821.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2739, pruned_loss=0.07856, over 950322.92 frames. ], batch size: 38, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:43:04,457 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-03-26 05:43:22,437 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30018.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 05:43:25,359 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.107e+02 1.660e+02 2.040e+02 2.486e+02 4.804e+02, threshold=4.081e+02, percent-clipped=2.0 +2023-03-26 05:43:30,072 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-03-26 05:43:32,002 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0292, 2.0164, 2.0030, 1.4147, 2.0792, 2.2333, 2.1308, 1.6994], + device='cuda:5'), covar=tensor([0.0549, 0.0532, 0.0625, 0.0887, 0.0493, 0.0561, 0.0560, 0.0959], + device='cuda:5'), in_proj_covar=tensor([0.0139, 0.0136, 0.0145, 0.0129, 0.0114, 0.0144, 0.0148, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:43:35,491 INFO [finetune.py:976] (5/7) Epoch 6, batch 1400, loss[loss=0.2297, simple_loss=0.2864, pruned_loss=0.0865, over 4874.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2766, pruned_loss=0.07923, over 951491.43 frames. ], batch size: 31, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:44:14,311 INFO [finetune.py:976] (5/7) Epoch 6, batch 1450, loss[loss=0.1817, simple_loss=0.2344, pruned_loss=0.06446, over 4298.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2774, pruned_loss=0.07886, over 951551.05 frames. ], batch size: 18, lr: 3.91e-03, grad_scale: 64.0 +2023-03-26 05:44:58,650 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.184e+02 1.825e+02 2.216e+02 2.642e+02 7.386e+02, threshold=4.431e+02, percent-clipped=2.0 +2023-03-26 05:44:59,438 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30125.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:45:09,719 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. 
limit=2.0 +2023-03-26 05:45:16,731 INFO [finetune.py:976] (5/7) Epoch 6, batch 1500, loss[loss=0.2122, simple_loss=0.2778, pruned_loss=0.0733, over 4836.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2799, pruned_loss=0.08051, over 952241.89 frames. ], batch size: 47, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:45:38,725 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30158.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:45:59,652 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30186.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:46:01,366 INFO [finetune.py:976] (5/7) Epoch 6, batch 1550, loss[loss=0.2345, simple_loss=0.2967, pruned_loss=0.08613, over 4904.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.279, pruned_loss=0.07958, over 951533.58 frames. ], batch size: 37, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:46:01,444 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8053, 4.1352, 3.9365, 2.3269, 4.1797, 3.1569, 0.7146, 2.9640], + device='cuda:5'), covar=tensor([0.2989, 0.1806, 0.1693, 0.2942, 0.0834, 0.0895, 0.4710, 0.1440], + device='cuda:5'), in_proj_covar=tensor([0.0157, 0.0173, 0.0165, 0.0130, 0.0158, 0.0124, 0.0146, 0.0125], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 05:46:12,979 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30207.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:46:14,698 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30209.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:46:25,113 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.142e+02 1.698e+02 2.038e+02 2.458e+02 3.858e+02, threshold=4.076e+02, percent-clipped=0.0 +2023-03-26 05:46:34,722 INFO [finetune.py:976] (5/7) Epoch 6, batch 1600, loss[loss=0.2805, simple_loss=0.3083, pruned_loss=0.1264, over 4202.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2783, pruned_loss=0.07973, over 951406.96 frames. ], batch size: 18, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:46:44,947 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=30255.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:46:51,641 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.54 vs. limit=5.0 +2023-03-26 05:46:56,001 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30270.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:47:08,016 INFO [finetune.py:976] (5/7) Epoch 6, batch 1650, loss[loss=0.2532, simple_loss=0.3064, pruned_loss=0.1001, over 4908.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2747, pruned_loss=0.07811, over 952752.48 frames. ], batch size: 36, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:47:25,260 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-03-26 05:47:27,140 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30318.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 05:47:31,617 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.138e+02 1.710e+02 1.999e+02 2.408e+02 3.997e+02, threshold=3.998e+02, percent-clipped=0.0 +2023-03-26 05:47:41,367 INFO [finetune.py:976] (5/7) Epoch 6, batch 1700, loss[loss=0.2077, simple_loss=0.2677, pruned_loss=0.07384, over 4822.00 frames. 
], tot_loss[loss=0.215, simple_loss=0.2733, pruned_loss=0.07839, over 953615.57 frames. ], batch size: 33, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:47:55,185 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30360.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:47:58,775 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=30366.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:48:27,034 INFO [finetune.py:976] (5/7) Epoch 6, batch 1750, loss[loss=0.2368, simple_loss=0.3003, pruned_loss=0.08669, over 4894.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2759, pruned_loss=0.07948, over 955061.70 frames. ], batch size: 35, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:48:48,765 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30421.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:48:50,965 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.220e+01 1.748e+02 2.265e+02 2.778e+02 4.150e+02, threshold=4.530e+02, percent-clipped=1.0 +2023-03-26 05:49:00,663 INFO [finetune.py:976] (5/7) Epoch 6, batch 1800, loss[loss=0.2555, simple_loss=0.32, pruned_loss=0.09549, over 4755.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2788, pruned_loss=0.0799, over 955492.63 frames. ], batch size: 54, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:49:13,230 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30458.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:49:23,933 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7236, 1.4945, 1.5493, 1.6050, 1.1483, 3.6883, 1.4540, 2.0757], + device='cuda:5'), covar=tensor([0.3483, 0.2482, 0.2118, 0.2319, 0.1924, 0.0146, 0.2679, 0.1260], + device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0114, 0.0118, 0.0121, 0.0117, 0.0099, 0.0101, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 05:49:28,999 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30481.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:49:29,633 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2790, 3.6922, 3.8818, 4.1218, 4.0525, 3.7946, 4.3637, 1.5562], + device='cuda:5'), covar=tensor([0.0718, 0.0760, 0.0777, 0.0889, 0.1037, 0.1442, 0.0612, 0.4704], + device='cuda:5'), in_proj_covar=tensor([0.0354, 0.0241, 0.0276, 0.0292, 0.0330, 0.0283, 0.0303, 0.0296], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:49:33,833 INFO [finetune.py:976] (5/7) Epoch 6, batch 1850, loss[loss=0.2058, simple_loss=0.273, pruned_loss=0.06933, over 4821.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2807, pruned_loss=0.08067, over 957015.07 frames. ], batch size: 47, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:49:36,897 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.79 vs. limit=5.0 +2023-03-26 05:49:44,728 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=30506.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:50:03,761 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.078e+02 1.671e+02 2.010e+02 2.577e+02 4.739e+02, threshold=4.020e+02, percent-clipped=1.0 +2023-03-26 05:50:03,958 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.58 vs. 
limit=5.0 +2023-03-26 05:50:22,794 INFO [finetune.py:976] (5/7) Epoch 6, batch 1900, loss[loss=0.2144, simple_loss=0.2807, pruned_loss=0.07401, over 4858.00 frames. ], tot_loss[loss=0.2215, simple_loss=0.2816, pruned_loss=0.08075, over 958194.80 frames. ], batch size: 31, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:50:33,154 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0642, 1.5992, 1.8844, 1.8766, 1.7012, 1.6952, 1.8411, 1.7647], + device='cuda:5'), covar=tensor([0.6317, 0.7800, 0.6091, 0.7747, 0.8462, 0.6029, 1.0167, 0.5885], + device='cuda:5'), in_proj_covar=tensor([0.0229, 0.0246, 0.0254, 0.0255, 0.0241, 0.0219, 0.0273, 0.0224], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:50:52,223 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30565.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:51:20,334 INFO [finetune.py:976] (5/7) Epoch 6, batch 1950, loss[loss=0.2781, simple_loss=0.3158, pruned_loss=0.1202, over 4896.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2795, pruned_loss=0.07978, over 958150.75 frames. ], batch size: 35, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:51:31,211 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3425, 1.4456, 1.5700, 1.2042, 1.3245, 1.4856, 1.3644, 1.6527], + device='cuda:5'), covar=tensor([0.1257, 0.2124, 0.1352, 0.1538, 0.0994, 0.1189, 0.3058, 0.0863], + device='cuda:5'), in_proj_covar=tensor([0.0205, 0.0203, 0.0198, 0.0194, 0.0182, 0.0219, 0.0215, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:51:58,196 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.083e+02 1.556e+02 1.883e+02 2.215e+02 4.222e+02, threshold=3.767e+02, percent-clipped=2.0 +2023-03-26 05:52:09,313 INFO [finetune.py:976] (5/7) Epoch 6, batch 2000, loss[loss=0.2911, simple_loss=0.3055, pruned_loss=0.1383, over 2891.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2767, pruned_loss=0.07901, over 955781.28 frames. ], batch size: 12, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:52:19,613 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30655.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:52:51,536 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-03-26 05:52:52,075 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8337, 1.2939, 1.0459, 1.7124, 2.3478, 1.4523, 1.6802, 1.6961], + device='cuda:5'), covar=tensor([0.1497, 0.2178, 0.2004, 0.1210, 0.1805, 0.2051, 0.1422, 0.2037], + device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0099, 0.0115, 0.0094, 0.0125, 0.0097, 0.0101, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 05:52:56,174 INFO [finetune.py:976] (5/7) Epoch 6, batch 2050, loss[loss=0.1948, simple_loss=0.2659, pruned_loss=0.06188, over 4865.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2713, pruned_loss=0.07587, over 956035.98 frames. 
], batch size: 31, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:52:57,416 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9972, 1.8774, 1.5703, 2.0052, 2.0353, 1.7548, 2.4285, 2.0301], + device='cuda:5'), covar=tensor([0.1687, 0.3226, 0.3758, 0.3372, 0.2863, 0.1866, 0.3538, 0.2184], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0192, 0.0236, 0.0254, 0.0231, 0.0191, 0.0211, 0.0191], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:53:02,864 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0655, 1.9286, 1.7911, 2.0696, 2.5936, 2.0659, 1.8346, 1.6231], + device='cuda:5'), covar=tensor([0.1947, 0.2103, 0.1792, 0.1660, 0.1831, 0.1129, 0.2340, 0.1707], + device='cuda:5'), in_proj_covar=tensor([0.0234, 0.0208, 0.0202, 0.0186, 0.0236, 0.0175, 0.0214, 0.0189], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:53:14,341 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30716.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:53:14,403 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30716.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 05:53:20,053 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.195e+02 1.687e+02 1.889e+02 2.318e+02 4.112e+02, threshold=3.779e+02, percent-clipped=3.0 +2023-03-26 05:53:40,715 INFO [finetune.py:976] (5/7) Epoch 6, batch 2100, loss[loss=0.1915, simple_loss=0.2657, pruned_loss=0.05866, over 4807.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2716, pruned_loss=0.07556, over 958057.74 frames. ], batch size: 41, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:54:11,269 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5683, 1.6452, 1.3615, 1.7433, 2.0621, 1.7007, 1.1314, 1.3264], + device='cuda:5'), covar=tensor([0.2485, 0.2195, 0.2154, 0.1824, 0.1953, 0.1293, 0.3055, 0.1989], + device='cuda:5'), in_proj_covar=tensor([0.0233, 0.0208, 0.0202, 0.0186, 0.0236, 0.0175, 0.0213, 0.0189], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:54:13,624 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30781.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:54:18,840 INFO [finetune.py:976] (5/7) Epoch 6, batch 2150, loss[loss=0.1785, simple_loss=0.259, pruned_loss=0.04904, over 4804.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2759, pruned_loss=0.07758, over 957451.77 frames. 
], batch size: 29, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:54:19,476 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5079, 1.3646, 1.2469, 1.3493, 1.6997, 1.6471, 1.4782, 1.2268], + device='cuda:5'), covar=tensor([0.0276, 0.0310, 0.0554, 0.0325, 0.0210, 0.0390, 0.0309, 0.0398], + device='cuda:5'), in_proj_covar=tensor([0.0086, 0.0111, 0.0136, 0.0116, 0.0103, 0.0099, 0.0090, 0.0108], + device='cuda:5'), out_proj_covar=tensor([6.7340e-05, 8.7107e-05, 1.0936e-04, 9.1946e-05, 8.1243e-05, 7.3812e-05, + 6.8790e-05, 8.4332e-05], device='cuda:5') +2023-03-26 05:54:20,701 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8195, 1.6964, 1.3903, 1.5000, 1.5739, 1.5415, 1.5844, 2.3683], + device='cuda:5'), covar=tensor([0.5946, 0.6297, 0.4715, 0.6216, 0.5598, 0.3545, 0.5960, 0.2205], + device='cuda:5'), in_proj_covar=tensor([0.0282, 0.0258, 0.0219, 0.0283, 0.0239, 0.0202, 0.0245, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:54:42,042 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.093e+02 1.708e+02 2.011e+02 2.625e+02 4.679e+02, threshold=4.022e+02, percent-clipped=7.0 +2023-03-26 05:54:42,774 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0249, 1.8234, 1.9166, 0.9694, 2.1399, 2.3849, 1.9380, 1.8846], + device='cuda:5'), covar=tensor([0.1021, 0.0744, 0.0547, 0.0753, 0.0462, 0.0534, 0.0498, 0.0622], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0158, 0.0120, 0.0138, 0.0133, 0.0124, 0.0147, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.8371e-05, 1.1684e-04, 8.7377e-05, 1.0050e-04, 9.5406e-05, 9.1455e-05, + 1.0874e-04, 1.0755e-04], device='cuda:5') +2023-03-26 05:54:46,132 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=30829.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:54:52,232 INFO [finetune.py:976] (5/7) Epoch 6, batch 2200, loss[loss=0.2191, simple_loss=0.2634, pruned_loss=0.08738, over 4147.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2779, pruned_loss=0.07863, over 956687.66 frames. ], batch size: 17, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:55:16,390 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30865.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:55:37,003 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0242, 1.8284, 1.5511, 1.7383, 1.7533, 1.7147, 1.6687, 2.5471], + device='cuda:5'), covar=tensor([0.6397, 0.7326, 0.5110, 0.6961, 0.5743, 0.3828, 0.7085, 0.2455], + device='cuda:5'), in_proj_covar=tensor([0.0282, 0.0258, 0.0220, 0.0284, 0.0239, 0.0202, 0.0246, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:55:37,465 INFO [finetune.py:976] (5/7) Epoch 6, batch 2250, loss[loss=0.1752, simple_loss=0.2455, pruned_loss=0.05241, over 4761.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2779, pruned_loss=0.07818, over 955623.67 frames. 
], batch size: 28, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:55:58,592 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30903.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:56:10,383 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=30913.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:56:22,248 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.065e+02 1.611e+02 1.958e+02 2.302e+02 5.232e+02, threshold=3.915e+02, percent-clipped=2.0 +2023-03-26 05:56:23,003 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30925.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:56:34,319 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30934.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:56:42,903 INFO [finetune.py:976] (5/7) Epoch 6, batch 2300, loss[loss=0.1911, simple_loss=0.2672, pruned_loss=0.05746, over 4927.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.278, pruned_loss=0.07762, over 954535.15 frames. ], batch size: 42, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:57:16,240 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30964.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:57:34,577 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1409, 2.1865, 2.3681, 1.2226, 2.5257, 2.8034, 2.3196, 2.1548], + device='cuda:5'), covar=tensor([0.0997, 0.0813, 0.0456, 0.0664, 0.0584, 0.0634, 0.0557, 0.0608], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0157, 0.0119, 0.0137, 0.0132, 0.0123, 0.0146, 0.0144], + device='cuda:5'), out_proj_covar=tensor([9.7596e-05, 1.1587e-04, 8.6636e-05, 9.9623e-05, 9.4956e-05, 9.0943e-05, + 1.0826e-04, 1.0673e-04], device='cuda:5') +2023-03-26 05:57:46,791 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30986.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:57:49,010 INFO [finetune.py:976] (5/7) Epoch 6, batch 2350, loss[loss=0.2199, simple_loss=0.2806, pruned_loss=0.07961, over 4793.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2752, pruned_loss=0.07685, over 955709.78 frames. 
], batch size: 51, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:57:57,487 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30995.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:58:06,673 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0263, 2.0493, 1.9453, 1.3406, 2.2486, 2.2085, 2.1853, 1.8706], + device='cuda:5'), covar=tensor([0.0581, 0.0565, 0.0667, 0.0935, 0.0471, 0.0600, 0.0535, 0.0861], + device='cuda:5'), in_proj_covar=tensor([0.0139, 0.0135, 0.0144, 0.0128, 0.0113, 0.0144, 0.0146, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:58:07,215 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0087, 4.1701, 3.9001, 2.1970, 4.2592, 3.3261, 1.2438, 3.0470], + device='cuda:5'), covar=tensor([0.2316, 0.2089, 0.1524, 0.3191, 0.0964, 0.0902, 0.4363, 0.1483], + device='cuda:5'), in_proj_covar=tensor([0.0155, 0.0170, 0.0163, 0.0128, 0.0156, 0.0123, 0.0145, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 05:58:19,822 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31011.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 05:58:19,853 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31011.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 05:58:28,248 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31016.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:58:33,050 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.055e+02 1.621e+02 1.904e+02 2.250e+02 3.149e+02, threshold=3.808e+02, percent-clipped=0.0 +2023-03-26 05:58:41,923 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0453, 1.8383, 1.5836, 1.7582, 1.7781, 1.7183, 1.6874, 2.5185], + device='cuda:5'), covar=tensor([0.5566, 0.6605, 0.4649, 0.6076, 0.5611, 0.3316, 0.6155, 0.2130], + device='cuda:5'), in_proj_covar=tensor([0.0284, 0.0259, 0.0220, 0.0285, 0.0241, 0.0203, 0.0247, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 05:58:53,274 INFO [finetune.py:976] (5/7) Epoch 6, batch 2400, loss[loss=0.2313, simple_loss=0.2856, pruned_loss=0.08844, over 4899.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2726, pruned_loss=0.07576, over 956304.70 frames. ], batch size: 36, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 05:59:17,402 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=31064.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 05:59:18,658 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1709, 1.7621, 2.4438, 4.2190, 3.0133, 2.8296, 0.7838, 3.3623], + device='cuda:5'), covar=tensor([0.1785, 0.1575, 0.1512, 0.0543, 0.0780, 0.1690, 0.2225, 0.0479], + device='cuda:5'), in_proj_covar=tensor([0.0104, 0.0121, 0.0139, 0.0169, 0.0104, 0.0145, 0.0131, 0.0105], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 05:59:24,030 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31072.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 05:59:44,204 INFO [finetune.py:976] (5/7) Epoch 6, batch 2450, loss[loss=0.2273, simple_loss=0.2919, pruned_loss=0.08135, over 4863.00 frames. 
], tot_loss[loss=0.2091, simple_loss=0.269, pruned_loss=0.07458, over 957151.49 frames. ], batch size: 31, lr: 3.91e-03, grad_scale: 32.0 +2023-03-26 06:00:21,953 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.316e+02 1.762e+02 2.221e+02 2.552e+02 5.044e+02, threshold=4.442e+02, percent-clipped=4.0 +2023-03-26 06:00:30,922 INFO [finetune.py:976] (5/7) Epoch 6, batch 2500, loss[loss=0.1801, simple_loss=0.249, pruned_loss=0.0556, over 4824.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2695, pruned_loss=0.07473, over 956925.88 frames. ], batch size: 33, lr: 3.91e-03, grad_scale: 16.0 +2023-03-26 06:00:31,604 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.81 vs. limit=5.0 +2023-03-26 06:00:38,174 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-03-26 06:00:56,628 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.48 vs. limit=5.0 +2023-03-26 06:01:06,651 INFO [finetune.py:976] (5/7) Epoch 6, batch 2550, loss[loss=0.2497, simple_loss=0.306, pruned_loss=0.09669, over 4836.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2741, pruned_loss=0.07687, over 956951.88 frames. ], batch size: 47, lr: 3.91e-03, grad_scale: 16.0 +2023-03-26 06:01:28,416 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6490, 1.5005, 1.4051, 1.6718, 1.9934, 1.7451, 1.1919, 1.3249], + device='cuda:5'), covar=tensor([0.2443, 0.2462, 0.2158, 0.1904, 0.1973, 0.1299, 0.2942, 0.1997], + device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0211, 0.0204, 0.0187, 0.0238, 0.0177, 0.0214, 0.0191], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:01:50,622 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.038e+02 1.680e+02 2.034e+02 2.315e+02 3.655e+02, threshold=4.067e+02, percent-clipped=0.0 +2023-03-26 06:02:04,207 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2890, 1.3067, 1.3910, 1.5327, 1.4265, 2.9073, 1.2493, 1.5354], + device='cuda:5'), covar=tensor([0.1022, 0.1847, 0.1300, 0.1045, 0.1670, 0.0304, 0.1555, 0.1691], + device='cuda:5'), in_proj_covar=tensor([0.0077, 0.0082, 0.0077, 0.0079, 0.0093, 0.0083, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 06:02:04,702 INFO [finetune.py:976] (5/7) Epoch 6, batch 2600, loss[loss=0.26, simple_loss=0.3182, pruned_loss=0.1009, over 4841.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2772, pruned_loss=0.07821, over 956303.69 frames. 
], batch size: 47, lr: 3.91e-03, grad_scale: 16.0 +2023-03-26 06:02:05,431 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3315, 2.0772, 1.6828, 0.8197, 1.8394, 1.8214, 1.6165, 1.9165], + device='cuda:5'), covar=tensor([0.0870, 0.0878, 0.1631, 0.2143, 0.1470, 0.2472, 0.2294, 0.0983], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0201, 0.0201, 0.0189, 0.0216, 0.0208, 0.0220, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:02:33,423 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31259.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:02:55,619 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7747, 1.7265, 2.0695, 1.4509, 2.0024, 2.0753, 1.6395, 2.3260], + device='cuda:5'), covar=tensor([0.1526, 0.2249, 0.1502, 0.1950, 0.0932, 0.1521, 0.2718, 0.1128], + device='cuda:5'), in_proj_covar=tensor([0.0208, 0.0208, 0.0201, 0.0197, 0.0185, 0.0223, 0.0220, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:03:04,531 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31281.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:03:09,261 INFO [finetune.py:976] (5/7) Epoch 6, batch 2650, loss[loss=0.2473, simple_loss=0.3138, pruned_loss=0.09043, over 4822.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2794, pruned_loss=0.07906, over 956219.91 frames. ], batch size: 39, lr: 3.91e-03, grad_scale: 16.0 +2023-03-26 06:03:15,218 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31290.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:03:38,136 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31311.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:03:47,925 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.761e+02 2.106e+02 2.462e+02 3.966e+02, threshold=4.213e+02, percent-clipped=0.0 +2023-03-26 06:03:59,092 INFO [finetune.py:976] (5/7) Epoch 6, batch 2700, loss[loss=0.2142, simple_loss=0.2786, pruned_loss=0.07486, over 4321.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2783, pruned_loss=0.07822, over 956771.56 frames. ], batch size: 66, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:04:23,337 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=31359.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:04:33,878 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31367.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 06:04:43,302 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7033, 1.6728, 2.0701, 1.3731, 1.9257, 2.1103, 1.5861, 2.2555], + device='cuda:5'), covar=tensor([0.1450, 0.2151, 0.1451, 0.2109, 0.0915, 0.1339, 0.2817, 0.0831], + device='cuda:5'), in_proj_covar=tensor([0.0206, 0.0207, 0.0199, 0.0196, 0.0185, 0.0222, 0.0219, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:05:04,061 INFO [finetune.py:976] (5/7) Epoch 6, batch 2750, loss[loss=0.1818, simple_loss=0.2433, pruned_loss=0.06018, over 4872.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2765, pruned_loss=0.07787, over 958919.33 frames. 
], batch size: 34, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:05:28,054 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31409.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:05:46,888 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.636e+02 1.989e+02 2.265e+02 3.886e+02, threshold=3.977e+02, percent-clipped=0.0 +2023-03-26 06:05:56,330 INFO [finetune.py:976] (5/7) Epoch 6, batch 2800, loss[loss=0.1779, simple_loss=0.2436, pruned_loss=0.05606, over 4859.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2732, pruned_loss=0.07678, over 959260.10 frames. ], batch size: 49, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:06:17,011 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31470.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:06:42,530 INFO [finetune.py:976] (5/7) Epoch 6, batch 2850, loss[loss=0.3074, simple_loss=0.3378, pruned_loss=0.1385, over 4912.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2713, pruned_loss=0.07616, over 957924.13 frames. ], batch size: 37, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:06:42,665 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4278, 2.1083, 1.6554, 0.7348, 1.8224, 1.9110, 1.6713, 1.8951], + device='cuda:5'), covar=tensor([0.0916, 0.0945, 0.1565, 0.2271, 0.1522, 0.2299, 0.2307, 0.1079], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0201, 0.0200, 0.0189, 0.0216, 0.0208, 0.0220, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:06:43,874 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31491.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 06:07:26,132 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.153e+02 1.576e+02 1.974e+02 2.520e+02 5.037e+02, threshold=3.948e+02, percent-clipped=2.0 +2023-03-26 06:07:36,767 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-03-26 06:07:42,539 INFO [finetune.py:976] (5/7) Epoch 6, batch 2900, loss[loss=0.2759, simple_loss=0.33, pruned_loss=0.1109, over 4900.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2737, pruned_loss=0.07701, over 956282.83 frames. 
], batch size: 43, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:07:51,457 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4197, 1.4656, 1.1488, 1.4432, 1.7559, 1.5743, 1.5352, 1.2384], + device='cuda:5'), covar=tensor([0.0284, 0.0345, 0.0598, 0.0299, 0.0206, 0.0474, 0.0284, 0.0404], + device='cuda:5'), in_proj_covar=tensor([0.0087, 0.0111, 0.0137, 0.0117, 0.0104, 0.0100, 0.0090, 0.0109], + device='cuda:5'), out_proj_covar=tensor([6.7729e-05, 8.7784e-05, 1.0994e-04, 9.2217e-05, 8.1558e-05, 7.4498e-05, + 6.8898e-05, 8.4634e-05], device='cuda:5') +2023-03-26 06:07:59,532 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31552.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 06:08:04,306 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31559.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:08:13,366 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7314, 1.6821, 1.5977, 1.9976, 2.1429, 1.9193, 1.3604, 1.4694], + device='cuda:5'), covar=tensor([0.2698, 0.2376, 0.2193, 0.1819, 0.2132, 0.1366, 0.3058, 0.2326], + device='cuda:5'), in_proj_covar=tensor([0.0234, 0.0208, 0.0202, 0.0186, 0.0236, 0.0175, 0.0212, 0.0190], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:08:19,183 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31581.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:08:27,343 INFO [finetune.py:976] (5/7) Epoch 6, batch 2950, loss[loss=0.2558, simple_loss=0.3126, pruned_loss=0.09948, over 4811.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2763, pruned_loss=0.07756, over 957692.42 frames. ], batch size: 51, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:08:33,435 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31590.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:08:47,781 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=31607.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:08:59,952 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.136e+02 1.780e+02 2.105e+02 2.565e+02 4.082e+02, threshold=4.210e+02, percent-clipped=1.0 +2023-03-26 06:09:02,952 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=31629.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:09:09,346 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=31638.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:09:09,893 INFO [finetune.py:976] (5/7) Epoch 6, batch 3000, loss[loss=0.2397, simple_loss=0.2954, pruned_loss=0.09203, over 4808.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2776, pruned_loss=0.0779, over 958435.06 frames. 
], batch size: 39, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:09:09,893 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 06:09:15,243 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4558, 1.2418, 1.2667, 1.3304, 1.6384, 1.5613, 1.4014, 1.2066], + device='cuda:5'), covar=tensor([0.0303, 0.0308, 0.0573, 0.0355, 0.0252, 0.0365, 0.0329, 0.0443], + device='cuda:5'), in_proj_covar=tensor([0.0086, 0.0111, 0.0137, 0.0116, 0.0103, 0.0100, 0.0090, 0.0108], + device='cuda:5'), out_proj_covar=tensor([6.7290e-05, 8.7190e-05, 1.0945e-04, 9.1587e-05, 8.1257e-05, 7.4104e-05, + 6.8327e-05, 8.4034e-05], device='cuda:5') +2023-03-26 06:09:23,489 INFO [finetune.py:1010] (5/7) Epoch 6, validation: loss=0.1625, simple_loss=0.2344, pruned_loss=0.04534, over 2265189.00 frames. +2023-03-26 06:09:23,490 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 06:09:42,570 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.65 vs. limit=5.0 +2023-03-26 06:09:55,806 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31667.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 06:10:19,291 INFO [finetune.py:976] (5/7) Epoch 6, batch 3050, loss[loss=0.2016, simple_loss=0.2712, pruned_loss=0.06604, over 4810.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2785, pruned_loss=0.07793, over 957490.35 frames. ], batch size: 38, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:10:26,977 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.07 vs. limit=2.0 +2023-03-26 06:10:28,742 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1668, 1.1539, 1.0428, 1.2055, 1.3933, 1.3544, 1.2534, 1.0832], + device='cuda:5'), covar=tensor([0.0368, 0.0342, 0.0606, 0.0298, 0.0280, 0.0457, 0.0291, 0.0394], + device='cuda:5'), in_proj_covar=tensor([0.0087, 0.0111, 0.0137, 0.0116, 0.0104, 0.0100, 0.0090, 0.0108], + device='cuda:5'), out_proj_covar=tensor([6.7715e-05, 8.7428e-05, 1.0989e-04, 9.1963e-05, 8.1764e-05, 7.4668e-05, + 6.8580e-05, 8.4412e-05], device='cuda:5') +2023-03-26 06:10:29,964 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31704.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:10:36,654 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=31715.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 06:10:40,378 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2060, 2.3047, 2.2297, 1.4886, 2.5350, 2.4897, 2.3365, 2.0698], + device='cuda:5'), covar=tensor([0.0638, 0.0583, 0.0722, 0.0957, 0.0440, 0.0711, 0.0711, 0.0959], + device='cuda:5'), in_proj_covar=tensor([0.0140, 0.0135, 0.0145, 0.0130, 0.0114, 0.0145, 0.0146, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:10:43,061 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.708e+02 2.072e+02 2.420e+02 4.871e+02, threshold=4.144e+02, percent-clipped=1.0 +2023-03-26 06:10:59,684 INFO [finetune.py:976] (5/7) Epoch 6, batch 3100, loss[loss=0.2206, simple_loss=0.2899, pruned_loss=0.07571, over 4891.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2761, pruned_loss=0.0772, over 955652.95 frames. ], batch size: 43, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:11:04,326 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.67 vs. 
limit=5.0 +2023-03-26 06:11:20,237 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31765.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:11:20,301 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31765.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:11:35,803 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31788.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:11:36,281 INFO [finetune.py:976] (5/7) Epoch 6, batch 3150, loss[loss=0.1889, simple_loss=0.2532, pruned_loss=0.06234, over 4858.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2738, pruned_loss=0.0768, over 954899.35 frames. ], batch size: 44, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:12:00,894 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31823.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:12:01,991 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.748e+01 1.682e+02 2.020e+02 2.535e+02 4.605e+02, threshold=4.040e+02, percent-clipped=1.0 +2023-03-26 06:12:15,954 INFO [finetune.py:976] (5/7) Epoch 6, batch 3200, loss[loss=0.2098, simple_loss=0.267, pruned_loss=0.07629, over 4807.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2703, pruned_loss=0.07542, over 955370.32 frames. ], batch size: 39, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:12:24,504 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4861, 1.3287, 1.2292, 1.4811, 1.6598, 1.4608, 0.8337, 1.2400], + device='cuda:5'), covar=tensor([0.2387, 0.2360, 0.2040, 0.1753, 0.1668, 0.1318, 0.2836, 0.1938], + device='cuda:5'), in_proj_covar=tensor([0.0233, 0.0208, 0.0202, 0.0185, 0.0236, 0.0175, 0.0212, 0.0189], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:12:25,654 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31847.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 06:12:32,395 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31849.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:12:33,494 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6459, 1.5459, 1.5400, 1.6917, 1.0307, 3.4182, 1.3301, 1.8198], + device='cuda:5'), covar=tensor([0.3255, 0.2281, 0.1945, 0.2140, 0.1940, 0.0179, 0.2596, 0.1295], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0114, 0.0118, 0.0122, 0.0118, 0.0099, 0.0101, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 06:13:02,347 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4633, 1.8615, 0.9257, 2.3326, 2.6178, 1.9238, 2.1468, 2.1857], + device='cuda:5'), covar=tensor([0.1424, 0.1886, 0.2298, 0.1128, 0.1854, 0.1826, 0.1258, 0.1997], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0098, 0.0115, 0.0093, 0.0124, 0.0096, 0.0100, 0.0093], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 06:13:04,288 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0553, 1.8017, 1.5216, 1.6794, 1.7459, 1.6828, 1.7167, 2.5561], + device='cuda:5'), covar=tensor([0.5806, 0.7286, 0.4933, 0.6126, 0.5548, 0.3402, 0.5643, 0.2171], + device='cuda:5'), in_proj_covar=tensor([0.0280, 0.0256, 0.0218, 0.0282, 0.0238, 0.0201, 0.0243, 0.0199], + device='cuda:5'), 
out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:13:06,107 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31878.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:13:11,180 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31884.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:13:14,083 INFO [finetune.py:976] (5/7) Epoch 6, batch 3250, loss[loss=0.3045, simple_loss=0.3314, pruned_loss=0.1388, over 4739.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.271, pruned_loss=0.07583, over 956545.68 frames. ], batch size: 54, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:13:28,422 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6740, 1.6226, 1.5782, 1.7426, 1.1675, 3.3888, 1.3712, 1.8994], + device='cuda:5'), covar=tensor([0.3324, 0.2485, 0.2031, 0.2244, 0.1962, 0.0205, 0.2507, 0.1281], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0114, 0.0118, 0.0122, 0.0118, 0.0099, 0.0101, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 06:13:51,946 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31915.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:14:01,794 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.169e+02 1.695e+02 2.048e+02 2.376e+02 4.231e+02, threshold=4.096e+02, percent-clipped=1.0 +2023-03-26 06:14:09,531 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5731, 1.5207, 1.4157, 1.6419, 1.1631, 3.6613, 1.3940, 1.9984], + device='cuda:5'), covar=tensor([0.3574, 0.2612, 0.2241, 0.2350, 0.1990, 0.0170, 0.3013, 0.1354], + device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0115, 0.0119, 0.0123, 0.0118, 0.0099, 0.0102, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 06:14:17,634 INFO [finetune.py:976] (5/7) Epoch 6, batch 3300, loss[loss=0.2254, simple_loss=0.2852, pruned_loss=0.08274, over 4925.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2749, pruned_loss=0.07716, over 956999.67 frames. ], batch size: 38, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:14:17,762 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31939.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:14:29,148 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31950.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:14:46,584 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31976.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:14:54,840 INFO [finetune.py:976] (5/7) Epoch 6, batch 3350, loss[loss=0.2195, simple_loss=0.2845, pruned_loss=0.07725, over 4838.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2777, pruned_loss=0.07862, over 957012.15 frames. 
], batch size: 49, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:15:11,495 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32011.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:15:22,691 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.186e+02 1.743e+02 2.097e+02 2.540e+02 4.089e+02, threshold=4.194e+02, percent-clipped=0.0 +2023-03-26 06:15:30,152 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.8477, 3.3903, 3.5186, 3.7783, 3.6106, 3.4098, 3.9305, 1.1739], + device='cuda:5'), covar=tensor([0.0896, 0.0855, 0.0859, 0.0977, 0.1415, 0.1476, 0.0845, 0.5195], + device='cuda:5'), in_proj_covar=tensor([0.0357, 0.0244, 0.0278, 0.0295, 0.0336, 0.0286, 0.0305, 0.0300], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:15:33,885 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0572, 0.8632, 0.9430, 1.0169, 1.1910, 1.1419, 1.0071, 0.9346], + device='cuda:5'), covar=tensor([0.0294, 0.0316, 0.0530, 0.0264, 0.0257, 0.0389, 0.0278, 0.0417], + device='cuda:5'), in_proj_covar=tensor([0.0086, 0.0111, 0.0137, 0.0116, 0.0103, 0.0099, 0.0090, 0.0108], + device='cuda:5'), out_proj_covar=tensor([6.7392e-05, 8.7511e-05, 1.0981e-04, 9.1375e-05, 8.1211e-05, 7.3765e-05, + 6.8460e-05, 8.4251e-05], device='cuda:5') +2023-03-26 06:15:41,803 INFO [finetune.py:976] (5/7) Epoch 6, batch 3400, loss[loss=0.2691, simple_loss=0.322, pruned_loss=0.1081, over 4780.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2791, pruned_loss=0.07922, over 955269.46 frames. ], batch size: 51, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:16:09,782 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=32060.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:16:13,837 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32065.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:16:41,403 INFO [finetune.py:976] (5/7) Epoch 6, batch 3450, loss[loss=0.1813, simple_loss=0.252, pruned_loss=0.05528, over 4769.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2781, pruned_loss=0.07836, over 956163.70 frames. ], batch size: 28, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:16:41,564 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7286, 1.5480, 1.3419, 1.1705, 1.4950, 1.4529, 1.4625, 2.1297], + device='cuda:5'), covar=tensor([0.5492, 0.5690, 0.4409, 0.5165, 0.4876, 0.3039, 0.5075, 0.2195], + device='cuda:5'), in_proj_covar=tensor([0.0282, 0.0258, 0.0220, 0.0283, 0.0239, 0.0202, 0.0245, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:17:08,248 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=32113.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:17:16,937 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.122e+02 1.573e+02 1.926e+02 2.516e+02 4.351e+02, threshold=3.853e+02, percent-clipped=2.0 +2023-03-26 06:17:25,471 INFO [finetune.py:976] (5/7) Epoch 6, batch 3500, loss[loss=0.1964, simple_loss=0.2501, pruned_loss=0.07137, over 4833.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2745, pruned_loss=0.07719, over 956660.40 frames. 
], batch size: 49, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:17:29,065 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=32144.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:17:30,908 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32147.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 06:17:33,391 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-03-26 06:18:09,976 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=32179.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:18:15,991 INFO [finetune.py:976] (5/7) Epoch 6, batch 3550, loss[loss=0.1813, simple_loss=0.2522, pruned_loss=0.05518, over 4897.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2724, pruned_loss=0.07622, over 956085.47 frames. ], batch size: 32, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:18:19,694 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=32195.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 06:18:40,466 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.015e+02 1.617e+02 1.889e+02 2.318e+02 4.823e+02, threshold=3.777e+02, percent-clipped=2.0 +2023-03-26 06:18:52,028 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=32234.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:19:00,025 INFO [finetune.py:976] (5/7) Epoch 6, batch 3600, loss[loss=0.1878, simple_loss=0.2495, pruned_loss=0.06307, over 4757.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2694, pruned_loss=0.07481, over 956320.09 frames. ], batch size: 26, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:19:31,873 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.4794, 3.8616, 4.0669, 4.3347, 4.1625, 3.9666, 4.6034, 1.4079], + device='cuda:5'), covar=tensor([0.0840, 0.0935, 0.0830, 0.0998, 0.1270, 0.1580, 0.0601, 0.5617], + device='cuda:5'), in_proj_covar=tensor([0.0353, 0.0241, 0.0274, 0.0293, 0.0333, 0.0283, 0.0300, 0.0297], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:19:36,023 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=32271.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:19:38,301 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2970, 2.2213, 1.8138, 2.2150, 2.2203, 1.8333, 2.7855, 2.3294], + device='cuda:5'), covar=tensor([0.1682, 0.3433, 0.3981, 0.3951, 0.3294, 0.2124, 0.3897, 0.2337], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0192, 0.0236, 0.0255, 0.0233, 0.0192, 0.0213, 0.0192], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:19:57,776 INFO [finetune.py:976] (5/7) Epoch 6, batch 3650, loss[loss=0.1883, simple_loss=0.2498, pruned_loss=0.06344, over 4763.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2716, pruned_loss=0.0758, over 955058.46 frames. 
], batch size: 26, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:20:14,267 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=32306.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:20:18,614 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6698, 1.4712, 1.5191, 1.6043, 0.9693, 3.6051, 1.3129, 1.8568], + device='cuda:5'), covar=tensor([0.3251, 0.2414, 0.2088, 0.2226, 0.1967, 0.0161, 0.2650, 0.1305], + device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0115, 0.0118, 0.0123, 0.0118, 0.0099, 0.0102, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 06:20:26,743 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.020e+02 1.776e+02 2.129e+02 2.587e+02 5.126e+02, threshold=4.259e+02, percent-clipped=5.0 +2023-03-26 06:20:43,050 INFO [finetune.py:976] (5/7) Epoch 6, batch 3700, loss[loss=0.2185, simple_loss=0.273, pruned_loss=0.08203, over 4852.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2767, pruned_loss=0.0774, over 954613.53 frames. ], batch size: 31, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:20:56,535 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32360.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:21:16,590 INFO [finetune.py:976] (5/7) Epoch 6, batch 3750, loss[loss=0.1935, simple_loss=0.2591, pruned_loss=0.06392, over 4823.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2775, pruned_loss=0.07775, over 954644.65 frames. ], batch size: 25, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:21:37,052 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=32408.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:21:48,323 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.045e+02 1.626e+02 1.929e+02 2.294e+02 3.909e+02, threshold=3.857e+02, percent-clipped=0.0 +2023-03-26 06:21:58,657 INFO [finetune.py:976] (5/7) Epoch 6, batch 3800, loss[loss=0.1941, simple_loss=0.2601, pruned_loss=0.06405, over 4922.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2775, pruned_loss=0.07772, over 952274.04 frames. ], batch size: 33, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:22:01,781 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32444.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:22:13,837 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-03-26 06:22:24,406 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32479.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:22:31,247 INFO [finetune.py:976] (5/7) Epoch 6, batch 3850, loss[loss=0.225, simple_loss=0.2848, pruned_loss=0.08261, over 4206.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2761, pruned_loss=0.07723, over 953351.83 frames. 
], batch size: 65, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:22:33,638 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=32492.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:22:33,688 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7691, 1.6873, 1.9644, 2.0327, 1.7151, 3.6857, 1.4913, 1.7510], + device='cuda:5'), covar=tensor([0.0862, 0.1757, 0.1021, 0.0898, 0.1500, 0.0246, 0.1378, 0.1630], + device='cuda:5'), in_proj_covar=tensor([0.0077, 0.0081, 0.0077, 0.0079, 0.0092, 0.0083, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 06:22:46,296 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0517, 1.9831, 1.9958, 1.2274, 2.2938, 2.2422, 2.0975, 1.7593], + device='cuda:5'), covar=tensor([0.0598, 0.0619, 0.0674, 0.0942, 0.0421, 0.0623, 0.0616, 0.1045], + device='cuda:5'), in_proj_covar=tensor([0.0140, 0.0136, 0.0145, 0.0129, 0.0114, 0.0146, 0.0147, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:22:53,999 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.617e+01 1.620e+02 2.056e+02 2.694e+02 5.558e+02, threshold=4.113e+02, percent-clipped=4.0 +2023-03-26 06:22:55,287 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=32527.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:23:00,084 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32534.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:23:02,280 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4854, 1.4565, 1.3277, 1.3830, 1.7420, 1.6603, 1.5081, 1.3169], + device='cuda:5'), covar=tensor([0.0319, 0.0264, 0.0489, 0.0273, 0.0206, 0.0379, 0.0273, 0.0352], + device='cuda:5'), in_proj_covar=tensor([0.0087, 0.0111, 0.0137, 0.0116, 0.0103, 0.0099, 0.0090, 0.0107], + device='cuda:5'), out_proj_covar=tensor([6.7899e-05, 8.7442e-05, 1.0955e-04, 9.1235e-05, 8.1192e-05, 7.3670e-05, + 6.8664e-05, 8.3821e-05], device='cuda:5') +2023-03-26 06:23:04,461 INFO [finetune.py:976] (5/7) Epoch 6, batch 3900, loss[loss=0.1768, simple_loss=0.2425, pruned_loss=0.05557, over 4858.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2739, pruned_loss=0.07671, over 955404.25 frames. ], batch size: 44, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:23:24,749 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32571.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:23:31,334 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=32582.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:23:36,048 INFO [finetune.py:976] (5/7) Epoch 6, batch 3950, loss[loss=0.2143, simple_loss=0.2769, pruned_loss=0.07586, over 4827.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2713, pruned_loss=0.07588, over 955643.10 frames. 
], batch size: 33, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:23:59,359 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32606.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:24:07,665 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=32619.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:24:11,251 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.084e+02 1.672e+02 2.141e+02 2.509e+02 4.478e+02, threshold=4.281e+02, percent-clipped=2.0 +2023-03-26 06:24:20,839 INFO [finetune.py:976] (5/7) Epoch 6, batch 4000, loss[loss=0.227, simple_loss=0.2916, pruned_loss=0.08121, over 4928.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2704, pruned_loss=0.07536, over 955323.00 frames. ], batch size: 33, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:24:30,959 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=32654.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:24:31,667 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8425, 1.7293, 1.4388, 1.5042, 1.8355, 1.5463, 2.1464, 1.8044], + device='cuda:5'), covar=tensor([0.1655, 0.2750, 0.4061, 0.3487, 0.3030, 0.2003, 0.3590, 0.2192], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0192, 0.0236, 0.0254, 0.0232, 0.0191, 0.0212, 0.0191], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:25:03,181 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-03-26 06:25:04,477 INFO [finetune.py:976] (5/7) Epoch 6, batch 4050, loss[loss=0.2464, simple_loss=0.3096, pruned_loss=0.09159, over 4765.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2753, pruned_loss=0.07684, over 955664.74 frames. ], batch size: 54, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:25:28,439 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.148e+02 1.831e+02 2.133e+02 2.637e+02 5.226e+02, threshold=4.267e+02, percent-clipped=1.0 +2023-03-26 06:25:42,718 INFO [finetune.py:976] (5/7) Epoch 6, batch 4100, loss[loss=0.2331, simple_loss=0.2934, pruned_loss=0.08637, over 4891.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.277, pruned_loss=0.07738, over 956098.28 frames. ], batch size: 35, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:26:15,832 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-03-26 06:26:34,196 INFO [finetune.py:976] (5/7) Epoch 6, batch 4150, loss[loss=0.1802, simple_loss=0.2562, pruned_loss=0.0521, over 4780.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2789, pruned_loss=0.0787, over 955588.49 frames. 
], batch size: 29, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:27:18,095 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.203e+02 1.784e+02 2.149e+02 2.523e+02 4.029e+02, threshold=4.299e+02, percent-clipped=0.0 +2023-03-26 06:27:31,343 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6858, 1.7977, 1.4719, 1.5857, 1.8384, 1.8988, 1.7222, 1.4466], + device='cuda:5'), covar=tensor([0.0325, 0.0256, 0.0543, 0.0278, 0.0231, 0.0459, 0.0296, 0.0385], + device='cuda:5'), in_proj_covar=tensor([0.0086, 0.0110, 0.0136, 0.0115, 0.0102, 0.0099, 0.0090, 0.0107], + device='cuda:5'), out_proj_covar=tensor([6.7137e-05, 8.6563e-05, 1.0880e-04, 9.0357e-05, 8.0778e-05, 7.3290e-05, + 6.8288e-05, 8.3607e-05], device='cuda:5') +2023-03-26 06:27:37,291 INFO [finetune.py:976] (5/7) Epoch 6, batch 4200, loss[loss=0.1864, simple_loss=0.2514, pruned_loss=0.06071, over 4818.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2791, pruned_loss=0.07856, over 953915.38 frames. ], batch size: 47, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:27:38,018 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4226, 1.4330, 1.6257, 1.6386, 1.5609, 3.2161, 1.2778, 1.6095], + device='cuda:5'), covar=tensor([0.1003, 0.1851, 0.1122, 0.1097, 0.1633, 0.0265, 0.1610, 0.1744], + device='cuda:5'), in_proj_covar=tensor([0.0077, 0.0081, 0.0077, 0.0079, 0.0092, 0.0083, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 06:28:12,769 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-03-26 06:28:34,721 INFO [finetune.py:976] (5/7) Epoch 6, batch 4250, loss[loss=0.232, simple_loss=0.2713, pruned_loss=0.09631, over 4829.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.276, pruned_loss=0.07765, over 951218.77 frames. ], batch size: 41, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:28:44,215 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-03-26 06:29:25,480 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.113e+02 1.595e+02 1.920e+02 2.303e+02 3.727e+02, threshold=3.841e+02, percent-clipped=0.0 +2023-03-26 06:29:44,605 INFO [finetune.py:976] (5/7) Epoch 6, batch 4300, loss[loss=0.1556, simple_loss=0.2251, pruned_loss=0.04302, over 4820.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2722, pruned_loss=0.07583, over 951632.91 frames. ], batch size: 40, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:29:57,832 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.11 vs. limit=5.0 +2023-03-26 06:30:17,445 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32966.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:30:47,044 INFO [finetune.py:976] (5/7) Epoch 6, batch 4350, loss[loss=0.2285, simple_loss=0.2838, pruned_loss=0.08663, over 4845.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2694, pruned_loss=0.07479, over 953103.19 frames. 
], batch size: 47, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:31:32,014 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.139e+02 1.730e+02 2.041e+02 2.589e+02 3.941e+02, threshold=4.082e+02, percent-clipped=1.0 +2023-03-26 06:31:38,492 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5075, 1.6083, 1.2798, 1.4267, 1.6777, 1.6571, 1.4420, 1.2797], + device='cuda:5'), covar=tensor([0.0280, 0.0287, 0.0514, 0.0332, 0.0225, 0.0416, 0.0310, 0.0424], + device='cuda:5'), in_proj_covar=tensor([0.0085, 0.0110, 0.0136, 0.0115, 0.0102, 0.0099, 0.0090, 0.0107], + device='cuda:5'), out_proj_covar=tensor([6.6881e-05, 8.6766e-05, 1.0856e-04, 9.0533e-05, 8.0784e-05, 7.3266e-05, + 6.8299e-05, 8.3774e-05], device='cuda:5') +2023-03-26 06:31:38,499 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=33027.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:31:40,143 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0 +2023-03-26 06:31:50,660 INFO [finetune.py:976] (5/7) Epoch 6, batch 4400, loss[loss=0.2095, simple_loss=0.2687, pruned_loss=0.0752, over 4914.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2712, pruned_loss=0.07577, over 952328.26 frames. ], batch size: 36, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:31:53,748 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-03-26 06:32:10,594 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7253, 2.4237, 2.8513, 1.9046, 2.6643, 2.9753, 2.1173, 3.0652], + device='cuda:5'), covar=tensor([0.1443, 0.1987, 0.1566, 0.2610, 0.1058, 0.1360, 0.2785, 0.0831], + device='cuda:5'), in_proj_covar=tensor([0.0202, 0.0202, 0.0197, 0.0193, 0.0182, 0.0218, 0.0215, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:32:31,753 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=33069.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:32:54,587 INFO [finetune.py:976] (5/7) Epoch 6, batch 4450, loss[loss=0.2678, simple_loss=0.3159, pruned_loss=0.1099, over 4905.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2748, pruned_loss=0.07726, over 950620.56 frames. ], batch size: 36, lr: 3.90e-03, grad_scale: 16.0 +2023-03-26 06:32:56,347 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5959, 3.4089, 3.1675, 1.6856, 3.5872, 2.6881, 0.7252, 2.3495], + device='cuda:5'), covar=tensor([0.2440, 0.2367, 0.1879, 0.3478, 0.1249, 0.1034, 0.4787, 0.1689], + device='cuda:5'), in_proj_covar=tensor([0.0154, 0.0171, 0.0162, 0.0127, 0.0155, 0.0121, 0.0144, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 06:33:39,236 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.174e+02 1.714e+02 2.125e+02 2.690e+02 4.211e+02, threshold=4.250e+02, percent-clipped=1.0 +2023-03-26 06:33:48,025 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=33130.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:33:58,505 INFO [finetune.py:976] (5/7) Epoch 6, batch 4500, loss[loss=0.2071, simple_loss=0.2747, pruned_loss=0.06977, over 4882.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2767, pruned_loss=0.07787, over 952092.89 frames. 
], batch size: 32, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:34:19,039 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=33155.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:35:01,064 INFO [finetune.py:976] (5/7) Epoch 6, batch 4550, loss[loss=0.2272, simple_loss=0.2944, pruned_loss=0.08, over 4720.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2779, pruned_loss=0.07841, over 950879.13 frames. ], batch size: 54, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:35:33,200 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=33216.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:35:33,258 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-03-26 06:35:44,647 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.068e+02 1.755e+02 2.084e+02 2.335e+02 4.621e+02, threshold=4.168e+02, percent-clipped=2.0 +2023-03-26 06:36:05,047 INFO [finetune.py:976] (5/7) Epoch 6, batch 4600, loss[loss=0.191, simple_loss=0.2642, pruned_loss=0.05895, over 4845.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2758, pruned_loss=0.0767, over 951070.62 frames. ], batch size: 49, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:36:24,595 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-26 06:36:33,165 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.38 vs. limit=5.0 +2023-03-26 06:37:07,547 INFO [finetune.py:976] (5/7) Epoch 6, batch 4650, loss[loss=0.2335, simple_loss=0.2794, pruned_loss=0.09379, over 4888.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.274, pruned_loss=0.07687, over 953249.79 frames. ], batch size: 43, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:37:48,447 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33322.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:37:50,592 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.141e+02 1.572e+02 1.961e+02 2.434e+02 3.752e+02, threshold=3.921e+02, percent-clipped=0.0 +2023-03-26 06:37:55,918 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9187, 1.7070, 1.4880, 1.5456, 1.6331, 1.6291, 1.6389, 2.3843], + device='cuda:5'), covar=tensor([0.5423, 0.5937, 0.4480, 0.5683, 0.5207, 0.3145, 0.5426, 0.2117], + device='cuda:5'), in_proj_covar=tensor([0.0281, 0.0258, 0.0220, 0.0282, 0.0239, 0.0202, 0.0245, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:38:10,953 INFO [finetune.py:976] (5/7) Epoch 6, batch 4700, loss[loss=0.1819, simple_loss=0.2408, pruned_loss=0.06148, over 4723.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2707, pruned_loss=0.07542, over 954660.33 frames. ], batch size: 23, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:38:48,506 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.69 vs. limit=2.0 +2023-03-26 06:39:20,540 INFO [finetune.py:976] (5/7) Epoch 6, batch 4750, loss[loss=0.1907, simple_loss=0.2541, pruned_loss=0.06366, over 4743.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2689, pruned_loss=0.07505, over 954217.55 frames. 
], batch size: 27, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:40:04,159 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 1.633e+02 1.917e+02 2.350e+02 3.562e+02, threshold=3.835e+02, percent-clipped=0.0 +2023-03-26 06:40:04,241 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33425.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:40:24,230 INFO [finetune.py:976] (5/7) Epoch 6, batch 4800, loss[loss=0.2575, simple_loss=0.3199, pruned_loss=0.09758, over 4823.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2719, pruned_loss=0.07627, over 956305.68 frames. ], batch size: 33, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:40:35,548 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5316, 1.3814, 1.3901, 1.5059, 1.0038, 3.1628, 1.2066, 1.7812], + device='cuda:5'), covar=tensor([0.3390, 0.2476, 0.2162, 0.2365, 0.2043, 0.0214, 0.2906, 0.1364], + device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0114, 0.0118, 0.0122, 0.0118, 0.0099, 0.0102, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 06:41:07,478 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=33472.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:41:20,107 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-03-26 06:41:28,612 INFO [finetune.py:976] (5/7) Epoch 6, batch 4850, loss[loss=0.1838, simple_loss=0.2495, pruned_loss=0.05909, over 4762.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2748, pruned_loss=0.07662, over 957058.40 frames. ], batch size: 26, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:41:31,691 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-03-26 06:41:59,613 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-03-26 06:41:59,905 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33511.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:42:14,018 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.841e+02 2.130e+02 2.477e+02 4.983e+02, threshold=4.260e+02, percent-clipped=3.0 +2023-03-26 06:42:24,113 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=33533.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:42:32,811 INFO [finetune.py:976] (5/7) Epoch 6, batch 4900, loss[loss=0.1801, simple_loss=0.255, pruned_loss=0.05263, over 4740.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2762, pruned_loss=0.07762, over 955295.71 frames. ], batch size: 27, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:43:15,996 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.68 vs. limit=2.0 +2023-03-26 06:43:24,236 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9444, 1.6796, 1.5226, 1.7378, 1.6525, 1.6671, 1.5918, 2.4312], + device='cuda:5'), covar=tensor([0.5962, 0.7112, 0.4746, 0.6565, 0.6390, 0.3395, 0.6682, 0.2328], + device='cuda:5'), in_proj_covar=tensor([0.0285, 0.0260, 0.0222, 0.0285, 0.0241, 0.0205, 0.0247, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:43:36,450 INFO [finetune.py:976] (5/7) Epoch 6, batch 4950, loss[loss=0.1816, simple_loss=0.2497, pruned_loss=0.05678, over 4890.00 frames. 
], tot_loss[loss=0.2147, simple_loss=0.2765, pruned_loss=0.07644, over 956812.61 frames. ], batch size: 37, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:44:20,475 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33622.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:44:22,686 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.562e+01 1.670e+02 2.087e+02 2.382e+02 5.310e+02, threshold=4.173e+02, percent-clipped=3.0 +2023-03-26 06:44:41,927 INFO [finetune.py:976] (5/7) Epoch 6, batch 5000, loss[loss=0.2213, simple_loss=0.2766, pruned_loss=0.08303, over 4831.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2737, pruned_loss=0.0752, over 954504.48 frames. ], batch size: 33, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:45:09,558 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5497, 1.2719, 1.3590, 1.3353, 1.6769, 1.5971, 1.5087, 1.2860], + device='cuda:5'), covar=tensor([0.0274, 0.0294, 0.0506, 0.0300, 0.0220, 0.0449, 0.0266, 0.0398], + device='cuda:5'), in_proj_covar=tensor([0.0086, 0.0109, 0.0135, 0.0114, 0.0102, 0.0098, 0.0089, 0.0107], + device='cuda:5'), out_proj_covar=tensor([6.6835e-05, 8.5517e-05, 1.0837e-04, 9.0150e-05, 8.0601e-05, 7.2822e-05, + 6.7690e-05, 8.3170e-05], device='cuda:5') +2023-03-26 06:45:14,311 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=33670.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:45:19,064 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=33677.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:45:20,292 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.74 vs. limit=5.0 +2023-03-26 06:45:26,105 INFO [finetune.py:976] (5/7) Epoch 6, batch 5050, loss[loss=0.2196, simple_loss=0.2746, pruned_loss=0.08232, over 4943.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2715, pruned_loss=0.07488, over 954793.93 frames. ], batch size: 33, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:45:50,304 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.151e+02 1.621e+02 1.877e+02 2.380e+02 3.773e+02, threshold=3.754e+02, percent-clipped=0.0 +2023-03-26 06:45:50,428 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33725.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:45:50,704 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.08 vs. limit=5.0 +2023-03-26 06:45:58,833 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=33738.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 06:45:59,305 INFO [finetune.py:976] (5/7) Epoch 6, batch 5100, loss[loss=0.238, simple_loss=0.2821, pruned_loss=0.09697, over 4919.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2673, pruned_loss=0.07305, over 955315.99 frames. 
], batch size: 37, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:46:20,225 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6682, 1.5559, 1.5828, 1.5920, 1.0830, 3.3889, 1.3681, 1.9924], + device='cuda:5'), covar=tensor([0.3449, 0.2592, 0.2038, 0.2426, 0.2022, 0.0210, 0.2568, 0.1274], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0114, 0.0118, 0.0122, 0.0117, 0.0098, 0.0101, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 06:46:21,429 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6345, 1.5226, 1.4941, 1.5760, 1.0699, 3.2795, 1.3215, 1.9857], + device='cuda:5'), covar=tensor([0.3115, 0.2378, 0.2022, 0.2146, 0.1893, 0.0208, 0.2453, 0.1209], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0114, 0.0118, 0.0122, 0.0117, 0.0098, 0.0101, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 06:46:22,564 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=33773.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:46:32,748 INFO [finetune.py:976] (5/7) Epoch 6, batch 5150, loss[loss=0.2485, simple_loss=0.3077, pruned_loss=0.09468, over 4868.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2679, pruned_loss=0.07385, over 951957.35 frames. ], batch size: 44, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:46:48,246 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33811.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:46:57,550 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.284e+02 1.768e+02 2.107e+02 2.614e+02 3.782e+02, threshold=4.214e+02, percent-clipped=1.0 +2023-03-26 06:46:59,464 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33828.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:47:06,634 INFO [finetune.py:976] (5/7) Epoch 6, batch 5200, loss[loss=0.2498, simple_loss=0.3052, pruned_loss=0.09724, over 4906.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2709, pruned_loss=0.07459, over 951645.02 frames. ], batch size: 37, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:47:12,042 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.57 vs. limit=5.0 +2023-03-26 06:47:19,516 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=33859.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:47:44,970 INFO [finetune.py:976] (5/7) Epoch 6, batch 5250, loss[loss=0.1718, simple_loss=0.2452, pruned_loss=0.04915, over 4102.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2728, pruned_loss=0.07538, over 950009.08 frames. 
], batch size: 65, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:48:01,354 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8461, 4.4320, 4.2814, 2.2097, 4.5938, 3.5102, 0.9448, 3.2074], + device='cuda:5'), covar=tensor([0.2286, 0.1455, 0.1214, 0.3029, 0.0644, 0.0732, 0.4319, 0.1248], + device='cuda:5'), in_proj_covar=tensor([0.0155, 0.0171, 0.0162, 0.0127, 0.0155, 0.0122, 0.0146, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 06:48:03,019 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9230, 1.3788, 0.8294, 1.9173, 2.1932, 1.7501, 1.5424, 1.8464], + device='cuda:5'), covar=tensor([0.1514, 0.2035, 0.2326, 0.1182, 0.2138, 0.2018, 0.1477, 0.1879], + device='cuda:5'), in_proj_covar=tensor([0.0093, 0.0099, 0.0116, 0.0094, 0.0125, 0.0097, 0.0102, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 06:48:10,022 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.099e+02 1.728e+02 2.112e+02 2.559e+02 4.196e+02, threshold=4.224e+02, percent-clipped=0.0 +2023-03-26 06:48:18,712 INFO [finetune.py:976] (5/7) Epoch 6, batch 5300, loss[loss=0.2311, simple_loss=0.2938, pruned_loss=0.08424, over 4797.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2752, pruned_loss=0.07622, over 952804.08 frames. ], batch size: 51, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:48:19,384 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=33939.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:48:32,951 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-26 06:48:53,986 INFO [finetune.py:976] (5/7) Epoch 6, batch 5350, loss[loss=0.1947, simple_loss=0.2572, pruned_loss=0.06615, over 4815.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2748, pruned_loss=0.07593, over 953729.87 frames. ], batch size: 39, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:49:07,552 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34000.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:49:18,199 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-03-26 06:49:40,309 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.730e+02 2.013e+02 2.437e+02 5.230e+02, threshold=4.026e+02, percent-clipped=3.0 +2023-03-26 06:49:50,509 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34033.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 06:49:59,343 INFO [finetune.py:976] (5/7) Epoch 6, batch 5400, loss[loss=0.1835, simple_loss=0.2487, pruned_loss=0.05916, over 4903.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2713, pruned_loss=0.07457, over 954178.29 frames. ], batch size: 46, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:50:02,515 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34044.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:50:18,891 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.91 vs. limit=5.0 +2023-03-26 06:50:31,405 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.05 vs. limit=5.0 +2023-03-26 06:50:51,577 INFO [finetune.py:976] (5/7) Epoch 6, batch 5450, loss[loss=0.1859, simple_loss=0.2492, pruned_loss=0.06129, over 4909.00 frames. 
], tot_loss[loss=0.2079, simple_loss=0.2683, pruned_loss=0.07375, over 953680.91 frames. ], batch size: 36, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:50:52,003 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-26 06:51:04,654 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34105.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:51:17,056 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.064e+02 1.588e+02 1.777e+02 2.163e+02 4.002e+02, threshold=3.553e+02, percent-clipped=0.0 +2023-03-26 06:51:19,926 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34128.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:51:24,471 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.4650, 1.6228, 1.6839, 0.8537, 1.6591, 1.9249, 1.9016, 1.5440], + device='cuda:5'), covar=tensor([0.0999, 0.0557, 0.0444, 0.0671, 0.0467, 0.0608, 0.0323, 0.0663], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0157, 0.0120, 0.0137, 0.0131, 0.0124, 0.0146, 0.0145], + device='cuda:5'), out_proj_covar=tensor([9.6904e-05, 1.1587e-04, 8.7123e-05, 1.0019e-04, 9.4003e-05, 9.1207e-05, + 1.0824e-04, 1.0702e-04], device='cuda:5') +2023-03-26 06:51:27,293 INFO [finetune.py:976] (5/7) Epoch 6, batch 5500, loss[loss=0.2301, simple_loss=0.2891, pruned_loss=0.08552, over 4845.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2659, pruned_loss=0.07312, over 953134.44 frames. ], batch size: 47, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:51:30,405 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5454, 1.5608, 1.8907, 1.7427, 1.6502, 3.3489, 1.4681, 1.7182], + device='cuda:5'), covar=tensor([0.0931, 0.1586, 0.1168, 0.1012, 0.1456, 0.0259, 0.1363, 0.1490], + device='cuda:5'), in_proj_covar=tensor([0.0077, 0.0081, 0.0076, 0.0079, 0.0092, 0.0083, 0.0084, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 06:51:31,005 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34145.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:51:50,701 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=34176.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:52:00,938 INFO [finetune.py:976] (5/7) Epoch 6, batch 5550, loss[loss=0.1833, simple_loss=0.2406, pruned_loss=0.06302, over 4195.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.27, pruned_loss=0.07559, over 953986.82 frames. 
], batch size: 18, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:52:05,284 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5544, 1.3722, 2.2068, 3.2396, 2.1613, 2.1454, 1.2486, 2.5012], + device='cuda:5'), covar=tensor([0.1689, 0.1604, 0.1161, 0.0547, 0.0871, 0.1587, 0.1638, 0.0567], + device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0119, 0.0137, 0.0167, 0.0103, 0.0141, 0.0129, 0.0103], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 06:52:15,711 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34206.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:52:37,391 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.199e+02 1.703e+02 1.979e+02 2.376e+02 4.570e+02, threshold=3.959e+02, percent-clipped=2.0 +2023-03-26 06:52:55,407 INFO [finetune.py:976] (5/7) Epoch 6, batch 5600, loss[loss=0.2575, simple_loss=0.3156, pruned_loss=0.09969, over 4910.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2735, pruned_loss=0.07599, over 953168.47 frames. ], batch size: 37, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:53:54,497 INFO [finetune.py:976] (5/7) Epoch 6, batch 5650, loss[loss=0.1716, simple_loss=0.2478, pruned_loss=0.04766, over 4792.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2763, pruned_loss=0.07651, over 952526.20 frames. ], batch size: 29, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:53:58,429 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34295.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:54:25,006 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0533, 2.7133, 2.3349, 1.5746, 2.5271, 2.4821, 2.2000, 2.3985], + device='cuda:5'), covar=tensor([0.0768, 0.0783, 0.1304, 0.1902, 0.1190, 0.1523, 0.1807, 0.1005], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0200, 0.0199, 0.0188, 0.0215, 0.0206, 0.0218, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 06:54:35,326 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.805e+01 1.675e+02 2.029e+02 2.481e+02 4.265e+02, threshold=4.057e+02, percent-clipped=3.0 +2023-03-26 06:54:40,138 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34333.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:54:48,434 INFO [finetune.py:976] (5/7) Epoch 6, batch 5700, loss[loss=0.1894, simple_loss=0.2389, pruned_loss=0.06995, over 4211.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2726, pruned_loss=0.07638, over 931530.36 frames. ], batch size: 18, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:55:18,048 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6658, 1.5629, 1.4240, 1.3957, 1.7756, 1.7797, 1.5683, 1.3679], + device='cuda:5'), covar=tensor([0.0323, 0.0295, 0.0558, 0.0315, 0.0275, 0.0398, 0.0313, 0.0384], + device='cuda:5'), in_proj_covar=tensor([0.0088, 0.0112, 0.0140, 0.0118, 0.0105, 0.0101, 0.0091, 0.0110], + device='cuda:5'), out_proj_covar=tensor([6.9042e-05, 8.8174e-05, 1.1210e-04, 9.2656e-05, 8.3013e-05, 7.4811e-05, + 6.9318e-05, 8.5495e-05], device='cuda:5') +2023-03-26 06:55:38,390 INFO [finetune.py:976] (5/7) Epoch 7, batch 0, loss[loss=0.1922, simple_loss=0.266, pruned_loss=0.05924, over 4755.00 frames. ], tot_loss[loss=0.1922, simple_loss=0.266, pruned_loss=0.05924, over 4755.00 frames. 
], batch size: 28, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:55:38,390 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 06:55:55,927 INFO [finetune.py:1010] (5/7) Epoch 7, validation: loss=0.165, simple_loss=0.2365, pruned_loss=0.04677, over 2265189.00 frames. +2023-03-26 06:55:55,927 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 06:56:14,681 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=34381.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:56:36,914 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34400.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:56:59,207 INFO [finetune.py:976] (5/7) Epoch 7, batch 50, loss[loss=0.214, simple_loss=0.2809, pruned_loss=0.07352, over 4728.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2781, pruned_loss=0.07824, over 215423.38 frames. ], batch size: 59, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:57:04,149 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5283, 1.4997, 1.7785, 1.7636, 1.6571, 3.4557, 1.3882, 1.5915], + device='cuda:5'), covar=tensor([0.1066, 0.1881, 0.1142, 0.1052, 0.1732, 0.0217, 0.1550, 0.1794], + device='cuda:5'), in_proj_covar=tensor([0.0077, 0.0081, 0.0076, 0.0079, 0.0092, 0.0083, 0.0084, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 06:57:09,546 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.105e+02 1.600e+02 2.022e+02 2.565e+02 5.766e+02, threshold=4.045e+02, percent-clipped=4.0 +2023-03-26 06:57:19,391 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-03-26 06:58:05,578 INFO [finetune.py:976] (5/7) Epoch 7, batch 100, loss[loss=0.2041, simple_loss=0.2579, pruned_loss=0.07517, over 4798.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2695, pruned_loss=0.07408, over 380429.46 frames. ], batch size: 45, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:58:29,314 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4677, 1.4988, 1.6318, 1.7915, 1.5490, 3.2462, 1.3298, 1.5842], + device='cuda:5'), covar=tensor([0.1026, 0.1786, 0.1160, 0.0980, 0.1587, 0.0262, 0.1514, 0.1693], + device='cuda:5'), in_proj_covar=tensor([0.0077, 0.0080, 0.0076, 0.0079, 0.0092, 0.0083, 0.0084, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 06:58:45,259 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34501.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 06:59:06,469 INFO [finetune.py:976] (5/7) Epoch 7, batch 150, loss[loss=0.1954, simple_loss=0.2523, pruned_loss=0.06923, over 4911.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2651, pruned_loss=0.07215, over 508717.08 frames. 
], batch size: 37, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 06:59:07,719 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0773, 0.8228, 0.9224, 1.0621, 1.1912, 1.1364, 1.0388, 1.0073], + device='cuda:5'), covar=tensor([0.0303, 0.0341, 0.0588, 0.0300, 0.0272, 0.0424, 0.0310, 0.0410], + device='cuda:5'), in_proj_covar=tensor([0.0088, 0.0111, 0.0139, 0.0116, 0.0104, 0.0100, 0.0091, 0.0109], + device='cuda:5'), out_proj_covar=tensor([6.8405e-05, 8.7596e-05, 1.1116e-04, 9.1828e-05, 8.2120e-05, 7.4255e-05, + 6.8972e-05, 8.4730e-05], device='cuda:5') +2023-03-26 06:59:17,660 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.007e+02 1.586e+02 1.902e+02 2.328e+02 6.438e+02, threshold=3.804e+02, percent-clipped=3.0 +2023-03-26 06:59:26,644 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1791, 2.0908, 1.7329, 2.0928, 1.9552, 1.8977, 1.8782, 2.8355], + device='cuda:5'), covar=tensor([0.5802, 0.7659, 0.4935, 0.7274, 0.6580, 0.3573, 0.7282, 0.2222], + device='cuda:5'), in_proj_covar=tensor([0.0283, 0.0258, 0.0220, 0.0282, 0.0240, 0.0204, 0.0245, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:00:10,778 INFO [finetune.py:976] (5/7) Epoch 7, batch 200, loss[loss=0.2797, simple_loss=0.3326, pruned_loss=0.1134, over 4747.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2657, pruned_loss=0.07366, over 608720.39 frames. ], batch size: 59, lr: 3.89e-03, grad_scale: 32.0 +2023-03-26 07:00:42,237 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34593.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:00:43,480 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34595.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:01:13,423 INFO [finetune.py:976] (5/7) Epoch 7, batch 250, loss[loss=0.1995, simple_loss=0.2642, pruned_loss=0.06738, over 4829.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2701, pruned_loss=0.0755, over 684373.24 frames. 
], batch size: 33, lr: 3.88e-03, grad_scale: 32.0 +2023-03-26 07:01:22,750 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34622.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:01:24,456 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.284e+02 1.737e+02 2.023e+02 2.550e+02 3.958e+02, threshold=4.047e+02, percent-clipped=1.0 +2023-03-26 07:01:25,210 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34626.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:01:35,279 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5872, 1.4103, 1.7065, 1.9233, 1.5625, 3.3581, 1.3143, 1.5346], + device='cuda:5'), covar=tensor([0.0968, 0.1839, 0.1267, 0.1008, 0.1619, 0.0227, 0.1542, 0.1773], + device='cuda:5'), in_proj_covar=tensor([0.0078, 0.0081, 0.0077, 0.0080, 0.0093, 0.0084, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 07:01:45,027 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=34643.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:02:01,674 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34654.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:02:14,154 INFO [finetune.py:976] (5/7) Epoch 7, batch 300, loss[loss=0.1834, simple_loss=0.2447, pruned_loss=0.06104, over 4108.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2727, pruned_loss=0.07619, over 745535.44 frames. ], batch size: 17, lr: 3.88e-03, grad_scale: 32.0 +2023-03-26 07:02:23,944 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([5.2674, 4.6844, 4.9719, 4.9173, 4.7702, 4.5975, 5.3957, 1.7396], + device='cuda:5'), covar=tensor([0.0865, 0.1369, 0.0924, 0.1898, 0.1588, 0.1753, 0.0697, 0.6899], + device='cuda:5'), in_proj_covar=tensor([0.0353, 0.0244, 0.0275, 0.0295, 0.0336, 0.0285, 0.0304, 0.0298], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:02:34,511 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34683.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 07:02:41,997 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34687.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:02:55,206 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34700.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:03:06,644 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34710.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 07:03:15,679 INFO [finetune.py:976] (5/7) Epoch 7, batch 350, loss[loss=0.2633, simple_loss=0.319, pruned_loss=0.1038, over 4886.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2745, pruned_loss=0.07643, over 793772.86 frames. 
], batch size: 32, lr: 3.88e-03, grad_scale: 32.0 +2023-03-26 07:03:26,685 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7964, 1.6602, 1.6296, 1.8183, 1.4383, 4.3531, 1.5861, 2.3428], + device='cuda:5'), covar=tensor([0.3256, 0.2577, 0.2108, 0.2227, 0.1729, 0.0097, 0.2529, 0.1282], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0114, 0.0117, 0.0122, 0.0116, 0.0098, 0.0101, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 07:03:27,162 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.146e+02 1.664e+02 2.043e+02 2.496e+02 5.690e+02, threshold=4.087e+02, percent-clipped=3.0 +2023-03-26 07:03:47,061 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-03-26 07:03:53,905 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-03-26 07:03:54,753 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=34748.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:04:16,439 INFO [finetune.py:976] (5/7) Epoch 7, batch 400, loss[loss=0.2201, simple_loss=0.2849, pruned_loss=0.07768, over 4885.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2755, pruned_loss=0.07676, over 830007.10 frames. ], batch size: 35, lr: 3.88e-03, grad_scale: 32.0 +2023-03-26 07:04:24,051 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34771.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 07:04:55,093 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34801.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:05:14,339 INFO [finetune.py:976] (5/7) Epoch 7, batch 450, loss[loss=0.1887, simple_loss=0.2498, pruned_loss=0.06383, over 4754.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2746, pruned_loss=0.07611, over 857300.34 frames. ], batch size: 23, lr: 3.88e-03, grad_scale: 32.0 +2023-03-26 07:05:15,660 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9640, 1.2661, 0.7579, 1.8758, 2.2701, 1.7112, 1.5694, 1.8086], + device='cuda:5'), covar=tensor([0.2006, 0.2952, 0.2899, 0.1578, 0.2414, 0.2549, 0.2031, 0.2771], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0098, 0.0115, 0.0093, 0.0124, 0.0096, 0.0100, 0.0093], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 07:05:25,983 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.133e+01 1.634e+02 1.827e+02 2.211e+02 3.915e+02, threshold=3.654e+02, percent-clipped=0.0 +2023-03-26 07:05:50,917 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=34849.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:06:11,595 INFO [finetune.py:976] (5/7) Epoch 7, batch 500, loss[loss=0.177, simple_loss=0.234, pruned_loss=0.06004, over 4016.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2716, pruned_loss=0.07477, over 877866.96 frames. ], batch size: 17, lr: 3.88e-03, grad_scale: 32.0 +2023-03-26 07:07:15,207 INFO [finetune.py:976] (5/7) Epoch 7, batch 550, loss[loss=0.1941, simple_loss=0.2652, pruned_loss=0.06147, over 4847.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2689, pruned_loss=0.07387, over 897010.68 frames. 
], batch size: 47, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:07:26,485 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.074e+02 1.634e+02 2.013e+02 2.383e+02 4.182e+02, threshold=4.026e+02, percent-clipped=3.0 +2023-03-26 07:07:53,608 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34949.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:08:14,352 INFO [finetune.py:976] (5/7) Epoch 7, batch 600, loss[loss=0.2084, simple_loss=0.2788, pruned_loss=0.06902, over 4815.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2699, pruned_loss=0.07398, over 906567.33 frames. ], batch size: 40, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:08:19,801 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2031, 1.9836, 2.7653, 1.5581, 2.3037, 2.3609, 1.7273, 2.7230], + device='cuda:5'), covar=tensor([0.1744, 0.2216, 0.1369, 0.2517, 0.1214, 0.1956, 0.2831, 0.1054], + device='cuda:5'), in_proj_covar=tensor([0.0205, 0.0206, 0.0199, 0.0197, 0.0183, 0.0221, 0.0219, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:08:31,420 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34978.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 07:08:35,299 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34982.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:08:44,954 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0654, 1.8294, 1.7149, 1.8972, 2.4663, 2.0205, 1.7590, 1.4945], + device='cuda:5'), covar=tensor([0.2261, 0.2300, 0.1988, 0.1855, 0.2080, 0.1136, 0.2407, 0.1972], + device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0209, 0.0204, 0.0186, 0.0237, 0.0176, 0.0212, 0.0190], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:09:06,447 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-26 07:09:16,191 INFO [finetune.py:976] (5/7) Epoch 7, batch 650, loss[loss=0.1503, simple_loss=0.2126, pruned_loss=0.04398, over 4796.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2729, pruned_loss=0.07533, over 916193.51 frames. ], batch size: 25, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:09:27,415 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.303e+02 1.723e+02 2.031e+02 2.472e+02 3.902e+02, threshold=4.061e+02, percent-clipped=0.0 +2023-03-26 07:10:17,351 INFO [finetune.py:976] (5/7) Epoch 7, batch 700, loss[loss=0.2251, simple_loss=0.2929, pruned_loss=0.07865, over 4814.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2744, pruned_loss=0.07562, over 924485.04 frames. 
], batch size: 45, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:10:17,428 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35066.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 07:10:46,482 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35092.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:10:55,786 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1318, 1.2713, 1.5258, 1.0143, 1.1954, 1.3490, 1.2533, 1.4934], + device='cuda:5'), covar=tensor([0.1356, 0.2361, 0.1395, 0.1735, 0.1072, 0.1319, 0.2954, 0.0909], + device='cuda:5'), in_proj_covar=tensor([0.0205, 0.0206, 0.0199, 0.0197, 0.0182, 0.0221, 0.0219, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:11:11,734 INFO [finetune.py:976] (5/7) Epoch 7, batch 750, loss[loss=0.1873, simple_loss=0.2545, pruned_loss=0.06003, over 4830.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2765, pruned_loss=0.07693, over 930935.13 frames. ], batch size: 30, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:11:23,578 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.067e+02 1.641e+02 2.027e+02 2.429e+02 3.682e+02, threshold=4.054e+02, percent-clipped=0.0 +2023-03-26 07:11:54,082 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-03-26 07:11:55,044 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8756, 1.6406, 2.3020, 1.5184, 2.0351, 2.2100, 1.6423, 2.3719], + device='cuda:5'), covar=tensor([0.1654, 0.2487, 0.1524, 0.2716, 0.1085, 0.1628, 0.3138, 0.0923], + device='cuda:5'), in_proj_covar=tensor([0.0205, 0.0206, 0.0198, 0.0197, 0.0182, 0.0221, 0.0219, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:12:01,755 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35153.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:12:14,609 INFO [finetune.py:976] (5/7) Epoch 7, batch 800, loss[loss=0.2166, simple_loss=0.2815, pruned_loss=0.07587, over 4874.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2758, pruned_loss=0.07616, over 934117.54 frames. ], batch size: 32, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:13:17,537 INFO [finetune.py:976] (5/7) Epoch 7, batch 850, loss[loss=0.1913, simple_loss=0.262, pruned_loss=0.06024, over 4916.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2725, pruned_loss=0.07505, over 938629.68 frames. 
], batch size: 37, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:13:27,729 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.064e+02 1.645e+02 1.948e+02 2.227e+02 3.525e+02, threshold=3.897e+02, percent-clipped=0.0 +2023-03-26 07:13:33,523 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9918, 1.8466, 2.3864, 1.5793, 2.1939, 2.2564, 1.7339, 2.4947], + device='cuda:5'), covar=tensor([0.1489, 0.1990, 0.1423, 0.2262, 0.1025, 0.1489, 0.2621, 0.0844], + device='cuda:5'), in_proj_covar=tensor([0.0206, 0.0207, 0.0199, 0.0198, 0.0183, 0.0223, 0.0220, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:13:55,193 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35249.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:14:14,657 INFO [finetune.py:976] (5/7) Epoch 7, batch 900, loss[loss=0.1609, simple_loss=0.232, pruned_loss=0.04494, over 4775.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2682, pruned_loss=0.07348, over 941265.73 frames. ], batch size: 28, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:14:32,803 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35278.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 07:14:33,773 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-03-26 07:14:35,292 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35282.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:14:55,291 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=35297.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:15:06,014 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.76 vs. limit=5.0 +2023-03-26 07:15:16,597 INFO [finetune.py:976] (5/7) Epoch 7, batch 950, loss[loss=0.228, simple_loss=0.3012, pruned_loss=0.07746, over 4816.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2687, pruned_loss=0.07424, over 945196.71 frames. ], batch size: 51, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:15:31,370 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.193e+02 1.515e+02 1.811e+02 2.306e+02 3.628e+02, threshold=3.621e+02, percent-clipped=0.0 +2023-03-26 07:15:31,448 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=35326.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:15:33,894 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=35330.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:15:50,906 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35345.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 07:16:18,535 INFO [finetune.py:976] (5/7) Epoch 7, batch 1000, loss[loss=0.2429, simple_loss=0.2878, pruned_loss=0.09901, over 4901.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2737, pruned_loss=0.07665, over 947886.52 frames. ], batch size: 36, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:16:18,666 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35366.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 07:16:27,462 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35374.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:17:02,320 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. 
limit=2.0 +2023-03-26 07:17:08,115 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35406.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 07:17:17,934 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=35414.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 07:17:19,086 INFO [finetune.py:976] (5/7) Epoch 7, batch 1050, loss[loss=0.1695, simple_loss=0.2466, pruned_loss=0.04623, over 4749.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2756, pruned_loss=0.07669, over 951054.36 frames. ], batch size: 26, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:17:30,142 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.274e+02 1.696e+02 1.924e+02 2.371e+02 5.787e+02, threshold=3.848e+02, percent-clipped=4.0 +2023-03-26 07:17:41,407 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35435.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:17:59,222 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8827, 1.8851, 1.9140, 1.2604, 2.1538, 2.0030, 1.9529, 1.6450], + device='cuda:5'), covar=tensor([0.0697, 0.0739, 0.0831, 0.1083, 0.0493, 0.0863, 0.0736, 0.1135], + device='cuda:5'), in_proj_covar=tensor([0.0138, 0.0134, 0.0143, 0.0127, 0.0113, 0.0144, 0.0145, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:17:59,816 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35448.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:18:20,380 INFO [finetune.py:976] (5/7) Epoch 7, batch 1100, loss[loss=0.1911, simple_loss=0.2583, pruned_loss=0.06197, over 4717.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2776, pruned_loss=0.07799, over 950890.52 frames. ], batch size: 59, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:19:22,492 INFO [finetune.py:976] (5/7) Epoch 7, batch 1150, loss[loss=0.227, simple_loss=0.2844, pruned_loss=0.08484, over 4821.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2799, pruned_loss=0.07921, over 952711.50 frames. ], batch size: 39, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:19:33,762 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.207e+02 1.764e+02 2.068e+02 2.432e+02 4.937e+02, threshold=4.137e+02, percent-clipped=2.0 +2023-03-26 07:19:41,298 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35530.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:19:53,672 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([5.0157, 4.3575, 4.5877, 4.8400, 4.6994, 4.5191, 5.1468, 1.5031], + device='cuda:5'), covar=tensor([0.0620, 0.0742, 0.0609, 0.0773, 0.1069, 0.1194, 0.0470, 0.5128], + device='cuda:5'), in_proj_covar=tensor([0.0348, 0.0241, 0.0271, 0.0290, 0.0330, 0.0281, 0.0302, 0.0294], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:20:15,508 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.02 vs. limit=5.0 +2023-03-26 07:20:24,976 INFO [finetune.py:976] (5/7) Epoch 7, batch 1200, loss[loss=0.2067, simple_loss=0.2693, pruned_loss=0.07203, over 4749.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2772, pruned_loss=0.07758, over 955872.95 frames. 
], batch size: 26, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:20:25,657 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.6391, 4.0451, 4.1852, 4.4295, 4.3741, 4.1397, 4.7159, 1.7645], + device='cuda:5'), covar=tensor([0.0734, 0.0890, 0.0747, 0.0847, 0.1253, 0.1233, 0.0670, 0.4736], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0242, 0.0272, 0.0291, 0.0330, 0.0283, 0.0303, 0.0295], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:20:51,607 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35591.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:21:10,649 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8997, 1.6697, 1.4249, 1.5072, 1.5694, 1.6117, 1.5947, 2.3626], + device='cuda:5'), covar=tensor([0.5749, 0.6263, 0.4535, 0.5897, 0.5442, 0.3318, 0.5556, 0.2274], + device='cuda:5'), in_proj_covar=tensor([0.0283, 0.0259, 0.0220, 0.0282, 0.0240, 0.0205, 0.0244, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:21:20,134 INFO [finetune.py:976] (5/7) Epoch 7, batch 1250, loss[loss=0.2137, simple_loss=0.2752, pruned_loss=0.07606, over 4790.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2743, pruned_loss=0.07677, over 955902.93 frames. ], batch size: 29, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:21:27,230 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9260, 3.8391, 3.6194, 1.7924, 3.8743, 2.9926, 0.7656, 2.7711], + device='cuda:5'), covar=tensor([0.2075, 0.1557, 0.1540, 0.3358, 0.1038, 0.0931, 0.4705, 0.1492], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0172, 0.0162, 0.0128, 0.0155, 0.0122, 0.0145, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 07:21:31,427 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.265e+02 1.688e+02 2.018e+02 2.672e+02 1.298e+03, threshold=4.035e+02, percent-clipped=4.0 +2023-03-26 07:22:18,865 INFO [finetune.py:976] (5/7) Epoch 7, batch 1300, loss[loss=0.2034, simple_loss=0.262, pruned_loss=0.07243, over 4768.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2702, pruned_loss=0.07457, over 955677.69 frames. ], batch size: 54, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:23:06,115 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35701.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 07:23:25,710 INFO [finetune.py:976] (5/7) Epoch 7, batch 1350, loss[loss=0.2108, simple_loss=0.2746, pruned_loss=0.07345, over 4858.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.271, pruned_loss=0.07533, over 956220.61 frames. 
], batch size: 44, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:23:29,245 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4274, 1.4326, 1.3969, 1.6753, 1.5283, 2.9617, 1.2370, 1.4708], + device='cuda:5'), covar=tensor([0.1010, 0.1750, 0.1144, 0.1015, 0.1639, 0.0260, 0.1537, 0.1757], + device='cuda:5'), in_proj_covar=tensor([0.0077, 0.0081, 0.0077, 0.0079, 0.0092, 0.0083, 0.0084, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 07:23:38,295 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.109e+01 1.659e+02 1.871e+02 2.249e+02 4.421e+02, threshold=3.743e+02, percent-clipped=1.0 +2023-03-26 07:23:45,990 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35730.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:24:06,319 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35748.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:24:25,447 INFO [finetune.py:976] (5/7) Epoch 7, batch 1400, loss[loss=0.2048, simple_loss=0.2655, pruned_loss=0.07202, over 4852.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2743, pruned_loss=0.07658, over 957195.50 frames. ], batch size: 47, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:24:33,473 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35771.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:25:01,857 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=35796.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:25:04,166 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.3448, 3.7468, 3.9522, 4.1609, 4.0652, 3.8431, 4.3970, 1.4598], + device='cuda:5'), covar=tensor([0.0790, 0.0838, 0.0780, 0.1000, 0.1203, 0.1469, 0.0681, 0.5244], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0241, 0.0272, 0.0292, 0.0331, 0.0283, 0.0303, 0.0295], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:25:20,925 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35811.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:25:23,865 INFO [finetune.py:976] (5/7) Epoch 7, batch 1450, loss[loss=0.1885, simple_loss=0.2526, pruned_loss=0.06216, over 4751.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2759, pruned_loss=0.07637, over 957778.58 frames. 
], batch size: 26, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:25:33,658 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.204e+02 1.737e+02 2.011e+02 2.560e+02 4.083e+02, threshold=4.021e+02, percent-clipped=3.0 +2023-03-26 07:25:43,689 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35832.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:25:51,855 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5874, 1.6921, 1.2222, 1.4735, 1.8392, 1.6189, 1.4602, 1.3280], + device='cuda:5'), covar=tensor([0.0298, 0.0287, 0.0549, 0.0321, 0.0212, 0.0531, 0.0322, 0.0428], + device='cuda:5'), in_proj_covar=tensor([0.0088, 0.0111, 0.0138, 0.0116, 0.0104, 0.0100, 0.0091, 0.0109], + device='cuda:5'), out_proj_covar=tensor([6.8873e-05, 8.7373e-05, 1.1047e-04, 9.1584e-05, 8.2238e-05, 7.4230e-05, + 6.9186e-05, 8.5319e-05], device='cuda:5') +2023-03-26 07:26:02,793 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2659, 3.7319, 3.9032, 4.0662, 3.9441, 3.7989, 4.3339, 1.4433], + device='cuda:5'), covar=tensor([0.0701, 0.0740, 0.0674, 0.0944, 0.1185, 0.1232, 0.0634, 0.5077], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0241, 0.0272, 0.0292, 0.0331, 0.0282, 0.0303, 0.0294], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:26:20,570 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.4741, 3.8713, 4.0061, 4.2464, 4.1802, 3.9664, 4.5194, 1.4884], + device='cuda:5'), covar=tensor([0.0686, 0.0747, 0.0715, 0.0961, 0.1200, 0.1280, 0.0599, 0.5084], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0241, 0.0272, 0.0292, 0.0331, 0.0282, 0.0303, 0.0294], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:26:24,786 INFO [finetune.py:976] (5/7) Epoch 7, batch 1500, loss[loss=0.2056, simple_loss=0.2733, pruned_loss=0.06897, over 4750.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2769, pruned_loss=0.07689, over 956499.19 frames. ], batch size: 28, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:26:32,637 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35872.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:26:47,875 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35886.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:27:11,198 INFO [finetune.py:976] (5/7) Epoch 7, batch 1550, loss[loss=0.1885, simple_loss=0.2517, pruned_loss=0.0627, over 4280.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2755, pruned_loss=0.0761, over 953912.60 frames. 
], batch size: 18, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:27:17,686 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.940e+01 1.555e+02 1.902e+02 2.352e+02 4.828e+02, threshold=3.804e+02, percent-clipped=1.0 +2023-03-26 07:27:39,795 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6891, 1.5602, 1.5645, 1.5991, 1.0434, 3.3181, 1.3852, 2.0247], + device='cuda:5'), covar=tensor([0.3178, 0.2282, 0.1936, 0.2301, 0.1899, 0.0224, 0.2728, 0.1209], + device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0115, 0.0118, 0.0123, 0.0117, 0.0099, 0.0101, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 07:27:45,066 INFO [finetune.py:976] (5/7) Epoch 7, batch 1600, loss[loss=0.1859, simple_loss=0.2424, pruned_loss=0.06469, over 4902.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2718, pruned_loss=0.0745, over 955249.74 frames. ], batch size: 36, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:28:25,889 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36001.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 07:28:34,631 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.91 vs. limit=5.0 +2023-03-26 07:28:35,457 INFO [finetune.py:976] (5/7) Epoch 7, batch 1650, loss[loss=0.2155, simple_loss=0.2768, pruned_loss=0.07714, over 4938.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.269, pruned_loss=0.07388, over 955067.64 frames. ], batch size: 38, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:28:41,915 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.242e+02 1.569e+02 1.867e+02 2.342e+02 3.778e+02, threshold=3.734e+02, percent-clipped=0.0 +2023-03-26 07:28:50,686 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36030.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:29:09,240 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=36049.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 07:29:25,210 INFO [finetune.py:976] (5/7) Epoch 7, batch 1700, loss[loss=0.2161, simple_loss=0.2705, pruned_loss=0.08089, over 4183.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2675, pruned_loss=0.07365, over 956710.60 frames. ], batch size: 66, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:29:39,786 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=36078.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:29:58,260 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2664, 1.4066, 1.1116, 1.3531, 1.4598, 1.4667, 1.3887, 1.1922], + device='cuda:5'), covar=tensor([0.0359, 0.0220, 0.0485, 0.0235, 0.0219, 0.0305, 0.0235, 0.0303], + device='cuda:5'), in_proj_covar=tensor([0.0088, 0.0110, 0.0138, 0.0116, 0.0104, 0.0099, 0.0091, 0.0109], + device='cuda:5'), out_proj_covar=tensor([6.8814e-05, 8.6870e-05, 1.1004e-04, 9.1143e-05, 8.1695e-05, 7.3955e-05, + 6.8855e-05, 8.4868e-05], device='cuda:5') +2023-03-26 07:30:15,626 INFO [finetune.py:976] (5/7) Epoch 7, batch 1750, loss[loss=0.2462, simple_loss=0.3085, pruned_loss=0.09197, over 4862.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2716, pruned_loss=0.07551, over 955222.69 frames. 
], batch size: 44, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:30:27,793 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.647e+02 1.960e+02 2.452e+02 4.962e+02, threshold=3.920e+02, percent-clipped=3.0 +2023-03-26 07:30:28,502 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36127.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:30:55,306 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6968, 3.4812, 3.3322, 1.4089, 3.5823, 2.7154, 1.0134, 2.3728], + device='cuda:5'), covar=tensor([0.2007, 0.1657, 0.1521, 0.3505, 0.0996, 0.0973, 0.4125, 0.1533], + device='cuda:5'), in_proj_covar=tensor([0.0154, 0.0172, 0.0162, 0.0129, 0.0155, 0.0122, 0.0146, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 07:31:18,546 INFO [finetune.py:976] (5/7) Epoch 7, batch 1800, loss[loss=0.2036, simple_loss=0.2706, pruned_loss=0.0683, over 4827.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.274, pruned_loss=0.07571, over 953264.96 frames. ], batch size: 39, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:31:19,217 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36167.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:31:38,140 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36181.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:31:47,177 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36186.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:32:09,277 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4969, 2.2268, 1.9946, 0.9776, 2.1281, 1.8692, 1.6410, 2.1209], + device='cuda:5'), covar=tensor([0.0805, 0.0877, 0.1416, 0.2015, 0.1308, 0.2028, 0.2050, 0.0955], + device='cuda:5'), in_proj_covar=tensor([0.0167, 0.0200, 0.0197, 0.0187, 0.0214, 0.0204, 0.0218, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:32:21,146 INFO [finetune.py:976] (5/7) Epoch 7, batch 1850, loss[loss=0.2066, simple_loss=0.264, pruned_loss=0.07462, over 4731.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2738, pruned_loss=0.07574, over 954118.55 frames. ], batch size: 54, lr: 3.88e-03, grad_scale: 16.0 +2023-03-26 07:32:21,288 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9956, 1.8335, 1.6881, 2.1698, 2.4981, 2.0486, 1.5702, 1.5861], + device='cuda:5'), covar=tensor([0.2223, 0.2309, 0.1946, 0.1614, 0.1742, 0.1117, 0.2569, 0.1958], + device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0209, 0.0204, 0.0186, 0.0239, 0.0176, 0.0213, 0.0192], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:32:33,458 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.247e+02 1.736e+02 2.131e+02 2.651e+02 6.216e+02, threshold=4.263e+02, percent-clipped=3.0 +2023-03-26 07:32:40,453 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=36234.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:32:50,327 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36242.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:33:20,734 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. 
limit=2.0 +2023-03-26 07:33:21,423 INFO [finetune.py:976] (5/7) Epoch 7, batch 1900, loss[loss=0.2323, simple_loss=0.2869, pruned_loss=0.0889, over 4909.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2734, pruned_loss=0.07486, over 955463.87 frames. ], batch size: 37, lr: 3.87e-03, grad_scale: 16.0 +2023-03-26 07:33:39,018 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5581, 1.4763, 1.3629, 1.4568, 1.7821, 1.6954, 1.5531, 1.3415], + device='cuda:5'), covar=tensor([0.0319, 0.0287, 0.0507, 0.0269, 0.0220, 0.0408, 0.0273, 0.0414], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0111, 0.0138, 0.0116, 0.0104, 0.0100, 0.0091, 0.0109], + device='cuda:5'), out_proj_covar=tensor([6.9157e-05, 8.7262e-05, 1.1068e-04, 9.1400e-05, 8.2030e-05, 7.4066e-05, + 6.9211e-05, 8.5112e-05], device='cuda:5') +2023-03-26 07:33:42,451 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-03-26 07:33:52,174 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-03-26 07:34:25,474 INFO [finetune.py:976] (5/7) Epoch 7, batch 1950, loss[loss=0.2404, simple_loss=0.292, pruned_loss=0.09444, over 4900.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2706, pruned_loss=0.07376, over 954551.52 frames. ], batch size: 36, lr: 3.87e-03, grad_scale: 16.0 +2023-03-26 07:34:36,929 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.969e+01 1.685e+02 2.051e+02 2.475e+02 4.640e+02, threshold=4.103e+02, percent-clipped=3.0 +2023-03-26 07:35:28,761 INFO [finetune.py:976] (5/7) Epoch 7, batch 2000, loss[loss=0.2025, simple_loss=0.2627, pruned_loss=0.07118, over 4866.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2684, pruned_loss=0.0729, over 954655.94 frames. ], batch size: 31, lr: 3.87e-03, grad_scale: 16.0 +2023-03-26 07:35:57,739 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0084, 1.2569, 1.7953, 1.7911, 1.5394, 1.5672, 1.6353, 1.6421], + device='cuda:5'), covar=tensor([0.4924, 0.5989, 0.5105, 0.5424, 0.6849, 0.5014, 0.6959, 0.4926], + device='cuda:5'), in_proj_covar=tensor([0.0227, 0.0241, 0.0253, 0.0253, 0.0241, 0.0218, 0.0269, 0.0223], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:36:30,437 INFO [finetune.py:976] (5/7) Epoch 7, batch 2050, loss[loss=0.1814, simple_loss=0.2398, pruned_loss=0.06148, over 4893.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2662, pruned_loss=0.07231, over 955052.71 frames. ], batch size: 32, lr: 3.87e-03, grad_scale: 16.0 +2023-03-26 07:36:43,670 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 7.910e+01 1.532e+02 1.893e+02 2.218e+02 7.941e+02, threshold=3.786e+02, percent-clipped=2.0 +2023-03-26 07:36:44,405 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36427.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:36:53,562 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.71 vs. limit=5.0 +2023-03-26 07:37:04,434 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.58 vs. limit=2.0 +2023-03-26 07:37:34,744 INFO [finetune.py:976] (5/7) Epoch 7, batch 2100, loss[loss=0.1885, simple_loss=0.2549, pruned_loss=0.06103, over 4822.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2658, pruned_loss=0.07215, over 955419.92 frames. 
], batch size: 30, lr: 3.87e-03, grad_scale: 16.0 +2023-03-26 07:37:35,456 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36467.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:37:45,797 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=36475.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:38:24,603 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-03-26 07:38:36,450 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=36515.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:38:36,993 INFO [finetune.py:976] (5/7) Epoch 7, batch 2150, loss[loss=0.2124, simple_loss=0.2924, pruned_loss=0.06619, over 4921.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2687, pruned_loss=0.07329, over 953423.03 frames. ], batch size: 42, lr: 3.87e-03, grad_scale: 16.0 +2023-03-26 07:38:47,864 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.56 vs. limit=5.0 +2023-03-26 07:38:48,001 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.129e+02 1.787e+02 2.211e+02 2.590e+02 5.595e+02, threshold=4.423e+02, percent-clipped=4.0 +2023-03-26 07:39:05,370 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36537.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:39:09,629 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3561, 1.3029, 1.6721, 1.1460, 1.2983, 1.5013, 1.2802, 1.5783], + device='cuda:5'), covar=tensor([0.1172, 0.2068, 0.1213, 0.1467, 0.0933, 0.1213, 0.2720, 0.0810], + device='cuda:5'), in_proj_covar=tensor([0.0200, 0.0203, 0.0196, 0.0195, 0.0180, 0.0219, 0.0216, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:39:26,863 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.46 vs. limit=5.0 +2023-03-26 07:39:35,927 INFO [finetune.py:976] (5/7) Epoch 7, batch 2200, loss[loss=0.2503, simple_loss=0.3036, pruned_loss=0.09852, over 4794.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2727, pruned_loss=0.07485, over 955118.20 frames. ], batch size: 51, lr: 3.87e-03, grad_scale: 16.0 +2023-03-26 07:40:25,421 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36605.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:40:27,780 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36608.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:40:35,327 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36612.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:40:38,179 INFO [finetune.py:976] (5/7) Epoch 7, batch 2250, loss[loss=0.1803, simple_loss=0.2518, pruned_loss=0.05444, over 4931.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2753, pruned_loss=0.07584, over 955837.57 frames. 
], batch size: 29, lr: 3.87e-03, grad_scale: 16.0 +2023-03-26 07:40:49,912 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.735e+02 1.950e+02 2.446e+02 5.153e+02, threshold=3.899e+02, percent-clipped=1.0 +2023-03-26 07:41:18,496 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5117, 2.1570, 1.6660, 0.7601, 1.8164, 1.9819, 1.7664, 2.0210], + device='cuda:5'), covar=tensor([0.0782, 0.0835, 0.1482, 0.2281, 0.1551, 0.2283, 0.2313, 0.0913], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0203, 0.0201, 0.0189, 0.0219, 0.0208, 0.0222, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:41:41,217 INFO [finetune.py:976] (5/7) Epoch 7, batch 2300, loss[loss=0.2364, simple_loss=0.2872, pruned_loss=0.0928, over 4780.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.274, pruned_loss=0.07444, over 954512.53 frames. ], batch size: 25, lr: 3.87e-03, grad_scale: 16.0 +2023-03-26 07:41:41,350 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36666.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:41:48,510 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36669.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:41:51,476 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36673.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:42:01,323 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9544, 4.2152, 4.0274, 2.2749, 4.3914, 3.3433, 0.8654, 2.8781], + device='cuda:5'), covar=tensor([0.2650, 0.1496, 0.1389, 0.3002, 0.0859, 0.0877, 0.4399, 0.1480], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0172, 0.0161, 0.0128, 0.0154, 0.0122, 0.0146, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 07:42:18,153 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36700.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 07:42:35,599 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8951, 1.3043, 1.7950, 1.7363, 1.5226, 1.5762, 1.6984, 1.6336], + device='cuda:5'), covar=tensor([0.4725, 0.5919, 0.4679, 0.5081, 0.6112, 0.4845, 0.6629, 0.4587], + device='cuda:5'), in_proj_covar=tensor([0.0228, 0.0242, 0.0254, 0.0253, 0.0242, 0.0219, 0.0270, 0.0224], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:42:36,644 INFO [finetune.py:976] (5/7) Epoch 7, batch 2350, loss[loss=0.2093, simple_loss=0.2626, pruned_loss=0.07797, over 4791.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2719, pruned_loss=0.07379, over 952918.33 frames. 
], batch size: 29, lr: 3.87e-03, grad_scale: 16.0 +2023-03-26 07:42:37,353 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9831, 1.5510, 2.4091, 3.8259, 2.6061, 2.6000, 0.8505, 3.0480], + device='cuda:5'), covar=tensor([0.1714, 0.1623, 0.1349, 0.0495, 0.0774, 0.1601, 0.2073, 0.0528], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0117, 0.0133, 0.0164, 0.0101, 0.0139, 0.0127, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 07:42:49,195 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.103e+02 1.509e+02 1.866e+02 2.321e+02 4.735e+02, threshold=3.732e+02, percent-clipped=2.0 +2023-03-26 07:43:29,356 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36761.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 07:43:37,712 INFO [finetune.py:976] (5/7) Epoch 7, batch 2400, loss[loss=0.1882, simple_loss=0.2383, pruned_loss=0.06901, over 3981.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2684, pruned_loss=0.07247, over 951899.32 frames. ], batch size: 17, lr: 3.87e-03, grad_scale: 16.0 +2023-03-26 07:43:58,728 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9956, 1.7705, 1.5728, 1.5711, 1.7759, 1.6836, 1.6999, 2.4366], + device='cuda:5'), covar=tensor([0.5948, 0.5804, 0.4611, 0.5276, 0.4941, 0.3377, 0.5321, 0.2212], + device='cuda:5'), in_proj_covar=tensor([0.0283, 0.0258, 0.0220, 0.0281, 0.0239, 0.0204, 0.0243, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:44:19,307 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3899, 1.5156, 1.6040, 1.6558, 1.5523, 3.2251, 1.3889, 1.5688], + device='cuda:5'), covar=tensor([0.1048, 0.1726, 0.1125, 0.1075, 0.1623, 0.0266, 0.1452, 0.1752], + device='cuda:5'), in_proj_covar=tensor([0.0077, 0.0081, 0.0076, 0.0079, 0.0092, 0.0083, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 07:44:28,518 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36805.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 07:44:40,196 INFO [finetune.py:976] (5/7) Epoch 7, batch 2450, loss[loss=0.1831, simple_loss=0.2449, pruned_loss=0.06068, over 4721.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2659, pruned_loss=0.07145, over 951609.77 frames. 
], batch size: 59, lr: 3.87e-03, grad_scale: 16.0 +2023-03-26 07:44:51,698 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.068e+02 1.798e+02 2.140e+02 2.594e+02 4.660e+02, threshold=4.281e+02, percent-clipped=3.0 +2023-03-26 07:45:00,719 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0654, 1.0256, 1.0840, 0.3277, 0.8694, 1.1812, 1.2704, 1.0365], + device='cuda:5'), covar=tensor([0.0874, 0.0536, 0.0429, 0.0597, 0.0574, 0.0543, 0.0362, 0.0627], + device='cuda:5'), in_proj_covar=tensor([0.0128, 0.0155, 0.0118, 0.0136, 0.0131, 0.0123, 0.0144, 0.0144], + device='cuda:5'), out_proj_covar=tensor([9.5827e-05, 1.1414e-04, 8.5642e-05, 9.8967e-05, 9.3823e-05, 9.0630e-05, + 1.0629e-04, 1.0585e-04], device='cuda:5') +2023-03-26 07:45:10,260 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36837.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:45:49,125 INFO [finetune.py:976] (5/7) Epoch 7, batch 2500, loss[loss=0.2235, simple_loss=0.2874, pruned_loss=0.07978, over 4761.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2676, pruned_loss=0.07253, over 950936.99 frames. ], batch size: 54, lr: 3.87e-03, grad_scale: 16.0 +2023-03-26 07:45:49,283 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36866.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 07:46:12,542 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=36885.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:46:44,083 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36911.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:46:52,300 INFO [finetune.py:976] (5/7) Epoch 7, batch 2550, loss[loss=0.2088, simple_loss=0.2746, pruned_loss=0.07152, over 4900.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2704, pruned_loss=0.07296, over 952009.84 frames. ], batch size: 37, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 07:47:03,265 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.261e+02 1.620e+02 1.912e+02 2.307e+02 6.491e+02, threshold=3.825e+02, percent-clipped=1.0 +2023-03-26 07:47:45,970 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36958.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:47:47,755 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36961.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:47:55,219 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36964.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:47:56,361 INFO [finetune.py:976] (5/7) Epoch 7, batch 2600, loss[loss=0.1927, simple_loss=0.2554, pruned_loss=0.06499, over 4818.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2717, pruned_loss=0.07333, over 953892.50 frames. ], batch size: 39, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 07:47:57,656 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36968.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:48:05,162 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36972.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:48:08,625 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. 
limit=2.0 +2023-03-26 07:48:41,465 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37004.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:48:53,702 INFO [finetune.py:976] (5/7) Epoch 7, batch 2650, loss[loss=0.2606, simple_loss=0.3107, pruned_loss=0.1053, over 4891.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2736, pruned_loss=0.0744, over 954379.45 frames. ], batch size: 36, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 07:49:01,191 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37019.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:49:05,451 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.083e+02 1.627e+02 1.954e+02 2.393e+02 3.704e+02, threshold=3.907e+02, percent-clipped=0.0 +2023-03-26 07:49:41,696 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37056.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 07:49:47,738 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37065.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:49:48,223 INFO [finetune.py:976] (5/7) Epoch 7, batch 2700, loss[loss=0.1898, simple_loss=0.2584, pruned_loss=0.06064, over 4756.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2723, pruned_loss=0.0733, over 955597.35 frames. ], batch size: 27, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 07:49:53,144 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6576, 2.3473, 1.8929, 0.9357, 2.0940, 1.9914, 1.8321, 2.0767], + device='cuda:5'), covar=tensor([0.0898, 0.0882, 0.1729, 0.2263, 0.1591, 0.2385, 0.2239, 0.1092], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0201, 0.0199, 0.0189, 0.0217, 0.0206, 0.0220, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:50:18,612 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0286, 4.5697, 4.3166, 2.5526, 4.6269, 3.5309, 0.8210, 3.2778], + device='cuda:5'), covar=tensor([0.2193, 0.1462, 0.1482, 0.3023, 0.0793, 0.0880, 0.4985, 0.1442], + device='cuda:5'), in_proj_covar=tensor([0.0154, 0.0173, 0.0163, 0.0129, 0.0154, 0.0123, 0.0146, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 07:50:22,104 INFO [finetune.py:976] (5/7) Epoch 7, batch 2750, loss[loss=0.1822, simple_loss=0.246, pruned_loss=0.05918, over 4906.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2694, pruned_loss=0.07281, over 954513.45 frames. 
], batch size: 43, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 07:50:28,700 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.127e+02 1.629e+02 1.991e+02 2.307e+02 4.303e+02, threshold=3.983e+02, percent-clipped=1.0 +2023-03-26 07:50:32,456 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7373, 3.4729, 3.2931, 1.6279, 3.5810, 2.7222, 0.7469, 2.5125], + device='cuda:5'), covar=tensor([0.2498, 0.1975, 0.1812, 0.3322, 0.1127, 0.1041, 0.4557, 0.1512], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0173, 0.0163, 0.0129, 0.0154, 0.0123, 0.0146, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 07:50:58,199 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37161.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 07:51:03,501 INFO [finetune.py:976] (5/7) Epoch 7, batch 2800, loss[loss=0.194, simple_loss=0.2534, pruned_loss=0.06729, over 4822.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2657, pruned_loss=0.07112, over 955771.92 frames. ], batch size: 33, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 07:51:26,884 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9369, 1.7969, 1.4694, 1.5806, 1.6879, 1.6627, 1.6892, 2.4115], + device='cuda:5'), covar=tensor([0.5297, 0.5880, 0.4518, 0.5814, 0.5254, 0.3020, 0.5347, 0.2209], + device='cuda:5'), in_proj_covar=tensor([0.0283, 0.0259, 0.0220, 0.0281, 0.0240, 0.0204, 0.0244, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:52:09,301 INFO [finetune.py:976] (5/7) Epoch 7, batch 2850, loss[loss=0.2067, simple_loss=0.2705, pruned_loss=0.0714, over 4680.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2644, pruned_loss=0.07071, over 955682.64 frames. ], batch size: 23, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 07:52:20,862 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.582e+02 1.929e+02 2.327e+02 4.539e+02, threshold=3.857e+02, percent-clipped=3.0 +2023-03-26 07:52:37,313 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0763, 4.7238, 4.4117, 2.5661, 4.7743, 3.7767, 0.9062, 3.3436], + device='cuda:5'), covar=tensor([0.2316, 0.1429, 0.1351, 0.2959, 0.0680, 0.0758, 0.4635, 0.1396], + device='cuda:5'), in_proj_covar=tensor([0.0154, 0.0173, 0.0162, 0.0129, 0.0153, 0.0122, 0.0146, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 07:53:00,965 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37261.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:53:02,787 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37264.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:53:03,914 INFO [finetune.py:976] (5/7) Epoch 7, batch 2900, loss[loss=0.2431, simple_loss=0.2938, pruned_loss=0.09624, over 4829.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2675, pruned_loss=0.07228, over 954423.89 frames. 
], batch size: 33, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 07:53:09,391 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37267.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:53:09,984 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37268.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:53:10,022 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37268.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:54:04,936 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=37309.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:54:06,759 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=37312.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:54:08,013 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37314.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:54:13,845 INFO [finetune.py:976] (5/7) Epoch 7, batch 2950, loss[loss=0.1984, simple_loss=0.2483, pruned_loss=0.07427, over 4301.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2706, pruned_loss=0.07282, over 952834.62 frames. ], batch size: 18, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 07:54:13,909 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=37316.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:54:25,441 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.193e+02 1.702e+02 2.045e+02 2.514e+02 5.908e+02, threshold=4.090e+02, percent-clipped=3.0 +2023-03-26 07:54:27,384 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37329.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:55:00,460 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37356.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 07:55:05,685 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37360.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:55:08,132 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37364.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:55:09,241 INFO [finetune.py:976] (5/7) Epoch 7, batch 3000, loss[loss=0.2103, simple_loss=0.2815, pruned_loss=0.06955, over 4861.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2737, pruned_loss=0.07479, over 951832.12 frames. 
], batch size: 31, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 07:55:09,242 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 07:55:11,748 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8114, 3.4928, 3.4004, 1.4311, 3.5983, 2.7214, 0.7398, 2.3563], + device='cuda:5'), covar=tensor([0.1724, 0.1640, 0.1389, 0.3591, 0.1046, 0.1064, 0.4206, 0.1603], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0171, 0.0161, 0.0128, 0.0152, 0.0121, 0.0145, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 07:55:14,490 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9499, 1.4322, 0.9965, 1.7754, 2.2184, 1.1778, 1.5440, 1.7877], + device='cuda:5'), covar=tensor([0.1302, 0.1817, 0.1837, 0.1072, 0.1781, 0.2056, 0.1324, 0.1799], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0097, 0.0113, 0.0093, 0.0124, 0.0096, 0.0101, 0.0092], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 07:55:23,962 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.8409, 3.4174, 3.5730, 3.7528, 3.5716, 3.4248, 3.8913, 1.3575], + device='cuda:5'), covar=tensor([0.0837, 0.0871, 0.0849, 0.0876, 0.1374, 0.1508, 0.0724, 0.4695], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0243, 0.0275, 0.0295, 0.0333, 0.0284, 0.0304, 0.0298], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:55:25,225 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8549, 1.6214, 1.5574, 1.8965, 2.1937, 1.9003, 1.2511, 1.6077], + device='cuda:5'), covar=tensor([0.2204, 0.2314, 0.1976, 0.1782, 0.1646, 0.1157, 0.2722, 0.1923], + device='cuda:5'), in_proj_covar=tensor([0.0238, 0.0210, 0.0205, 0.0187, 0.0240, 0.0179, 0.0215, 0.0193], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:55:25,745 INFO [finetune.py:1010] (5/7) Epoch 7, validation: loss=0.161, simple_loss=0.2327, pruned_loss=0.04464, over 2265189.00 frames. +2023-03-26 07:55:25,746 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 07:55:54,505 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=37404.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 07:56:02,293 INFO [finetune.py:976] (5/7) Epoch 7, batch 3050, loss[loss=0.1602, simple_loss=0.2434, pruned_loss=0.03846, over 4763.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2741, pruned_loss=0.07464, over 953712.44 frames. 
], batch size: 25, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 07:56:11,554 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37425.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:56:12,026 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.173e+02 1.581e+02 1.871e+02 2.387e+02 4.591e+02, threshold=3.742e+02, percent-clipped=1.0 +2023-03-26 07:56:21,161 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4223, 1.4376, 1.8625, 1.7978, 1.6183, 3.3586, 1.2493, 1.6650], + device='cuda:5'), covar=tensor([0.1000, 0.1734, 0.1183, 0.1060, 0.1571, 0.0245, 0.1530, 0.1611], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0081, 0.0076, 0.0079, 0.0092, 0.0082, 0.0084, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 07:56:48,964 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37461.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 07:56:57,057 INFO [finetune.py:976] (5/7) Epoch 7, batch 3100, loss[loss=0.1782, simple_loss=0.2363, pruned_loss=0.06009, over 4041.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2711, pruned_loss=0.07349, over 950886.00 frames. ], batch size: 17, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 07:57:38,294 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3864, 2.0268, 2.6512, 4.3485, 3.1722, 2.7938, 1.1775, 3.4433], + device='cuda:5'), covar=tensor([0.1667, 0.1451, 0.1460, 0.0459, 0.0653, 0.1468, 0.2025, 0.0493], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0117, 0.0135, 0.0166, 0.0102, 0.0140, 0.0129, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 07:57:52,638 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=37509.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 07:58:01,950 INFO [finetune.py:976] (5/7) Epoch 7, batch 3150, loss[loss=0.1842, simple_loss=0.2555, pruned_loss=0.05652, over 4870.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2693, pruned_loss=0.0731, over 953239.19 frames. ], batch size: 34, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 07:58:13,088 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.225e+02 1.704e+02 2.041e+02 2.515e+02 5.799e+02, threshold=4.081e+02, percent-clipped=3.0 +2023-03-26 07:58:45,535 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0913, 1.8891, 1.6607, 1.9414, 1.8216, 1.8453, 1.8303, 2.5992], + device='cuda:5'), covar=tensor([0.5864, 0.6474, 0.4782, 0.6198, 0.5910, 0.3405, 0.6126, 0.2243], + device='cuda:5'), in_proj_covar=tensor([0.0284, 0.0258, 0.0220, 0.0281, 0.0241, 0.0205, 0.0245, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 07:59:05,777 INFO [finetune.py:976] (5/7) Epoch 7, batch 3200, loss[loss=0.1835, simple_loss=0.2502, pruned_loss=0.05845, over 4868.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2653, pruned_loss=0.07167, over 951854.32 frames. ], batch size: 34, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 07:59:06,463 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37567.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 07:59:24,807 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.11 vs. 
limit=2.0 +2023-03-26 08:00:08,907 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37614.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 08:00:14,409 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=37615.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 08:00:14,948 INFO [finetune.py:976] (5/7) Epoch 7, batch 3250, loss[loss=0.229, simple_loss=0.3052, pruned_loss=0.07641, over 4811.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2672, pruned_loss=0.0727, over 952739.60 frames. ], batch size: 51, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 08:00:24,939 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37624.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 08:00:26,116 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.216e+02 1.664e+02 1.918e+02 2.274e+02 4.430e+02, threshold=3.836e+02, percent-clipped=1.0 +2023-03-26 08:01:09,574 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37660.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 08:01:10,724 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=37662.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 08:01:18,268 INFO [finetune.py:976] (5/7) Epoch 7, batch 3300, loss[loss=0.2849, simple_loss=0.3388, pruned_loss=0.1155, over 4864.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2706, pruned_loss=0.07444, over 951010.84 frames. ], batch size: 44, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 08:01:21,979 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4650, 3.2190, 3.0648, 1.3533, 3.4191, 2.4873, 0.9354, 2.1508], + device='cuda:5'), covar=tensor([0.2939, 0.2160, 0.1933, 0.3627, 0.1279, 0.1073, 0.4191, 0.1639], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0172, 0.0161, 0.0128, 0.0153, 0.0122, 0.0145, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 08:01:32,995 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9946, 1.6087, 1.8586, 1.8651, 1.5852, 1.6514, 1.7974, 1.7119], + device='cuda:5'), covar=tensor([0.5590, 0.6933, 0.5570, 0.6522, 0.7678, 0.5589, 0.8642, 0.5656], + device='cuda:5'), in_proj_covar=tensor([0.0230, 0.0242, 0.0255, 0.0254, 0.0244, 0.0220, 0.0272, 0.0226], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 08:02:12,468 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=37708.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 08:02:22,970 INFO [finetune.py:976] (5/7) Epoch 7, batch 3350, loss[loss=0.2067, simple_loss=0.2664, pruned_loss=0.07352, over 4895.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2744, pruned_loss=0.07552, over 952625.92 frames. ], batch size: 37, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 08:02:24,986 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. 
limit=2.0 +2023-03-26 08:02:25,473 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37720.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 08:02:31,778 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6018, 1.4360, 2.2535, 3.3101, 2.2466, 2.2722, 1.1246, 2.6233], + device='cuda:5'), covar=tensor([0.1785, 0.1555, 0.1190, 0.0597, 0.0787, 0.1453, 0.1847, 0.0602], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0118, 0.0135, 0.0166, 0.0102, 0.0140, 0.0129, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 08:02:34,114 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.767e+02 2.019e+02 2.457e+02 5.992e+02, threshold=4.038e+02, percent-clipped=4.0 +2023-03-26 08:03:28,095 INFO [finetune.py:976] (5/7) Epoch 7, batch 3400, loss[loss=0.2626, simple_loss=0.3221, pruned_loss=0.1015, over 4788.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.276, pruned_loss=0.07612, over 952440.15 frames. ], batch size: 45, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 08:03:39,301 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2316, 1.2564, 1.4677, 1.0415, 1.2691, 1.3582, 1.2438, 1.4565], + device='cuda:5'), covar=tensor([0.0981, 0.1823, 0.1106, 0.1096, 0.0798, 0.1021, 0.2422, 0.0733], + device='cuda:5'), in_proj_covar=tensor([0.0202, 0.0204, 0.0198, 0.0195, 0.0181, 0.0220, 0.0217, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 08:03:48,168 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.5383, 1.6308, 1.7322, 0.9557, 1.7001, 1.9994, 1.9264, 1.5410], + device='cuda:5'), covar=tensor([0.0929, 0.0659, 0.0483, 0.0646, 0.0498, 0.0555, 0.0375, 0.0805], + device='cuda:5'), in_proj_covar=tensor([0.0129, 0.0154, 0.0120, 0.0136, 0.0130, 0.0123, 0.0144, 0.0144], + device='cuda:5'), out_proj_covar=tensor([9.5873e-05, 1.1382e-04, 8.6706e-05, 9.9253e-05, 9.3615e-05, 9.0725e-05, + 1.0603e-04, 1.0632e-04], device='cuda:5') +2023-03-26 08:04:07,415 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-03-26 08:04:08,450 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5470, 1.5319, 1.4564, 1.7485, 1.7941, 1.6837, 1.1670, 1.3285], + device='cuda:5'), covar=tensor([0.2184, 0.2055, 0.1841, 0.1514, 0.1916, 0.1158, 0.2704, 0.1965], + device='cuda:5'), in_proj_covar=tensor([0.0238, 0.0209, 0.0205, 0.0187, 0.0240, 0.0178, 0.0214, 0.0192], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 08:04:20,274 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-03-26 08:04:32,124 INFO [finetune.py:976] (5/7) Epoch 7, batch 3450, loss[loss=0.2057, simple_loss=0.2599, pruned_loss=0.07575, over 4919.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2748, pruned_loss=0.07488, over 953096.17 frames. ], batch size: 38, lr: 3.87e-03, grad_scale: 32.0 +2023-03-26 08:04:43,342 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.130e+02 1.715e+02 1.991e+02 2.496e+02 6.747e+02, threshold=3.982e+02, percent-clipped=3.0 +2023-03-26 08:05:36,357 INFO [finetune.py:976] (5/7) Epoch 7, batch 3500, loss[loss=0.1864, simple_loss=0.2397, pruned_loss=0.06661, over 4023.00 frames. 
], tot_loss[loss=0.2097, simple_loss=0.272, pruned_loss=0.07371, over 953155.45 frames. ], batch size: 17, lr: 3.86e-03, grad_scale: 32.0 +2023-03-26 08:05:45,630 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0735, 0.9439, 0.9769, 0.3345, 0.7416, 1.0873, 1.1384, 0.9832], + device='cuda:5'), covar=tensor([0.0935, 0.0749, 0.0547, 0.0583, 0.0645, 0.0832, 0.0461, 0.0695], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0156, 0.0121, 0.0137, 0.0131, 0.0124, 0.0145, 0.0145], + device='cuda:5'), out_proj_covar=tensor([9.6632e-05, 1.1500e-04, 8.7303e-05, 9.9859e-05, 9.4219e-05, 9.1638e-05, + 1.0680e-04, 1.0707e-04], device='cuda:5') +2023-03-26 08:06:03,549 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.3009, 2.9208, 3.0577, 3.2046, 3.0613, 2.8885, 3.3422, 1.0764], + device='cuda:5'), covar=tensor([0.1225, 0.1134, 0.1171, 0.1297, 0.1719, 0.1742, 0.1156, 0.5259], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0243, 0.0274, 0.0293, 0.0331, 0.0282, 0.0303, 0.0296], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 08:06:41,166 INFO [finetune.py:976] (5/7) Epoch 7, batch 3550, loss[loss=0.1777, simple_loss=0.2452, pruned_loss=0.05511, over 4789.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.268, pruned_loss=0.07215, over 953104.96 frames. ], batch size: 28, lr: 3.86e-03, grad_scale: 32.0 +2023-03-26 08:06:57,390 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37924.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 08:06:58,532 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.177e+02 1.559e+02 1.846e+02 2.185e+02 4.242e+02, threshold=3.693e+02, percent-clipped=1.0 +2023-03-26 08:07:52,119 INFO [finetune.py:976] (5/7) Epoch 7, batch 3600, loss[loss=0.1839, simple_loss=0.2569, pruned_loss=0.05546, over 4803.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2659, pruned_loss=0.07152, over 954165.01 frames. ], batch size: 29, lr: 3.86e-03, grad_scale: 32.0 +2023-03-26 08:08:01,699 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=37972.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 08:08:06,537 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37979.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 08:08:49,692 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-03-26 08:08:59,065 INFO [finetune.py:976] (5/7) Epoch 7, batch 3650, loss[loss=0.2115, simple_loss=0.2791, pruned_loss=0.07197, over 4902.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2691, pruned_loss=0.07326, over 955036.84 frames. ], batch size: 37, lr: 3.86e-03, grad_scale: 32.0 +2023-03-26 08:09:07,252 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=38020.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 08:09:10,799 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.158e+02 1.699e+02 2.068e+02 2.418e+02 4.148e+02, threshold=4.136e+02, percent-clipped=4.0 +2023-03-26 08:09:27,947 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=38040.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 08:09:35,955 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.74 vs. limit=5.0 +2023-03-26 08:09:46,799 INFO [finetune.py:976] (5/7) Epoch 7, batch 3700, loss[loss=0.2144, simple_loss=0.2913, pruned_loss=0.06878, over 4842.00 frames. 
], tot_loss[loss=0.2113, simple_loss=0.273, pruned_loss=0.0748, over 953282.23 frames. ], batch size: 47, lr: 3.86e-03, grad_scale: 32.0 +2023-03-26 08:09:48,564 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=38068.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 08:10:19,917 INFO [finetune.py:976] (5/7) Epoch 7, batch 3750, loss[loss=0.2255, simple_loss=0.2838, pruned_loss=0.08361, over 4815.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.275, pruned_loss=0.07567, over 954205.30 frames. ], batch size: 33, lr: 3.86e-03, grad_scale: 32.0 +2023-03-26 08:10:26,925 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.737e+01 1.627e+02 1.982e+02 2.503e+02 4.763e+02, threshold=3.965e+02, percent-clipped=1.0 +2023-03-26 08:10:57,172 INFO [finetune.py:976] (5/7) Epoch 7, batch 3800, loss[loss=0.2108, simple_loss=0.2669, pruned_loss=0.07735, over 4776.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2755, pruned_loss=0.07557, over 956400.36 frames. ], batch size: 51, lr: 3.86e-03, grad_scale: 32.0 +2023-03-26 08:11:08,278 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6015, 1.4577, 1.9893, 3.3888, 2.3128, 2.2339, 0.9499, 2.6002], + device='cuda:5'), covar=tensor([0.1895, 0.1550, 0.1460, 0.0604, 0.0774, 0.1551, 0.1999, 0.0610], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0118, 0.0135, 0.0166, 0.0102, 0.0140, 0.0129, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 08:11:30,369 INFO [finetune.py:976] (5/7) Epoch 7, batch 3850, loss[loss=0.1787, simple_loss=0.25, pruned_loss=0.05373, over 4916.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2734, pruned_loss=0.07425, over 956704.19 frames. ], batch size: 36, lr: 3.86e-03, grad_scale: 32.0 +2023-03-26 08:11:43,043 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.105e+02 1.610e+02 2.090e+02 2.406e+02 4.877e+02, threshold=4.181e+02, percent-clipped=2.0 +2023-03-26 08:12:25,297 INFO [finetune.py:976] (5/7) Epoch 7, batch 3900, loss[loss=0.1855, simple_loss=0.2559, pruned_loss=0.05758, over 4907.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2709, pruned_loss=0.07378, over 958031.49 frames. ], batch size: 36, lr: 3.86e-03, grad_scale: 32.0 +2023-03-26 08:13:28,058 INFO [finetune.py:976] (5/7) Epoch 7, batch 3950, loss[loss=0.2091, simple_loss=0.2693, pruned_loss=0.07445, over 4892.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2686, pruned_loss=0.07281, over 957922.53 frames. ], batch size: 35, lr: 3.86e-03, grad_scale: 32.0 +2023-03-26 08:13:45,512 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.011e+02 1.693e+02 1.988e+02 2.374e+02 4.679e+02, threshold=3.976e+02, percent-clipped=1.0 +2023-03-26 08:13:56,367 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=38335.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 08:14:21,686 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=38360.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 08:14:25,157 INFO [finetune.py:976] (5/7) Epoch 7, batch 4000, loss[loss=0.2629, simple_loss=0.3156, pruned_loss=0.1051, over 4816.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2677, pruned_loss=0.07298, over 956175.90 frames. ], batch size: 51, lr: 3.86e-03, grad_scale: 32.0 +2023-03-26 08:15:29,788 INFO [finetune.py:976] (5/7) Epoch 7, batch 4050, loss[loss=0.192, simple_loss=0.255, pruned_loss=0.06447, over 4759.00 frames. 
], tot_loss[loss=0.2109, simple_loss=0.2716, pruned_loss=0.07509, over 953796.88 frames. ], batch size: 28, lr: 3.86e-03, grad_scale: 32.0 +2023-03-26 08:15:32,360 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-03-26 08:15:33,992 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=38421.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 08:15:42,070 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.248e+02 1.778e+02 2.129e+02 2.625e+02 5.238e+02, threshold=4.258e+02, percent-clipped=5.0 +2023-03-26 08:16:32,434 INFO [finetune.py:976] (5/7) Epoch 7, batch 4100, loss[loss=0.194, simple_loss=0.2568, pruned_loss=0.06558, over 4831.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2742, pruned_loss=0.07581, over 953045.31 frames. ], batch size: 30, lr: 3.86e-03, grad_scale: 32.0 +2023-03-26 08:16:48,940 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9396, 1.8842, 1.7802, 2.0939, 2.4014, 2.0620, 1.8146, 1.5635], + device='cuda:5'), covar=tensor([0.1988, 0.1893, 0.1644, 0.1394, 0.1892, 0.1064, 0.2238, 0.1645], + device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0209, 0.0203, 0.0186, 0.0238, 0.0177, 0.0213, 0.0191], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 08:17:09,804 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.06 vs. limit=5.0 +2023-03-26 08:17:12,782 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.4323, 1.5007, 1.4781, 0.7891, 1.5705, 1.7773, 1.8126, 1.3859], + device='cuda:5'), covar=tensor([0.0885, 0.0617, 0.0439, 0.0580, 0.0381, 0.0533, 0.0271, 0.0646], + device='cuda:5'), in_proj_covar=tensor([0.0129, 0.0156, 0.0120, 0.0137, 0.0131, 0.0124, 0.0145, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.6143e-05, 1.1509e-04, 8.7108e-05, 9.9710e-05, 9.4243e-05, 9.1115e-05, + 1.0661e-04, 1.0739e-04], device='cuda:5') +2023-03-26 08:17:14,531 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1538, 1.8630, 1.8091, 0.8532, 2.0347, 2.3038, 1.9777, 1.8927], + device='cuda:5'), covar=tensor([0.0871, 0.0779, 0.0566, 0.0786, 0.0537, 0.0535, 0.0513, 0.0644], + device='cuda:5'), in_proj_covar=tensor([0.0129, 0.0156, 0.0120, 0.0137, 0.0131, 0.0124, 0.0145, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.6179e-05, 1.1513e-04, 8.7154e-05, 9.9752e-05, 9.4296e-05, 9.1145e-05, + 1.0666e-04, 1.0745e-04], device='cuda:5') +2023-03-26 08:17:20,601 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7560, 1.5831, 1.6255, 1.6948, 1.2473, 3.7219, 1.4470, 2.1538], + device='cuda:5'), covar=tensor([0.3460, 0.2481, 0.2129, 0.2216, 0.1844, 0.0159, 0.2546, 0.1298], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0114, 0.0119, 0.0123, 0.0117, 0.0098, 0.0101, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0003, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 08:17:31,592 INFO [finetune.py:976] (5/7) Epoch 7, batch 4150, loss[loss=0.2318, simple_loss=0.2982, pruned_loss=0.08265, over 4927.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2749, pruned_loss=0.07581, over 954509.08 frames. 
], batch size: 33, lr: 3.86e-03, grad_scale: 32.0
+2023-03-26 08:17:43,680 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.188e+02 1.723e+02 2.145e+02 2.598e+02 6.605e+02, threshold=4.291e+02, percent-clipped=2.0
+2023-03-26 08:18:32,253 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.66 vs. limit=5.0
+2023-03-26 08:18:34,212 INFO [finetune.py:976] (5/7) Epoch 7, batch 4200, loss[loss=0.2393, simple_loss=0.2766, pruned_loss=0.101, over 4708.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2741, pruned_loss=0.07498, over 954560.76 frames. ], batch size: 23, lr: 3.86e-03, grad_scale: 32.0
+2023-03-26 08:18:47,583 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3370, 2.6846, 2.1005, 1.7461, 2.5454, 2.6033, 2.4976, 2.1842],
+ device='cuda:5'), covar=tensor([0.0596, 0.0499, 0.0820, 0.0915, 0.0722, 0.0728, 0.0610, 0.0965],
+ device='cuda:5'), in_proj_covar=tensor([0.0136, 0.0133, 0.0143, 0.0126, 0.0113, 0.0145, 0.0146, 0.0161],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:19:11,308 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0
+2023-03-26 08:19:34,113 INFO [finetune.py:976] (5/7) Epoch 7, batch 4250, loss[loss=0.2098, simple_loss=0.2715, pruned_loss=0.07402, over 4837.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2713, pruned_loss=0.07364, over 956002.41 frames. ], batch size: 30, lr: 3.86e-03, grad_scale: 32.0
+2023-03-26 08:19:44,842 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.083e+02 1.606e+02 1.980e+02 2.259e+02 5.740e+02, threshold=3.960e+02, percent-clipped=2.0
+2023-03-26 08:20:02,316 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=38635.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:20:31,186 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0
+2023-03-26 08:20:38,696 INFO [finetune.py:976] (5/7) Epoch 7, batch 4300, loss[loss=0.2129, simple_loss=0.2615, pruned_loss=0.08219, over 4784.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2691, pruned_loss=0.07313, over 955187.69 frames. ], batch size: 26, lr: 3.86e-03, grad_scale: 32.0
+2023-03-26 08:20:59,385 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=38683.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:20:59,461 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0810, 1.8485, 1.5618, 1.6037, 1.7984, 1.8185, 1.8266, 2.5730],
+ device='cuda:5'), covar=tensor([0.5330, 0.5120, 0.4496, 0.5476, 0.4962, 0.3370, 0.4957, 0.2217],
+ device='cuda:5'), in_proj_covar=tensor([0.0283, 0.0258, 0.0219, 0.0280, 0.0239, 0.0204, 0.0244, 0.0204],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:21:24,210 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2752, 2.0468, 1.7437, 2.0260, 2.2325, 1.8855, 2.4898, 2.2001],
+ device='cuda:5'), covar=tensor([0.1431, 0.2841, 0.3699, 0.3335, 0.2788, 0.1789, 0.3634, 0.2111],
+ device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0190, 0.0235, 0.0253, 0.0233, 0.0192, 0.0212, 0.0193],
+ device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:21:41,238 INFO [finetune.py:976] (5/7) Epoch 7, batch 4350, loss[loss=0.1667, simple_loss=0.2409, pruned_loss=0.04628, over 4786.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2654, pruned_loss=0.07202, over 956168.49 frames. ], batch size: 29, lr: 3.86e-03, grad_scale: 32.0
+2023-03-26 08:21:41,321 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=38716.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:21:52,344 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.223e+02 1.679e+02 1.871e+02 2.197e+02 5.866e+02, threshold=3.741e+02, percent-clipped=4.0
+2023-03-26 08:21:54,287 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0704, 2.0696, 1.9675, 1.4867, 2.1805, 2.1376, 2.1430, 1.7345],
+ device='cuda:5'), covar=tensor([0.0561, 0.0572, 0.0751, 0.0922, 0.0486, 0.0647, 0.0555, 0.0977],
+ device='cuda:5'), in_proj_covar=tensor([0.0137, 0.0135, 0.0145, 0.0127, 0.0114, 0.0147, 0.0147, 0.0163],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:22:43,657 INFO [finetune.py:976] (5/7) Epoch 7, batch 4400, loss[loss=0.2164, simple_loss=0.2851, pruned_loss=0.07384, over 4903.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2667, pruned_loss=0.07254, over 956484.70 frames. ], batch size: 43, lr: 3.86e-03, grad_scale: 32.0
+2023-03-26 08:23:42,200 INFO [finetune.py:976] (5/7) Epoch 7, batch 4450, loss[loss=0.2546, simple_loss=0.3112, pruned_loss=0.09905, over 4813.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2706, pruned_loss=0.07375, over 957094.24 frames. ], batch size: 45, lr: 3.86e-03, grad_scale: 32.0
+2023-03-26 08:23:51,898 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=38823.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:23:53,603 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.165e+02 1.712e+02 1.965e+02 2.330e+02 4.727e+02, threshold=3.929e+02, percent-clipped=4.0
+2023-03-26 08:24:44,781 INFO [finetune.py:976] (5/7) Epoch 7, batch 4500, loss[loss=0.2, simple_loss=0.2652, pruned_loss=0.06742, over 4698.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2729, pruned_loss=0.07429, over 956695.18 frames. ], batch size: 23, lr: 3.86e-03, grad_scale: 32.0
+2023-03-26 08:25:06,362 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=38884.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:25:07,596 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9234, 1.7862, 1.5850, 1.7667, 1.6471, 1.6957, 1.6868, 2.3998],
+ device='cuda:5'), covar=tensor([0.4921, 0.5625, 0.3972, 0.5104, 0.5220, 0.2922, 0.5172, 0.2071],
+ device='cuda:5'), in_proj_covar=tensor([0.0284, 0.0259, 0.0220, 0.0281, 0.0241, 0.0206, 0.0246, 0.0205],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:25:14,570 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3795, 2.0841, 2.7811, 1.9249, 2.6637, 2.6297, 2.1272, 2.7936],
+ device='cuda:5'), covar=tensor([0.1652, 0.2282, 0.1909, 0.2369, 0.1035, 0.1690, 0.2650, 0.0999],
+ device='cuda:5'), in_proj_covar=tensor([0.0205, 0.0205, 0.0200, 0.0197, 0.0183, 0.0223, 0.0218, 0.0203],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:25:49,099 INFO [finetune.py:976] (5/7) Epoch 7, batch 4550, loss[loss=0.2375, simple_loss=0.3011, pruned_loss=0.08691, over 4867.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2742, pruned_loss=0.07462, over 955435.56 frames. ], batch size: 34, lr: 3.86e-03, grad_scale: 64.0
+2023-03-26 08:25:59,513 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.041e+02 1.671e+02 2.000e+02 2.524e+02 3.434e+02, threshold=4.000e+02, percent-clipped=0.0
+2023-03-26 08:26:47,280 INFO [finetune.py:976] (5/7) Epoch 7, batch 4600, loss[loss=0.1895, simple_loss=0.2549, pruned_loss=0.06207, over 4918.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2727, pruned_loss=0.07313, over 956958.15 frames. ], batch size: 33, lr: 3.86e-03, grad_scale: 64.0
+2023-03-26 08:27:54,985 INFO [finetune.py:976] (5/7) Epoch 7, batch 4650, loss[loss=0.1964, simple_loss=0.2534, pruned_loss=0.06976, over 4808.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2709, pruned_loss=0.07351, over 955053.72 frames. ], batch size: 25, lr: 3.86e-03, grad_scale: 32.0
+2023-03-26 08:27:55,086 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39016.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:28:06,171 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.761e+01 1.505e+02 1.924e+02 2.345e+02 4.238e+02, threshold=3.847e+02, percent-clipped=2.0
+2023-03-26 08:28:56,284 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=39064.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:28:57,994 INFO [finetune.py:976] (5/7) Epoch 7, batch 4700, loss[loss=0.1785, simple_loss=0.2472, pruned_loss=0.05487, over 4845.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2688, pruned_loss=0.07299, over 957578.53 frames. ], batch size: 44, lr: 3.86e-03, grad_scale: 32.0
+2023-03-26 08:29:56,021 INFO [finetune.py:976] (5/7) Epoch 7, batch 4750, loss[loss=0.1726, simple_loss=0.245, pruned_loss=0.05012, over 4756.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2672, pruned_loss=0.07227, over 958841.74 frames. ], batch size: 27, lr: 3.86e-03, grad_scale: 32.0
+2023-03-26 08:30:08,813 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.221e+02 1.609e+02 1.819e+02 2.206e+02 4.512e+02, threshold=3.638e+02, percent-clipped=2.0
+2023-03-26 08:30:10,230 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0
+2023-03-26 08:30:28,652 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3864, 2.0911, 1.7091, 0.7597, 1.8647, 1.8658, 1.7276, 1.9716],
+ device='cuda:5'), covar=tensor([0.0971, 0.0877, 0.1671, 0.2370, 0.1670, 0.2199, 0.2312, 0.0996],
+ device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0201, 0.0201, 0.0189, 0.0218, 0.0206, 0.0223, 0.0199],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:30:58,731 INFO [finetune.py:976] (5/7) Epoch 7, batch 4800, loss[loss=0.1489, simple_loss=0.2231, pruned_loss=0.0373, over 4807.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2686, pruned_loss=0.073, over 956990.12 frames. ], batch size: 25, lr: 3.86e-03, grad_scale: 32.0
+2023-03-26 08:31:12,897 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39179.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:31:21,980 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7985, 3.9777, 3.7878, 1.9171, 4.1140, 3.0745, 0.8589, 2.8359],
+ device='cuda:5'), covar=tensor([0.2404, 0.1758, 0.1484, 0.3185, 0.0987, 0.0924, 0.4563, 0.1383],
+ device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0173, 0.0163, 0.0130, 0.0155, 0.0123, 0.0148, 0.0123],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-26 08:31:22,018 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=39185.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:31:50,399 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0
+2023-03-26 08:31:57,082 INFO [finetune.py:976] (5/7) Epoch 7, batch 4850, loss[loss=0.2312, simple_loss=0.29, pruned_loss=0.08622, over 4826.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2715, pruned_loss=0.07394, over 957083.31 frames. ], batch size: 30, lr: 3.86e-03, grad_scale: 16.0
+2023-03-26 08:32:06,051 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.092e+02 1.691e+02 2.004e+02 2.499e+02 4.240e+02, threshold=4.008e+02, percent-clipped=2.0
+2023-03-26 08:32:11,530 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6864, 1.5412, 1.4359, 1.7904, 2.0470, 1.7035, 1.2822, 1.3868],
+ device='cuda:5'), covar=tensor([0.2172, 0.2123, 0.1873, 0.1548, 0.1673, 0.1200, 0.2641, 0.1816],
+ device='cuda:5'), in_proj_covar=tensor([0.0237, 0.0209, 0.0205, 0.0187, 0.0240, 0.0178, 0.0214, 0.0193],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:32:18,171 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=39246.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:32:19,037 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-03-26 08:32:30,568 INFO [finetune.py:976] (5/7) Epoch 7, batch 4900, loss[loss=0.2141, simple_loss=0.275, pruned_loss=0.07663, over 4816.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2723, pruned_loss=0.0739, over 958193.85 frames. ], batch size: 30, lr: 3.86e-03, grad_scale: 16.0
+2023-03-26 08:32:43,184 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=39284.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:33:03,517 INFO [finetune.py:976] (5/7) Epoch 7, batch 4950, loss[loss=0.1643, simple_loss=0.2369, pruned_loss=0.04587, over 4755.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2733, pruned_loss=0.07427, over 956008.26 frames. ], batch size: 26, lr: 3.86e-03, grad_scale: 16.0
+2023-03-26 08:33:12,728 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.085e+02 1.619e+02 1.980e+02 2.423e+02 3.796e+02, threshold=3.961e+02, percent-clipped=0.0
+2023-03-26 08:33:24,233 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=39345.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:33:37,224 INFO [finetune.py:976] (5/7) Epoch 7, batch 5000, loss[loss=0.1885, simple_loss=0.2446, pruned_loss=0.06622, over 4851.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2716, pruned_loss=0.07375, over 956325.53 frames. ], batch size: 49, lr: 3.86e-03, grad_scale: 16.0
+2023-03-26 08:34:07,151 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0
+2023-03-26 08:34:10,935 INFO [finetune.py:976] (5/7) Epoch 7, batch 5050, loss[loss=0.2083, simple_loss=0.2657, pruned_loss=0.07545, over 4935.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2693, pruned_loss=0.0734, over 956766.24 frames. ], batch size: 33, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:34:19,590 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.096e+02 1.623e+02 1.955e+02 2.404e+02 3.498e+02, threshold=3.910e+02, percent-clipped=0.0
+2023-03-26 08:34:52,726 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=39463.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:34:54,407 INFO [finetune.py:976] (5/7) Epoch 7, batch 5100, loss[loss=0.1859, simple_loss=0.2513, pruned_loss=0.06018, over 4866.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.266, pruned_loss=0.0718, over 957179.98 frames. ], batch size: 34, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:34:56,907 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2148, 3.6599, 3.8576, 4.0251, 3.9598, 3.7547, 4.2649, 1.4096],
+ device='cuda:5'), covar=tensor([0.0934, 0.0810, 0.0768, 0.1108, 0.1493, 0.1471, 0.0817, 0.5385],
+ device='cuda:5'), in_proj_covar=tensor([0.0351, 0.0244, 0.0276, 0.0294, 0.0333, 0.0282, 0.0305, 0.0297],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:35:05,372 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39479.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:35:14,399 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3662, 2.2044, 1.8759, 2.3890, 2.4041, 1.9351, 2.7811, 2.4159],
+ device='cuda:5'), covar=tensor([0.1592, 0.3191, 0.3706, 0.3512, 0.2884, 0.1970, 0.4066, 0.2393],
+ device='cuda:5'), in_proj_covar=tensor([0.0172, 0.0191, 0.0236, 0.0256, 0.0236, 0.0194, 0.0213, 0.0194],
+ device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:35:39,561 INFO [finetune.py:976] (5/7) Epoch 7, batch 5150, loss[loss=0.2142, simple_loss=0.2794, pruned_loss=0.07454, over 4823.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.266, pruned_loss=0.07252, over 955317.71 frames. ], batch size: 40, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:35:47,016 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=39524.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:35:48,228 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9150, 1.8059, 1.5328, 1.7310, 1.9920, 1.6321, 2.1185, 1.9488],
+ device='cuda:5'), covar=tensor([0.1585, 0.2719, 0.3655, 0.2940, 0.2712, 0.1981, 0.3220, 0.2136],
+ device='cuda:5'), in_proj_covar=tensor([0.0172, 0.0191, 0.0236, 0.0255, 0.0236, 0.0194, 0.0212, 0.0194],
+ device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:35:49,283 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=39527.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:35:49,816 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.405e+01 1.710e+02 2.016e+02 2.412e+02 5.054e+02, threshold=4.032e+02, percent-clipped=2.0
+2023-03-26 08:36:08,542 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39541.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:36:24,642 INFO [finetune.py:976] (5/7) Epoch 7, batch 5200, loss[loss=0.2517, simple_loss=0.3187, pruned_loss=0.09231, over 4906.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2687, pruned_loss=0.07289, over 955525.06 frames. ], batch size: 37, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:36:27,272 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7791, 1.6226, 1.5135, 1.4017, 1.8435, 1.5250, 1.7538, 1.7817],
+ device='cuda:5'), covar=tensor([0.1627, 0.2778, 0.3834, 0.2976, 0.2980, 0.2110, 0.3258, 0.2326],
+ device='cuda:5'), in_proj_covar=tensor([0.0172, 0.0191, 0.0236, 0.0255, 0.0236, 0.0194, 0.0213, 0.0194],
+ device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:37:07,822 INFO [finetune.py:976] (5/7) Epoch 7, batch 5250, loss[loss=0.2216, simple_loss=0.2903, pruned_loss=0.0764, over 4872.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2729, pruned_loss=0.07405, over 957118.10 frames. ], batch size: 34, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:37:08,845 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.24 vs. limit=5.0
+2023-03-26 08:37:15,021 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.077e+02 1.709e+02 2.070e+02 2.577e+02 5.953e+02, threshold=4.140e+02, percent-clipped=1.0
+2023-03-26 08:37:25,977 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1594, 3.5921, 3.7517, 4.0089, 3.8662, 3.5903, 4.2460, 1.3741],
+ device='cuda:5'), covar=tensor([0.0945, 0.0850, 0.0887, 0.1092, 0.1472, 0.1694, 0.0843, 0.5296],
+ device='cuda:5'), in_proj_covar=tensor([0.0354, 0.0246, 0.0278, 0.0296, 0.0335, 0.0285, 0.0307, 0.0299],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:37:26,012 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=39639.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 08:37:26,606 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39640.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:37:43,685 INFO [finetune.py:976] (5/7) Epoch 7, batch 5300, loss[loss=0.2345, simple_loss=0.2721, pruned_loss=0.09845, over 4112.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2742, pruned_loss=0.07482, over 955441.51 frames. ], batch size: 65, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:37:52,759 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6835, 1.4613, 1.3015, 1.1580, 1.4523, 1.4667, 1.4168, 2.0737],
+ device='cuda:5'), covar=tensor([0.5465, 0.5513, 0.4190, 0.5112, 0.4792, 0.3036, 0.5081, 0.2200],
+ device='cuda:5'), in_proj_covar=tensor([0.0281, 0.0256, 0.0218, 0.0278, 0.0239, 0.0204, 0.0243, 0.0204],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:38:23,001 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=39700.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 08:38:30,954 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2689, 1.3751, 1.5976, 1.1136, 1.2842, 1.4876, 1.3159, 1.5723],
+ device='cuda:5'), covar=tensor([0.1116, 0.2001, 0.1204, 0.1423, 0.0945, 0.1195, 0.2816, 0.0843],
+ device='cuda:5'), in_proj_covar=tensor([0.0201, 0.0203, 0.0197, 0.0195, 0.0180, 0.0220, 0.0217, 0.0200],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:38:32,614 INFO [finetune.py:976] (5/7) Epoch 7, batch 5350, loss[loss=0.1977, simple_loss=0.2633, pruned_loss=0.0661, over 4817.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2727, pruned_loss=0.0735, over 955390.67 frames. ], batch size: 33, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:38:40,830 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.014e+02 1.565e+02 1.855e+02 2.323e+02 5.491e+02, threshold=3.710e+02, percent-clipped=1.0
+2023-03-26 08:39:15,939 INFO [finetune.py:976] (5/7) Epoch 7, batch 5400, loss[loss=0.1963, simple_loss=0.2518, pruned_loss=0.07036, over 4825.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2701, pruned_loss=0.07273, over 956429.87 frames. ], batch size: 40, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:39:16,665 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=39767.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:39:20,661 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5688, 1.4216, 1.4719, 1.4818, 0.9828, 2.8659, 1.1131, 1.6637],
+ device='cuda:5'), covar=tensor([0.3186, 0.2250, 0.2034, 0.2279, 0.1906, 0.0240, 0.2658, 0.1276],
+ device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0115, 0.0119, 0.0123, 0.0117, 0.0099, 0.0101, 0.0098],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 08:39:27,315 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1731, 1.9622, 2.1025, 0.8760, 2.2939, 2.5071, 2.1511, 2.0052],
+ device='cuda:5'), covar=tensor([0.0858, 0.0698, 0.0452, 0.0753, 0.0463, 0.0573, 0.0410, 0.0531],
+ device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0157, 0.0121, 0.0137, 0.0132, 0.0125, 0.0145, 0.0146],
+ device='cuda:5'), out_proj_covar=tensor([9.6653e-05, 1.1542e-04, 8.7167e-05, 9.9514e-05, 9.4710e-05, 9.1742e-05,
+ 1.0658e-04, 1.0743e-04], device='cuda:5')
+2023-03-26 08:39:50,815 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=39815.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:39:51,312 INFO [finetune.py:976] (5/7) Epoch 7, batch 5450, loss[loss=0.2016, simple_loss=0.2628, pruned_loss=0.07023, over 4925.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2665, pruned_loss=0.07129, over 956350.85 frames. ], batch size: 37, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:39:53,199 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39819.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:40:03,634 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.339e+01 1.599e+02 1.928e+02 2.299e+02 3.698e+02, threshold=3.856e+02, percent-clipped=0.0
+2023-03-26 08:40:03,743 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=39828.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:40:21,662 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39841.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:40:54,100 INFO [finetune.py:976] (5/7) Epoch 7, batch 5500, loss[loss=0.2145, simple_loss=0.2635, pruned_loss=0.08273, over 4452.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2623, pruned_loss=0.06921, over 956767.96 frames. ], batch size: 19, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:41:05,353 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=39876.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:41:14,521 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.19 vs. limit=5.0
+2023-03-26 08:41:18,432 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=39889.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:41:57,092 INFO [finetune.py:976] (5/7) Epoch 7, batch 5550, loss[loss=0.1956, simple_loss=0.2723, pruned_loss=0.05944, over 4047.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2649, pruned_loss=0.07089, over 955651.41 frames. ], batch size: 65, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:42:09,016 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0
+2023-03-26 08:42:09,839 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.056e+02 1.647e+02 1.995e+02 2.278e+02 3.177e+02, threshold=3.991e+02, percent-clipped=0.0
+2023-03-26 08:42:28,027 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39940.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:42:37,917 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.4963, 1.5791, 1.6165, 0.9585, 1.5636, 1.8670, 1.8362, 1.4365],
+ device='cuda:5'), covar=tensor([0.0905, 0.0551, 0.0475, 0.0636, 0.0384, 0.0501, 0.0301, 0.0620],
+ device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0157, 0.0120, 0.0137, 0.0132, 0.0125, 0.0145, 0.0146],
+ device='cuda:5'), out_proj_covar=tensor([9.6613e-05, 1.1573e-04, 8.6895e-05, 9.9775e-05, 9.4777e-05, 9.1762e-05,
+ 1.0668e-04, 1.0754e-04], device='cuda:5')
+2023-03-26 08:42:47,652 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4460, 2.0665, 1.8137, 0.7370, 1.9242, 1.9021, 1.7670, 2.0006],
+ device='cuda:5'), covar=tensor([0.0801, 0.0858, 0.1363, 0.2296, 0.1357, 0.2281, 0.2194, 0.0880],
+ device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0202, 0.0203, 0.0190, 0.0220, 0.0207, 0.0224, 0.0199],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:42:58,016 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.1167, 2.5927, 2.5917, 1.2718, 2.6848, 2.1678, 2.0855, 2.2751],
+ device='cuda:5'), covar=tensor([0.0895, 0.0984, 0.1533, 0.2299, 0.1662, 0.2322, 0.2037, 0.1271],
+ device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0201, 0.0202, 0.0189, 0.0219, 0.0206, 0.0223, 0.0198],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:42:58,490 INFO [finetune.py:976] (5/7) Epoch 7, batch 5600, loss[loss=0.1973, simple_loss=0.265, pruned_loss=0.06479, over 4923.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2693, pruned_loss=0.07234, over 954868.35 frames. ], batch size: 38, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:43:20,787 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=39988.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:43:30,219 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39995.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 08:43:52,202 INFO [finetune.py:976] (5/7) Epoch 7, batch 5650, loss[loss=0.1969, simple_loss=0.2586, pruned_loss=0.06761, over 4175.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2732, pruned_loss=0.07354, over 953207.78 frames. ], batch size: 65, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:43:52,301 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0741, 1.7234, 1.3795, 0.5181, 1.5507, 1.6669, 1.4567, 1.6985],
+ device='cuda:5'), covar=tensor([0.0781, 0.0894, 0.1461, 0.2158, 0.1329, 0.2093, 0.2292, 0.0878],
+ device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0201, 0.0202, 0.0189, 0.0219, 0.0206, 0.0223, 0.0198],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:44:09,055 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.182e+02 1.647e+02 1.995e+02 2.469e+02 4.643e+02, threshold=3.989e+02, percent-clipped=3.0
+2023-03-26 08:44:50,694 INFO [finetune.py:976] (5/7) Epoch 7, batch 5700, loss[loss=0.1772, simple_loss=0.2225, pruned_loss=0.066, over 4355.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2694, pruned_loss=0.07356, over 937560.92 frames. ], batch size: 19, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:44:50,782 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1513, 2.0721, 2.3661, 2.3587, 2.2622, 3.8817, 2.0781, 2.2137],
+ device='cuda:5'), covar=tensor([0.0811, 0.1487, 0.0907, 0.0852, 0.1321, 0.0234, 0.1165, 0.1466],
+ device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0081, 0.0075, 0.0078, 0.0092, 0.0083, 0.0085, 0.0079],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 08:45:42,071 INFO [finetune.py:976] (5/7) Epoch 8, batch 0, loss[loss=0.2595, simple_loss=0.3207, pruned_loss=0.09915, over 4736.00 frames. ], tot_loss[loss=0.2595, simple_loss=0.3207, pruned_loss=0.09915, over 4736.00 frames. ], batch size: 54, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:45:42,072 INFO [finetune.py:1001] (5/7) Computing validation loss
+2023-03-26 08:45:49,283 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4477, 1.1906, 1.2520, 1.3389, 1.6463, 1.5150, 1.3603, 1.2180],
+ device='cuda:5'), covar=tensor([0.0290, 0.0318, 0.0601, 0.0279, 0.0250, 0.0390, 0.0294, 0.0392],
+ device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0113, 0.0140, 0.0116, 0.0105, 0.0101, 0.0092, 0.0110],
+ device='cuda:5'), out_proj_covar=tensor([7.0022e-05, 8.8500e-05, 1.1228e-04, 9.1282e-05, 8.2386e-05, 7.5241e-05,
+ 6.9340e-05, 8.5588e-05], device='cuda:5')
+2023-03-26 08:45:57,866 INFO [finetune.py:1010] (5/7) Epoch 8, validation: loss=0.1624, simple_loss=0.234, pruned_loss=0.04544, over 2265189.00 frames.
+2023-03-26 08:45:57,866 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB
+2023-03-26 08:46:20,318 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40119.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:46:20,726 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0
+2023-03-26 08:46:26,529 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40123.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:46:29,511 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.031e+02 1.580e+02 2.018e+02 2.508e+02 5.130e+02, threshold=4.036e+02, percent-clipped=1.0
+2023-03-26 08:46:41,217 INFO [finetune.py:976] (5/7) Epoch 8, batch 50, loss[loss=0.1935, simple_loss=0.2608, pruned_loss=0.06316, over 4901.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2723, pruned_loss=0.07475, over 215249.32 frames. ], batch size: 46, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:46:44,270 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5780, 1.4114, 1.4903, 1.5751, 0.9580, 3.1892, 1.2071, 1.7503],
+ device='cuda:5'), covar=tensor([0.3383, 0.2542, 0.2139, 0.2410, 0.2088, 0.0246, 0.2884, 0.1344],
+ device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0116, 0.0120, 0.0123, 0.0117, 0.0099, 0.0101, 0.0098],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 08:47:08,172 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=40167.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:47:10,680 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40171.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:47:13,614 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0
+2023-03-26 08:47:14,328 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7947, 3.7166, 3.5918, 1.9023, 3.8746, 2.7725, 0.8280, 2.6958],
+ device='cuda:5'), covar=tensor([0.2383, 0.1813, 0.1376, 0.3003, 0.0874, 0.0998, 0.4212, 0.1441],
+ device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0172, 0.0161, 0.0130, 0.0155, 0.0122, 0.0145, 0.0123],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-26 08:47:26,488 INFO [finetune.py:976] (5/7) Epoch 8, batch 100, loss[loss=0.1547, simple_loss=0.2264, pruned_loss=0.04154, over 4702.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2649, pruned_loss=0.07204, over 378830.56 frames. ], batch size: 23, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:47:27,195 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40195.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:47:32,199 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.1951, 1.3535, 1.3717, 0.7582, 1.1245, 1.5585, 1.6192, 1.2772],
+ device='cuda:5'), covar=tensor([0.0906, 0.0469, 0.0477, 0.0510, 0.0450, 0.0480, 0.0264, 0.0702],
+ device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0157, 0.0121, 0.0137, 0.0133, 0.0126, 0.0146, 0.0147],
+ device='cuda:5'), out_proj_covar=tensor([9.6892e-05, 1.1612e-04, 8.7611e-05, 1.0006e-04, 9.5540e-05, 9.2499e-05,
+ 1.0733e-04, 1.0818e-04], device='cuda:5')
+2023-03-26 08:47:42,320 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40218.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:47:48,309 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.570e+02 1.832e+02 2.394e+02 3.868e+02, threshold=3.663e+02, percent-clipped=0.0
+2023-03-26 08:47:59,302 INFO [finetune.py:976] (5/7) Epoch 8, batch 150, loss[loss=0.1668, simple_loss=0.2362, pruned_loss=0.04873, over 4783.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2601, pruned_loss=0.06859, over 508210.39 frames. ], batch size: 29, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:48:07,722 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40256.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:48:18,418 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6269, 2.2880, 1.9010, 0.9445, 1.9638, 2.0619, 1.8927, 2.0632],
+ device='cuda:5'), covar=tensor([0.0961, 0.0808, 0.1562, 0.2075, 0.1412, 0.2189, 0.2170, 0.1026],
+ device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0201, 0.0203, 0.0188, 0.0219, 0.0207, 0.0224, 0.0198],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:48:18,420 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40272.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:48:22,650 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40279.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:48:33,050 INFO [finetune.py:976] (5/7) Epoch 8, batch 200, loss[loss=0.1979, simple_loss=0.2587, pruned_loss=0.06853, over 4902.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2604, pruned_loss=0.07001, over 606073.30 frames. ], batch size: 35, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:48:33,764 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40295.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 08:48:55,738 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.120e+02 1.652e+02 1.957e+02 2.371e+02 3.958e+02, threshold=3.914e+02, percent-clipped=3.0
+2023-03-26 08:48:57,115 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40330.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:48:58,979 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40333.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:49:05,956 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=40343.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 08:49:06,469 INFO [finetune.py:976] (5/7) Epoch 8, batch 250, loss[loss=0.2517, simple_loss=0.317, pruned_loss=0.09314, over 4822.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2662, pruned_loss=0.07106, over 684154.29 frames. ], batch size: 40, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:49:37,959 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40391.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:49:40,040 INFO [finetune.py:976] (5/7) Epoch 8, batch 300, loss[loss=0.2504, simple_loss=0.3007, pruned_loss=0.1001, over 4202.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2719, pruned_loss=0.07362, over 744424.56 frames. ], batch size: 65, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:50:05,004 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40423.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:50:07,995 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.190e+02 1.680e+02 2.022e+02 2.440e+02 4.521e+02, threshold=4.043e+02, percent-clipped=1.0
+2023-03-26 08:50:27,979 INFO [finetune.py:976] (5/7) Epoch 8, batch 350, loss[loss=0.1848, simple_loss=0.2582, pruned_loss=0.05565, over 4818.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2721, pruned_loss=0.07314, over 791804.32 frames. ], batch size: 38, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:51:00,914 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.87 vs. limit=5.0
+2023-03-26 08:51:01,401 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=40471.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:51:01,450 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40471.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:51:26,254 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40492.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:51:27,353 INFO [finetune.py:976] (5/7) Epoch 8, batch 400, loss[loss=0.2538, simple_loss=0.3042, pruned_loss=0.1017, over 4316.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2726, pruned_loss=0.07332, over 825764.37 frames. ], batch size: 66, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:51:27,546 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0
+2023-03-26 08:51:52,934 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=40519.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:51:58,846 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.015e+02 1.662e+02 2.008e+02 2.590e+02 4.107e+02, threshold=4.016e+02, percent-clipped=2.0
+2023-03-26 08:52:11,113 INFO [finetune.py:976] (5/7) Epoch 8, batch 450, loss[loss=0.183, simple_loss=0.2466, pruned_loss=0.05967, over 4837.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2704, pruned_loss=0.07254, over 854832.29 frames. ], batch size: 49, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:52:21,171 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40551.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:52:26,848 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40553.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:52:41,177 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40574.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:52:46,638 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0850, 1.9027, 1.7675, 2.0127, 2.7130, 2.0934, 1.7936, 1.5192],
+ device='cuda:5'), covar=tensor([0.2272, 0.2146, 0.1910, 0.1834, 0.1864, 0.1178, 0.2448, 0.1854],
+ device='cuda:5'), in_proj_covar=tensor([0.0238, 0.0210, 0.0205, 0.0188, 0.0240, 0.0179, 0.0215, 0.0194],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:52:53,156 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8392, 1.7558, 1.7125, 1.2419, 1.9417, 1.8756, 1.8615, 1.5213],
+ device='cuda:5'), covar=tensor([0.0565, 0.0700, 0.0769, 0.0889, 0.0612, 0.0704, 0.0622, 0.1166],
+ device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0131, 0.0141, 0.0124, 0.0112, 0.0142, 0.0143, 0.0158],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:52:54,278 INFO [finetune.py:976] (5/7) Epoch 8, batch 500, loss[loss=0.1669, simple_loss=0.2322, pruned_loss=0.05086, over 4755.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2681, pruned_loss=0.0721, over 876487.74 frames. ], batch size: 59, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:53:17,852 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.064e+02 1.648e+02 1.946e+02 2.379e+02 4.476e+02, threshold=3.892e+02, percent-clipped=1.0
+2023-03-26 08:53:17,931 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40628.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:53:28,115 INFO [finetune.py:976] (5/7) Epoch 8, batch 550, loss[loss=0.199, simple_loss=0.2663, pruned_loss=0.06584, over 4852.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2657, pruned_loss=0.07094, over 894815.18 frames. ], batch size: 47, lr: 3.85e-03, grad_scale: 16.0
+2023-03-26 08:53:30,054 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8276, 1.7727, 1.7545, 1.1471, 1.9350, 1.8979, 1.8365, 1.4995],
+ device='cuda:5'), covar=tensor([0.0568, 0.0681, 0.0738, 0.0924, 0.0574, 0.0728, 0.0636, 0.1189],
+ device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0131, 0.0142, 0.0124, 0.0112, 0.0142, 0.0143, 0.0158],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:53:32,519 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40651.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:53:45,753 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0
+2023-03-26 08:53:49,224 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0
+2023-03-26 08:53:56,760 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40686.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:54:01,538 INFO [finetune.py:976] (5/7) Epoch 8, batch 600, loss[loss=0.2551, simple_loss=0.3167, pruned_loss=0.09669, over 4861.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2665, pruned_loss=0.07157, over 909731.85 frames. ], batch size: 44, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 08:54:14,587 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40712.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:54:24,591 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.141e+02 1.757e+02 2.080e+02 2.524e+02 4.426e+02, threshold=4.160e+02, percent-clipped=1.0
+2023-03-26 08:54:34,713 INFO [finetune.py:976] (5/7) Epoch 8, batch 650, loss[loss=0.2171, simple_loss=0.2777, pruned_loss=0.07829, over 4756.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2705, pruned_loss=0.07249, over 919723.00 frames. ], batch size: 26, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 08:54:43,690 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8286, 1.3118, 1.0897, 1.7314, 2.0767, 1.7565, 1.5297, 1.7353],
+ device='cuda:5'), covar=tensor([0.1376, 0.2139, 0.2219, 0.1191, 0.2163, 0.2378, 0.1439, 0.1740],
+ device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0097, 0.0115, 0.0093, 0.0124, 0.0096, 0.0101, 0.0093],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-26 08:55:08,422 INFO [finetune.py:976] (5/7) Epoch 8, batch 700, loss[loss=0.2442, simple_loss=0.306, pruned_loss=0.09119, over 4727.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2713, pruned_loss=0.07178, over 927786.86 frames. ], batch size: 59, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 08:55:10,747 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0
+2023-03-26 08:55:16,443 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4302, 2.1769, 1.7707, 2.4162, 2.1960, 2.0238, 2.7983, 2.3593],
+ device='cuda:5'), covar=tensor([0.1449, 0.3136, 0.3696, 0.3231, 0.2953, 0.1756, 0.3750, 0.2097],
+ device='cuda:5'), in_proj_covar=tensor([0.0172, 0.0190, 0.0235, 0.0255, 0.0235, 0.0193, 0.0211, 0.0194],
+ device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:55:19,301 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2270, 2.0462, 2.2194, 0.9520, 2.3720, 2.5844, 2.2270, 2.1490],
+ device='cuda:5'), covar=tensor([0.0889, 0.0735, 0.0470, 0.0732, 0.0543, 0.0432, 0.0461, 0.0622],
+ device='cuda:5'), in_proj_covar=tensor([0.0129, 0.0156, 0.0120, 0.0137, 0.0132, 0.0125, 0.0145, 0.0146],
+ device='cuda:5'), out_proj_covar=tensor([9.6018e-05, 1.1503e-04, 8.6457e-05, 9.9601e-05, 9.4915e-05, 9.1882e-05,
+ 1.0700e-04, 1.0741e-04], device='cuda:5')
+2023-03-26 08:55:23,467 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8556, 1.2014, 1.7815, 1.7321, 1.5007, 1.4872, 1.6388, 1.5641],
+ device='cuda:5'), covar=tensor([0.4295, 0.5253, 0.4379, 0.4694, 0.5905, 0.4460, 0.5600, 0.4195],
+ device='cuda:5'), in_proj_covar=tensor([0.0231, 0.0242, 0.0255, 0.0254, 0.0245, 0.0222, 0.0272, 0.0226],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:55:31,876 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.288e+02 1.702e+02 1.948e+02 2.422e+02 4.930e+02, threshold=3.896e+02, percent-clipped=3.0
+2023-03-26 08:55:51,723 INFO [finetune.py:976] (5/7) Epoch 8, batch 750, loss[loss=0.1839, simple_loss=0.2472, pruned_loss=0.06027, over 4748.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2728, pruned_loss=0.0724, over 934434.35 frames. ], batch size: 27, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 08:55:59,261 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40848.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:56:01,140 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40851.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:56:20,357 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40865.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:56:30,272 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8003, 1.5283, 2.1102, 1.3779, 1.8723, 1.8686, 1.5425, 2.0297],
+ device='cuda:5'), covar=tensor([0.1364, 0.2008, 0.1371, 0.2180, 0.0924, 0.1606, 0.2680, 0.0953],
+ device='cuda:5'), in_proj_covar=tensor([0.0204, 0.0206, 0.0199, 0.0196, 0.0182, 0.0220, 0.0220, 0.0202],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:56:32,126 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40874.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:56:55,830 INFO [finetune.py:976] (5/7) Epoch 8, batch 800, loss[loss=0.1992, simple_loss=0.2722, pruned_loss=0.06315, over 4883.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2715, pruned_loss=0.0713, over 938978.16 frames. ], batch size: 35, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 08:57:03,992 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=40899.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:57:04,674 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40900.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:57:13,255 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.9348, 3.4260, 3.6318, 3.8256, 3.6842, 3.4658, 3.9982, 1.2955],
+ device='cuda:5'), covar=tensor([0.0804, 0.0771, 0.0769, 0.0929, 0.1207, 0.1445, 0.0745, 0.4872],
+ device='cuda:5'), in_proj_covar=tensor([0.0349, 0.0242, 0.0274, 0.0292, 0.0332, 0.0281, 0.0302, 0.0296],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:57:19,444 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4217, 2.0796, 1.6189, 0.7600, 1.8133, 1.8933, 1.7221, 1.9126],
+ device='cuda:5'), covar=tensor([0.0750, 0.0910, 0.1460, 0.2179, 0.1674, 0.2291, 0.2169, 0.0956],
+ device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0201, 0.0202, 0.0189, 0.0219, 0.0207, 0.0224, 0.0199],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:57:24,032 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=40922.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:57:26,526 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40926.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:57:27,576 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.008e+02 1.606e+02 1.985e+02 2.397e+02 9.945e+02, threshold=3.971e+02, percent-clipped=3.0
+2023-03-26 08:57:27,712 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40928.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:57:45,606 INFO [finetune.py:976] (5/7) Epoch 8, batch 850, loss[loss=0.1811, simple_loss=0.2425, pruned_loss=0.0598, over 4818.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2685, pruned_loss=0.07064, over 943202.20 frames. ], batch size: 30, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 08:58:00,375 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40961.0, num_to_drop=1, layers_to_drop={3}
+2023-03-26 08:58:10,972 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=40976.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:58:18,105 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40986.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:58:22,823 INFO [finetune.py:976] (5/7) Epoch 8, batch 900, loss[loss=0.2031, simple_loss=0.2591, pruned_loss=0.0736, over 4832.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2647, pruned_loss=0.06945, over 945018.42 frames. ], batch size: 47, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 08:58:25,183 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40997.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:58:28,389 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.9045, 3.3752, 3.5610, 3.7846, 3.6564, 3.3697, 3.9712, 1.2985],
+ device='cuda:5'), covar=tensor([0.0898, 0.0940, 0.0895, 0.1029, 0.1441, 0.1695, 0.0817, 0.5078],
+ device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0242, 0.0274, 0.0292, 0.0333, 0.0282, 0.0303, 0.0296],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:58:31,501 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41007.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:58:33,966 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3574, 1.3469, 1.2522, 0.9049, 1.4789, 1.4252, 1.4025, 1.2301],
+ device='cuda:5'), covar=tensor([0.0499, 0.0629, 0.0630, 0.0767, 0.0766, 0.0584, 0.0533, 0.1029],
+ device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0132, 0.0142, 0.0124, 0.0113, 0.0143, 0.0144, 0.0159],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:58:41,720 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.13 vs. limit=2.0
+2023-03-26 08:58:46,165 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.012e+02 1.525e+02 1.868e+02 2.283e+02 3.598e+02, threshold=3.736e+02, percent-clipped=0.0
+2023-03-26 08:58:50,360 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=41034.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:58:55,904 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.50 vs. limit=5.0
+2023-03-26 08:58:56,356 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8695, 1.7870, 1.8645, 1.2466, 1.9304, 1.8851, 1.8442, 1.5456],
+ device='cuda:5'), covar=tensor([0.0620, 0.0690, 0.0675, 0.0906, 0.0573, 0.0730, 0.0687, 0.1172],
+ device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0132, 0.0143, 0.0125, 0.0113, 0.0143, 0.0144, 0.0160],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 08:58:56,856 INFO [finetune.py:976] (5/7) Epoch 8, batch 950, loss[loss=0.2096, simple_loss=0.2843, pruned_loss=0.06743, over 4780.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2642, pruned_loss=0.06965, over 944586.70 frames. ], batch size: 29, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 08:58:57,586 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41045.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:59:06,015 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41058.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:59:30,585 INFO [finetune.py:976] (5/7) Epoch 8, batch 1000, loss[loss=0.1798, simple_loss=0.2397, pruned_loss=0.05995, over 4681.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2661, pruned_loss=0.07034, over 947075.07 frames. ], batch size: 23, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 08:59:36,006 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41102.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:59:38,381 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41106.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 08:59:52,970 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.084e+02 1.652e+02 2.000e+02 2.359e+02 4.809e+02, threshold=4.000e+02, percent-clipped=2.0
+2023-03-26 09:00:04,080 INFO [finetune.py:976] (5/7) Epoch 8, batch 1050, loss[loss=0.1672, simple_loss=0.237, pruned_loss=0.04868, over 4732.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2689, pruned_loss=0.07165, over 946958.14 frames. ], batch size: 54, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 09:00:06,633 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41148.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:00:08,664 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.62 vs. limit=5.0
+2023-03-26 09:00:16,284 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41163.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:00:37,444 INFO [finetune.py:976] (5/7) Epoch 8, batch 1100, loss[loss=0.2319, simple_loss=0.2863, pruned_loss=0.08879, over 4100.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2707, pruned_loss=0.07217, over 949312.23 frames. ], batch size: 65, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 09:00:38,742 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=41196.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:00:45,551 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.41 vs. limit=5.0
+2023-03-26 09:00:54,969 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41221.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:00:59,684 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.124e+02 1.750e+02 2.155e+02 2.664e+02 4.791e+02, threshold=4.309e+02, percent-clipped=2.0
+2023-03-26 09:01:17,467 INFO [finetune.py:976] (5/7) Epoch 8, batch 1150, loss[loss=0.2105, simple_loss=0.2785, pruned_loss=0.07121, over 4817.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2722, pruned_loss=0.07304, over 951293.37 frames. ], batch size: 39, lr: 3.84e-03, grad_scale: 32.0
+2023-03-26 09:01:32,474 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41256.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 09:01:42,287 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0
+2023-03-26 09:02:01,444 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0
+2023-03-26 09:02:15,112 INFO [finetune.py:976] (5/7) Epoch 8, batch 1200, loss[loss=0.2261, simple_loss=0.2854, pruned_loss=0.0834, over 4822.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2704, pruned_loss=0.07273, over 952254.49 frames. ], batch size: 38, lr: 3.84e-03, grad_scale: 32.0
+2023-03-26 09:02:24,108 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41307.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:02:30,764 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.2985, 1.4413, 1.5193, 0.7970, 1.4346, 1.6788, 1.6853, 1.3441],
+ device='cuda:5'), covar=tensor([0.0939, 0.0493, 0.0473, 0.0552, 0.0425, 0.0549, 0.0331, 0.0730],
+ device='cuda:5'), in_proj_covar=tensor([0.0128, 0.0155, 0.0119, 0.0136, 0.0131, 0.0124, 0.0144, 0.0145],
+ device='cuda:5'), out_proj_covar=tensor([9.5478e-05, 1.1425e-04, 8.5812e-05, 9.8996e-05, 9.3972e-05, 9.1062e-05,
+ 1.0638e-04, 1.0699e-04], device='cuda:5')
+2023-03-26 09:02:37,270 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.297e+01 1.635e+02 1.914e+02 2.289e+02 4.123e+02, threshold=3.829e+02, percent-clipped=0.0
+2023-03-26 09:02:38,591 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5093, 1.3265, 1.8386, 1.3025, 1.5249, 1.6402, 1.2552, 1.8396],
+ device='cuda:5'), covar=tensor([0.1460, 0.2205, 0.1493, 0.1614, 0.1010, 0.1503, 0.2762, 0.0998],
+ device='cuda:5'), in_proj_covar=tensor([0.0203, 0.0206, 0.0200, 0.0197, 0.0182, 0.0221, 0.0221, 0.0202],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 09:02:43,015 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0
+2023-03-26 09:02:51,371 INFO [finetune.py:976] (5/7) Epoch 8, batch 1250, loss[loss=0.2171, simple_loss=0.2612, pruned_loss=0.0865, over 4911.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2678, pruned_loss=0.07196, over 951737.80 frames. ], batch size: 43, lr: 3.84e-03, grad_scale: 32.0
+2023-03-26 09:03:02,690 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41353.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:03:04,421 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=41355.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:03:32,987 INFO [finetune.py:976] (5/7) Epoch 8, batch 1300, loss[loss=0.2207, simple_loss=0.2646, pruned_loss=0.08842, over 4725.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2652, pruned_loss=0.0713, over 953316.50 frames. ], batch size: 23, lr: 3.84e-03, grad_scale: 32.0
+2023-03-26 09:03:37,907 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41401.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:03:40,870 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4451, 1.2805, 1.3671, 1.3001, 0.7164, 2.1330, 0.6935, 1.2480],
+ device='cuda:5'), covar=tensor([0.3303, 0.2530, 0.2164, 0.2463, 0.2236, 0.0426, 0.2894, 0.1388],
+ device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0115, 0.0120, 0.0123, 0.0117, 0.0099, 0.0101, 0.0098],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 09:03:49,755 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8502, 1.0866, 1.7414, 1.6936, 1.5059, 1.4729, 1.5983, 1.5898],
+ device='cuda:5'), covar=tensor([0.4275, 0.5190, 0.4267, 0.4463, 0.5364, 0.4306, 0.5496, 0.4107],
+ device='cuda:5'), in_proj_covar=tensor([0.0230, 0.0240, 0.0253, 0.0253, 0.0244, 0.0221, 0.0271, 0.0226],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 09:03:56,246 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.077e+02 1.674e+02 1.900e+02 2.309e+02 4.379e+02, threshold=3.799e+02, percent-clipped=1.0
+2023-03-26 09:04:06,254 INFO [finetune.py:976] (5/7) Epoch 8, batch 1350, loss[loss=0.1575, simple_loss=0.2285, pruned_loss=0.04327, over 4760.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2663, pruned_loss=0.07187, over 954562.18 frames. ], batch size: 27, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 09:04:16,312 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41458.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:04:36,068 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.77 vs. limit=5.0
+2023-03-26 09:04:39,874 INFO [finetune.py:976] (5/7) Epoch 8, batch 1400, loss[loss=0.18, simple_loss=0.248, pruned_loss=0.05597, over 4783.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2685, pruned_loss=0.07261, over 950211.16 frames. ], batch size: 29, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 09:04:58,588 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41521.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:05:03,330 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.146e+02 1.704e+02 2.004e+02 2.444e+02 3.700e+02, threshold=4.008e+02, percent-clipped=0.0
+2023-03-26 09:05:12,587 INFO [finetune.py:976] (5/7) Epoch 8, batch 1450, loss[loss=0.1786, simple_loss=0.2486, pruned_loss=0.05431, over 4904.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2709, pruned_loss=0.07284, over 952746.15 frames. ], batch size: 43, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 09:05:21,973 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41556.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 09:05:23,200 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6431, 1.4625, 1.4673, 1.5428, 1.3366, 3.6063, 1.3851, 1.9804],
+ device='cuda:5'), covar=tensor([0.3297, 0.2529, 0.2075, 0.2387, 0.1748, 0.0170, 0.2796, 0.1335],
+ device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0115, 0.0119, 0.0123, 0.0116, 0.0098, 0.0100, 0.0098],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 09:05:26,039 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5550, 1.4664, 1.9650, 2.8756, 1.9632, 2.0766, 0.8245, 2.2847],
+ device='cuda:5'), covar=tensor([0.1639, 0.1461, 0.1104, 0.0614, 0.0868, 0.1566, 0.1760, 0.0661],
+ device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0118, 0.0135, 0.0167, 0.0102, 0.0140, 0.0127, 0.0102],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-26 09:05:30,811 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=41569.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:05:46,317 INFO [finetune.py:976] (5/7) Epoch 8, batch 1500, loss[loss=0.1985, simple_loss=0.2732, pruned_loss=0.06192, over 4890.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2732, pruned_loss=0.07408, over 955398.84 frames. ], batch size: 37, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 09:05:53,555 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=41604.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:06:02,068 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0
+2023-03-26 09:06:10,824 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.231e+02 1.635e+02 1.924e+02 2.365e+02 3.634e+02, threshold=3.848e+02, percent-clipped=0.0
+2023-03-26 09:06:18,821 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8409, 1.7206, 1.5961, 1.7657, 1.5353, 4.4763, 1.6409, 2.2732],
+ device='cuda:5'), covar=tensor([0.3393, 0.2481, 0.2236, 0.2386, 0.1738, 0.0116, 0.2635, 0.1330],
+ device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0114, 0.0118, 0.0122, 0.0116, 0.0098, 0.0100, 0.0097],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 09:06:22,438 INFO [finetune.py:976] (5/7) Epoch 8, batch 1550, loss[loss=0.2383, simple_loss=0.3053, pruned_loss=0.08567, over 4816.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2729, pruned_loss=0.07379, over 956695.42 frames. ], batch size: 33, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 09:06:34,420 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41653.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:06:35,075 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41654.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:07:19,720 INFO [finetune.py:976] (5/7) Epoch 8, batch 1600, loss[loss=0.2025, simple_loss=0.2694, pruned_loss=0.06784, over 4891.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2703, pruned_loss=0.07265, over 953345.83 frames. ], batch size: 35, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 09:07:25,549 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=41701.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:07:25,577 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41701.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:07:39,609 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41715.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 09:07:48,908 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.136e+02 1.581e+02 1.949e+02 2.490e+02 4.755e+02, threshold=3.899e+02, percent-clipped=2.0
+2023-03-26 09:07:54,040 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0
+2023-03-26 09:07:57,328 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2267, 1.3058, 1.6414, 1.1037, 1.3081, 1.4046, 1.3240, 1.5801],
+ device='cuda:5'), covar=tensor([0.1045, 0.1808, 0.0999, 0.1204, 0.0740, 0.1040, 0.2443, 0.0680],
+ device='cuda:5'), in_proj_covar=tensor([0.0201, 0.0203, 0.0196, 0.0195, 0.0179, 0.0219, 0.0218, 0.0200],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 09:07:58,440 INFO [finetune.py:976] (5/7) Epoch 8, batch 1650, loss[loss=0.17, simple_loss=0.2375, pruned_loss=0.05125, over 4899.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2667, pruned_loss=0.07118, over 955042.70 frames. ], batch size: 32, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 09:07:59,765 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.4382, 1.4314, 1.5072, 0.8073, 1.5443, 1.7167, 1.6335, 1.3896],
+ device='cuda:5'), covar=tensor([0.1020, 0.0628, 0.0502, 0.0610, 0.0409, 0.0633, 0.0375, 0.0728],
+ device='cuda:5'), in_proj_covar=tensor([0.0128, 0.0155, 0.0120, 0.0136, 0.0132, 0.0124, 0.0145, 0.0146],
+ device='cuda:5'), out_proj_covar=tensor([9.5419e-05, 1.1393e-04, 8.6584e-05, 9.8957e-05, 9.4381e-05, 9.1155e-05,
+ 1.0640e-04, 1.0749e-04], device='cuda:5')
+2023-03-26 09:08:01,533 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=41749.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:08:03,990 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.7054, 3.9791, 4.2955, 4.4994, 4.4368, 4.1758, 4.7684, 1.4559],
+ device='cuda:5'), covar=tensor([0.0802, 0.0906, 0.0904, 0.0958, 0.1135, 0.1388, 0.0653, 0.5402],
+ device='cuda:5'), in_proj_covar=tensor([0.0356, 0.0247, 0.0281, 0.0299, 0.0338, 0.0287, 0.0307, 0.0301],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 09:08:09,834 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41758.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:08:10,313 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0
+2023-03-26 09:08:21,669 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0
+2023-03-26 09:08:42,192 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6988, 1.4984, 2.0834, 1.4215, 1.9233, 1.9660, 1.5074, 2.0513],
+ device='cuda:5'), covar=tensor([0.1312, 0.1934, 0.1286, 0.1776, 0.0725, 0.1297, 0.2631, 0.0814],
+ device='cuda:5'), in_proj_covar=tensor([0.0201, 0.0203, 0.0197, 0.0196, 0.0180, 0.0220, 0.0219, 0.0201],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 09:08:42,691 INFO [finetune.py:976] (5/7) Epoch 8, batch 1700, loss[loss=0.2233, simple_loss=0.2847, pruned_loss=0.08092, over 4759.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2659, pruned_loss=0.07107, over 955788.19 frames. ], batch size: 59, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 09:08:46,724 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0831, 1.9478, 1.5100, 1.7645, 1.9932, 1.7169, 2.2452, 2.0541],
+ device='cuda:5'), covar=tensor([0.1499, 0.2343, 0.3488, 0.3286, 0.2905, 0.1791, 0.3896, 0.2006],
+ device='cuda:5'), in_proj_covar=tensor([0.0173, 0.0190, 0.0235, 0.0255, 0.0236, 0.0194, 0.0212, 0.0194],
+ device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 09:08:50,794 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=41806.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 09:09:06,690 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.763e+02 2.034e+02 2.335e+02 4.675e+02, threshold=4.069e+02, percent-clipped=2.0
+2023-03-26 09:09:11,551 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0523, 2.0444, 1.9764, 1.3244, 2.1165, 2.2189, 1.9883, 1.7167],
+ device='cuda:5'), covar=tensor([0.0589, 0.0604, 0.0720, 0.0916, 0.0534, 0.0623, 0.0658, 0.1046],
+ device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0132, 0.0142, 0.0125, 0.0113, 0.0143, 0.0144, 0.0160],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 09:09:16,760 INFO [finetune.py:976] (5/7) Epoch 8, batch 1750, loss[loss=0.2082, simple_loss=0.2798, pruned_loss=0.06833, over 4739.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2668, pruned_loss=0.07133, over 953770.52 frames. ], batch size: 59, lr: 3.84e-03, grad_scale: 16.0
+2023-03-26 09:09:43,636 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.63 vs. limit=5.0
+2023-03-26 09:09:44,780 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1240, 1.6968, 1.8897, 0.7066, 2.1488, 2.3363, 1.8830, 1.7632],
+ device='cuda:5'), covar=tensor([0.0954, 0.0886, 0.0571, 0.0864, 0.0505, 0.0625, 0.0474, 0.0796],
+ device='cuda:5'), in_proj_covar=tensor([0.0128, 0.0155, 0.0120, 0.0136, 0.0132, 0.0125, 0.0145, 0.0147],
+ device='cuda:5'), out_proj_covar=tensor([9.5560e-05, 1.1456e-04, 8.6821e-05, 9.9140e-05, 9.4868e-05, 9.1901e-05,
+ 1.0651e-04, 1.0800e-04], device='cuda:5')
+2023-03-26 09:09:50,590 INFO [finetune.py:976] (5/7) Epoch 8, batch 1800, loss[loss=0.2232, simple_loss=0.2985, pruned_loss=0.07395, over 4891.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2699, pruned_loss=0.07142, over 957245.14 frames.
], batch size: 43, lr: 3.84e-03, grad_scale: 16.0 +2023-03-26 09:09:58,013 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41906.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:10:13,606 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.102e+02 1.792e+02 2.103e+02 2.633e+02 4.479e+02, threshold=4.207e+02, percent-clipped=2.0 +2023-03-26 09:10:23,654 INFO [finetune.py:976] (5/7) Epoch 8, batch 1850, loss[loss=0.2022, simple_loss=0.2703, pruned_loss=0.06707, over 4811.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2709, pruned_loss=0.07156, over 957886.28 frames. ], batch size: 39, lr: 3.84e-03, grad_scale: 16.0 +2023-03-26 09:10:26,672 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41948.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 09:10:38,713 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41967.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:10:57,330 INFO [finetune.py:976] (5/7) Epoch 8, batch 1900, loss[loss=0.179, simple_loss=0.2473, pruned_loss=0.05538, over 4854.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2727, pruned_loss=0.0722, over 957848.91 frames. ], batch size: 31, lr: 3.84e-03, grad_scale: 16.0 +2023-03-26 09:11:02,755 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9720, 1.9121, 1.7108, 1.8840, 2.0379, 1.7250, 2.2501, 2.0005], + device='cuda:5'), covar=tensor([0.1191, 0.1897, 0.2470, 0.2145, 0.2042, 0.1398, 0.2783, 0.1606], + device='cuda:5'), in_proj_covar=tensor([0.0172, 0.0189, 0.0234, 0.0253, 0.0235, 0.0193, 0.0210, 0.0193], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:11:08,233 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=42009.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 09:11:08,799 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42010.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 09:11:15,027 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.81 vs. limit=5.0 +2023-03-26 09:11:22,121 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.207e+02 1.556e+02 1.920e+02 2.218e+02 3.872e+02, threshold=3.841e+02, percent-clipped=0.0 +2023-03-26 09:11:24,074 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2692, 2.2021, 1.7988, 0.8674, 1.9583, 1.8703, 1.5904, 2.0374], + device='cuda:5'), covar=tensor([0.0770, 0.0554, 0.1206, 0.1627, 0.1234, 0.1739, 0.1918, 0.0773], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0201, 0.0203, 0.0188, 0.0220, 0.0208, 0.0223, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:11:32,121 INFO [finetune.py:976] (5/7) Epoch 8, batch 1950, loss[loss=0.2283, simple_loss=0.2888, pruned_loss=0.0839, over 4928.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2706, pruned_loss=0.07111, over 958336.38 frames. ], batch size: 41, lr: 3.84e-03, grad_scale: 16.0 +2023-03-26 09:12:30,959 INFO [finetune.py:976] (5/7) Epoch 8, batch 2000, loss[loss=0.1775, simple_loss=0.2455, pruned_loss=0.05473, over 4816.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2673, pruned_loss=0.07003, over 957084.82 frames. 
], batch size: 30, lr: 3.84e-03, grad_scale: 16.0 +2023-03-26 09:12:42,916 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5303, 2.2896, 1.8509, 0.8392, 2.0204, 1.9368, 1.7362, 2.0829], + device='cuda:5'), covar=tensor([0.0932, 0.0844, 0.1578, 0.2124, 0.1526, 0.2313, 0.2168, 0.1024], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0202, 0.0204, 0.0189, 0.0221, 0.0208, 0.0224, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:12:51,997 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6755, 1.6048, 1.5360, 1.5889, 1.0260, 3.1362, 1.2936, 1.8652], + device='cuda:5'), covar=tensor([0.2954, 0.2143, 0.1853, 0.2123, 0.1829, 0.0241, 0.2776, 0.1114], + device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0115, 0.0119, 0.0123, 0.0117, 0.0099, 0.0101, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 09:12:56,440 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.048e+02 1.506e+02 1.840e+02 2.176e+02 3.856e+02, threshold=3.679e+02, percent-clipped=1.0 +2023-03-26 09:13:06,567 INFO [finetune.py:976] (5/7) Epoch 8, batch 2050, loss[loss=0.2048, simple_loss=0.264, pruned_loss=0.0728, over 4820.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.263, pruned_loss=0.06821, over 955845.01 frames. ], batch size: 39, lr: 3.84e-03, grad_scale: 16.0 +2023-03-26 09:13:24,472 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0278, 1.9255, 1.6907, 2.0851, 2.7226, 2.0602, 2.0215, 1.5365], + device='cuda:5'), covar=tensor([0.2159, 0.2001, 0.1796, 0.1637, 0.1874, 0.1122, 0.2145, 0.1725], + device='cuda:5'), in_proj_covar=tensor([0.0237, 0.0210, 0.0205, 0.0188, 0.0239, 0.0178, 0.0214, 0.0193], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:13:53,279 INFO [finetune.py:976] (5/7) Epoch 8, batch 2100, loss[loss=0.2108, simple_loss=0.271, pruned_loss=0.07525, over 4940.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2622, pruned_loss=0.06805, over 956488.25 frames. ], batch size: 33, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:14:16,281 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.091e+02 1.710e+02 1.945e+02 2.376e+02 4.149e+02, threshold=3.889e+02, percent-clipped=2.0 +2023-03-26 09:14:25,436 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.5022, 1.4019, 1.4064, 0.7542, 1.4645, 1.6637, 1.6369, 1.2706], + device='cuda:5'), covar=tensor([0.0968, 0.0580, 0.0414, 0.0639, 0.0485, 0.0521, 0.0339, 0.0724], + device='cuda:5'), in_proj_covar=tensor([0.0128, 0.0155, 0.0120, 0.0135, 0.0131, 0.0125, 0.0144, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.5358e-05, 1.1405e-04, 8.6593e-05, 9.8297e-05, 9.3728e-05, 9.1507e-05, + 1.0615e-04, 1.0756e-04], device='cuda:5') +2023-03-26 09:14:26,997 INFO [finetune.py:976] (5/7) Epoch 8, batch 2150, loss[loss=0.2441, simple_loss=0.3041, pruned_loss=0.092, over 4761.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2667, pruned_loss=0.0697, over 956133.43 frames. 
], batch size: 54, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:14:38,881 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42262.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:15:18,053 INFO [finetune.py:976] (5/7) Epoch 8, batch 2200, loss[loss=0.2314, simple_loss=0.2812, pruned_loss=0.09084, over 4238.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2692, pruned_loss=0.07111, over 955035.74 frames. ], batch size: 65, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:15:25,349 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42304.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 09:15:29,028 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42310.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 09:15:45,961 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.121e+02 1.543e+02 1.921e+02 2.479e+02 5.347e+02, threshold=3.843e+02, percent-clipped=1.0 +2023-03-26 09:16:07,397 INFO [finetune.py:976] (5/7) Epoch 8, batch 2250, loss[loss=0.2536, simple_loss=0.3127, pruned_loss=0.0972, over 4832.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2723, pruned_loss=0.07238, over 955265.58 frames. ], batch size: 30, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:16:27,426 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=42358.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:16:47,901 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=42379.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:17:09,030 INFO [finetune.py:976] (5/7) Epoch 8, batch 2300, loss[loss=0.1845, simple_loss=0.2423, pruned_loss=0.06333, over 4707.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.272, pruned_loss=0.07155, over 955564.03 frames. ], batch size: 23, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:17:57,226 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.935e+01 1.473e+02 1.816e+02 2.175e+02 3.275e+02, threshold=3.633e+02, percent-clipped=0.0 +2023-03-26 09:18:06,508 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=42440.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:18:09,265 INFO [finetune.py:976] (5/7) Epoch 8, batch 2350, loss[loss=0.1939, simple_loss=0.2561, pruned_loss=0.06584, over 4762.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2684, pruned_loss=0.07016, over 955001.28 frames. ], batch size: 26, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:18:51,872 INFO [finetune.py:976] (5/7) Epoch 8, batch 2400, loss[loss=0.1761, simple_loss=0.2454, pruned_loss=0.0534, over 4914.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2651, pruned_loss=0.06913, over 955739.41 frames. ], batch size: 37, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:19:02,413 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=42506.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:19:15,987 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.58 vs. limit=5.0 +2023-03-26 09:19:25,686 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.076e+02 1.517e+02 1.798e+02 2.223e+02 5.682e+02, threshold=3.597e+02, percent-clipped=2.0 +2023-03-26 09:19:35,414 INFO [finetune.py:976] (5/7) Epoch 8, batch 2450, loss[loss=0.1799, simple_loss=0.2317, pruned_loss=0.06398, over 4764.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2625, pruned_loss=0.06873, over 953929.59 frames. 
], batch size: 26, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:19:35,565 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7017, 0.6586, 1.6722, 1.5517, 1.4665, 1.4273, 1.4109, 1.5759], + device='cuda:5'), covar=tensor([0.4019, 0.4927, 0.4166, 0.4310, 0.5475, 0.3953, 0.5482, 0.3974], + device='cuda:5'), in_proj_covar=tensor([0.0231, 0.0241, 0.0254, 0.0254, 0.0245, 0.0222, 0.0273, 0.0227], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:19:48,005 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42562.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:19:51,981 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=42567.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:20:08,914 INFO [finetune.py:976] (5/7) Epoch 8, batch 2500, loss[loss=0.1695, simple_loss=0.2369, pruned_loss=0.05106, over 4886.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2643, pruned_loss=0.06981, over 955202.63 frames. ], batch size: 32, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:20:16,164 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42604.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 09:20:20,741 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=42610.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:20:33,719 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.056e+02 1.709e+02 1.979e+02 2.316e+02 5.134e+02, threshold=3.959e+02, percent-clipped=4.0 +2023-03-26 09:20:36,925 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.67 vs. limit=2.0 +2023-03-26 09:20:42,884 INFO [finetune.py:976] (5/7) Epoch 8, batch 2550, loss[loss=0.2121, simple_loss=0.2726, pruned_loss=0.07581, over 4800.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2678, pruned_loss=0.07086, over 953676.17 frames. ], batch size: 25, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:20:46,539 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=42649.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:20:53,417 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=42652.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 09:21:20,515 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7052, 1.6370, 1.4764, 1.7228, 2.2524, 1.8998, 1.4265, 1.4365], + device='cuda:5'), covar=tensor([0.2506, 0.2241, 0.1991, 0.1946, 0.1968, 0.1121, 0.2815, 0.1965], + device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0209, 0.0204, 0.0187, 0.0239, 0.0178, 0.0213, 0.0193], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:21:25,355 INFO [finetune.py:976] (5/7) Epoch 8, batch 2600, loss[loss=0.206, simple_loss=0.2687, pruned_loss=0.07171, over 4822.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2678, pruned_loss=0.07092, over 952926.70 frames. 
], batch size: 38, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:21:36,671 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=42710.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:21:39,727 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1824, 1.9699, 1.7802, 2.0661, 1.9807, 1.9023, 1.9384, 2.7422], + device='cuda:5'), covar=tensor([0.5363, 0.6637, 0.4486, 0.5780, 0.5234, 0.3403, 0.5440, 0.2138], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0260, 0.0222, 0.0282, 0.0242, 0.0207, 0.0245, 0.0207], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:21:49,603 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.153e+02 1.771e+02 2.168e+02 2.787e+02 4.495e+02, threshold=4.337e+02, percent-clipped=4.0 +2023-03-26 09:21:53,824 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42735.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:21:59,152 INFO [finetune.py:976] (5/7) Epoch 8, batch 2650, loss[loss=0.2285, simple_loss=0.2905, pruned_loss=0.08323, over 4900.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2691, pruned_loss=0.07135, over 952754.49 frames. ], batch size: 36, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:22:16,992 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6962, 1.2480, 0.9014, 1.6609, 2.0607, 1.3951, 1.5762, 1.6444], + device='cuda:5'), covar=tensor([0.1411, 0.1996, 0.1924, 0.1133, 0.1977, 0.1877, 0.1445, 0.1885], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0097, 0.0114, 0.0092, 0.0123, 0.0095, 0.0100, 0.0092], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 09:22:26,299 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-26 09:22:40,397 INFO [finetune.py:976] (5/7) Epoch 8, batch 2700, loss[loss=0.1654, simple_loss=0.2287, pruned_loss=0.05098, over 4061.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2672, pruned_loss=0.07027, over 952361.75 frames. ], batch size: 17, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:23:05,661 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6147, 2.3693, 2.0910, 1.0330, 2.2504, 1.9550, 1.7720, 2.1879], + device='cuda:5'), covar=tensor([0.0845, 0.0855, 0.1462, 0.2076, 0.1441, 0.2109, 0.2198, 0.1059], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0200, 0.0201, 0.0187, 0.0217, 0.0206, 0.0223, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:23:27,703 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.159e+02 1.604e+02 1.897e+02 2.218e+02 3.599e+02, threshold=3.793e+02, percent-clipped=0.0 +2023-03-26 09:23:46,886 INFO [finetune.py:976] (5/7) Epoch 8, batch 2750, loss[loss=0.2114, simple_loss=0.2597, pruned_loss=0.08158, over 4259.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2648, pruned_loss=0.06979, over 954040.45 frames. 
], batch size: 18, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:23:56,421 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=42859.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:24:02,856 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42862.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:24:14,754 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=42879.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:24:22,769 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1469, 2.1413, 2.0881, 1.6930, 2.1557, 2.3296, 2.2696, 1.9045], + device='cuda:5'), covar=tensor([0.0483, 0.0505, 0.0689, 0.0801, 0.0572, 0.0518, 0.0500, 0.0911], + device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0133, 0.0144, 0.0125, 0.0114, 0.0144, 0.0144, 0.0160], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:24:31,685 INFO [finetune.py:976] (5/7) Epoch 8, batch 2800, loss[loss=0.2127, simple_loss=0.2659, pruned_loss=0.07979, over 4817.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.261, pruned_loss=0.0684, over 952717.01 frames. ], batch size: 41, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:24:37,213 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7178, 1.0988, 0.9329, 1.6196, 2.1543, 1.0457, 1.5630, 1.4755], + device='cuda:5'), covar=tensor([0.1534, 0.2314, 0.1977, 0.1195, 0.1875, 0.1970, 0.1527, 0.2111], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0096, 0.0113, 0.0091, 0.0123, 0.0095, 0.0100, 0.0092], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 09:24:48,573 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=42920.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:24:54,864 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.137e+02 1.632e+02 1.941e+02 2.379e+02 3.960e+02, threshold=3.882e+02, percent-clipped=2.0 +2023-03-26 09:25:02,611 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=42940.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:25:04,921 INFO [finetune.py:976] (5/7) Epoch 8, batch 2850, loss[loss=0.1949, simple_loss=0.2575, pruned_loss=0.06615, over 4915.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2608, pruned_loss=0.06806, over 955000.66 frames. ], batch size: 43, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:25:14,701 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5110, 1.3458, 1.3070, 1.5597, 1.4490, 1.5692, 0.8837, 1.2965], + device='cuda:5'), covar=tensor([0.2129, 0.2183, 0.1864, 0.1710, 0.1867, 0.1192, 0.2798, 0.1813], + device='cuda:5'), in_proj_covar=tensor([0.0238, 0.0210, 0.0206, 0.0188, 0.0241, 0.0179, 0.0214, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:25:38,297 INFO [finetune.py:976] (5/7) Epoch 8, batch 2900, loss[loss=0.2558, simple_loss=0.3215, pruned_loss=0.09506, over 4872.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2648, pruned_loss=0.06979, over 953941.27 frames. ], batch size: 31, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:25:44,537 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. 
limit=2.0 +2023-03-26 09:25:45,639 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43005.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:26:03,397 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.926e+01 1.731e+02 1.970e+02 2.373e+02 5.777e+02, threshold=3.941e+02, percent-clipped=2.0 +2023-03-26 09:26:13,022 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43035.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:26:18,884 INFO [finetune.py:976] (5/7) Epoch 8, batch 2950, loss[loss=0.2439, simple_loss=0.2889, pruned_loss=0.09944, over 4823.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2669, pruned_loss=0.06989, over 954640.94 frames. ], batch size: 30, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:26:42,190 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2237, 2.2273, 1.8269, 2.3062, 2.1720, 2.0707, 2.1186, 2.9944], + device='cuda:5'), covar=tensor([0.5029, 0.6104, 0.4281, 0.5652, 0.5057, 0.3151, 0.5520, 0.1891], + device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0259, 0.0221, 0.0281, 0.0241, 0.0206, 0.0245, 0.0207], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:26:44,530 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=43083.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:26:44,665 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-26 09:26:52,571 INFO [finetune.py:976] (5/7) Epoch 8, batch 3000, loss[loss=0.2273, simple_loss=0.2822, pruned_loss=0.08623, over 4814.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2712, pruned_loss=0.07228, over 956985.30 frames. ], batch size: 33, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:26:52,571 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 09:27:01,154 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0138, 1.8247, 1.6771, 1.7789, 1.8189, 1.8201, 1.7955, 2.4793], + device='cuda:5'), covar=tensor([0.5455, 0.6058, 0.4382, 0.5118, 0.5020, 0.3169, 0.5567, 0.2179], + device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0260, 0.0221, 0.0281, 0.0241, 0.0206, 0.0245, 0.0207], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:27:10,878 INFO [finetune.py:1010] (5/7) Epoch 8, validation: loss=0.16, simple_loss=0.2311, pruned_loss=0.04446, over 2265189.00 frames. +2023-03-26 09:27:10,879 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 09:27:23,301 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0632, 1.6440, 1.9147, 1.9177, 1.6491, 1.6858, 1.8713, 1.8425], + device='cuda:5'), covar=tensor([0.5552, 0.6762, 0.5212, 0.6385, 0.7516, 0.5457, 0.7826, 0.4915], + device='cuda:5'), in_proj_covar=tensor([0.0231, 0.0241, 0.0253, 0.0254, 0.0245, 0.0222, 0.0273, 0.0226], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:27:49,858 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.022e+02 1.686e+02 2.049e+02 2.426e+02 3.920e+02, threshold=4.099e+02, percent-clipped=0.0 +2023-03-26 09:28:00,452 INFO [finetune.py:976] (5/7) Epoch 8, batch 3050, loss[loss=0.2106, simple_loss=0.2741, pruned_loss=0.07356, over 4778.00 frames. 
], tot_loss[loss=0.2081, simple_loss=0.2716, pruned_loss=0.07227, over 954422.84 frames. ], batch size: 25, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:28:13,459 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43162.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:28:36,036 INFO [finetune.py:976] (5/7) Epoch 8, batch 3100, loss[loss=0.2101, simple_loss=0.2767, pruned_loss=0.07174, over 4904.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2695, pruned_loss=0.07157, over 955338.44 frames. ], batch size: 32, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:28:50,454 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.2262, 1.3759, 1.4985, 0.6788, 1.3646, 1.6001, 1.6907, 1.3703], + device='cuda:5'), covar=tensor([0.0969, 0.0619, 0.0529, 0.0576, 0.0554, 0.0669, 0.0417, 0.0815], + device='cuda:5'), in_proj_covar=tensor([0.0127, 0.0155, 0.0119, 0.0135, 0.0131, 0.0123, 0.0144, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.4717e-05, 1.1387e-04, 8.5615e-05, 9.7699e-05, 9.3751e-05, 9.0522e-05, + 1.0550e-04, 1.0731e-04], device='cuda:5') +2023-03-26 09:28:52,834 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=43210.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:29:01,133 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43215.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:29:04,211 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([5.0779, 4.4610, 4.6065, 4.8655, 4.7814, 4.4982, 5.1785, 1.4205], + device='cuda:5'), covar=tensor([0.0681, 0.0745, 0.0680, 0.0742, 0.1107, 0.1502, 0.0469, 0.5885], + device='cuda:5'), in_proj_covar=tensor([0.0347, 0.0241, 0.0275, 0.0292, 0.0330, 0.0282, 0.0300, 0.0292], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:29:14,672 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.165e+02 1.652e+02 1.930e+02 2.337e+02 4.149e+02, threshold=3.860e+02, percent-clipped=1.0 +2023-03-26 09:29:23,816 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43235.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:29:34,859 INFO [finetune.py:976] (5/7) Epoch 8, batch 3150, loss[loss=0.1889, simple_loss=0.251, pruned_loss=0.06339, over 4841.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2661, pruned_loss=0.07055, over 955946.59 frames. ], batch size: 47, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:29:54,640 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.90 vs. limit=5.0 +2023-03-26 09:30:24,893 INFO [finetune.py:976] (5/7) Epoch 8, batch 3200, loss[loss=0.2155, simple_loss=0.2798, pruned_loss=0.07562, over 4930.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2624, pruned_loss=0.0687, over 956237.88 frames. 
], batch size: 38, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:30:33,139 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43305.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:30:46,585 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9667, 4.6184, 4.4225, 2.1664, 4.6220, 3.4612, 0.7327, 3.2450], + device='cuda:5'), covar=tensor([0.2515, 0.1926, 0.1275, 0.3382, 0.0733, 0.0926, 0.4995, 0.1407], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0171, 0.0159, 0.0129, 0.0155, 0.0122, 0.0145, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 09:30:49,478 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.070e+02 1.668e+02 2.087e+02 2.541e+02 1.424e+03, threshold=4.174e+02, percent-clipped=3.0 +2023-03-26 09:31:03,657 INFO [finetune.py:976] (5/7) Epoch 8, batch 3250, loss[loss=0.1788, simple_loss=0.2627, pruned_loss=0.04746, over 4801.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2628, pruned_loss=0.06895, over 956196.70 frames. ], batch size: 41, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:31:15,390 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=43353.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:31:59,776 INFO [finetune.py:976] (5/7) Epoch 8, batch 3300, loss[loss=0.2284, simple_loss=0.2892, pruned_loss=0.08377, over 4817.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2673, pruned_loss=0.07051, over 956770.64 frames. ], batch size: 33, lr: 3.83e-03, grad_scale: 16.0 +2023-03-26 09:32:42,882 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-03-26 09:32:45,625 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.223e+02 1.691e+02 2.029e+02 2.462e+02 4.055e+02, threshold=4.059e+02, percent-clipped=0.0 +2023-03-26 09:33:04,670 INFO [finetune.py:976] (5/7) Epoch 8, batch 3350, loss[loss=0.2151, simple_loss=0.2854, pruned_loss=0.0724, over 4824.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2692, pruned_loss=0.07105, over 954613.62 frames. ], batch size: 47, lr: 3.83e-03, grad_scale: 32.0 +2023-03-26 09:33:36,474 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7283, 1.1497, 1.7383, 1.6535, 1.5041, 1.4533, 1.5351, 1.5609], + device='cuda:5'), covar=tensor([0.5035, 0.5983, 0.5006, 0.5297, 0.6521, 0.5059, 0.6532, 0.4747], + device='cuda:5'), in_proj_covar=tensor([0.0232, 0.0241, 0.0254, 0.0254, 0.0245, 0.0222, 0.0273, 0.0227], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:33:51,120 INFO [finetune.py:976] (5/7) Epoch 8, batch 3400, loss[loss=0.2066, simple_loss=0.2757, pruned_loss=0.06874, over 4878.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2716, pruned_loss=0.07213, over 954841.69 frames. 
], batch size: 32, lr: 3.83e-03, grad_scale: 32.0 +2023-03-26 09:33:51,257 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.2073, 1.2916, 1.3685, 0.6207, 1.2143, 1.4813, 1.5543, 1.2033], + device='cuda:5'), covar=tensor([0.0797, 0.0452, 0.0403, 0.0488, 0.0414, 0.0494, 0.0274, 0.0619], + device='cuda:5'), in_proj_covar=tensor([0.0128, 0.0155, 0.0119, 0.0135, 0.0132, 0.0124, 0.0145, 0.0147], + device='cuda:5'), out_proj_covar=tensor([9.5323e-05, 1.1422e-04, 8.6129e-05, 9.8387e-05, 9.4437e-05, 9.1138e-05, + 1.0627e-04, 1.0821e-04], device='cuda:5') +2023-03-26 09:34:00,997 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3023, 1.1671, 1.1855, 1.3515, 1.5722, 1.4657, 1.3107, 1.1103], + device='cuda:5'), covar=tensor([0.0313, 0.0284, 0.0511, 0.0249, 0.0193, 0.0346, 0.0254, 0.0354], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0110, 0.0139, 0.0115, 0.0104, 0.0101, 0.0090, 0.0109], + device='cuda:5'), out_proj_covar=tensor([7.0414e-05, 8.6555e-05, 1.1139e-04, 9.0647e-05, 8.1407e-05, 7.5008e-05, + 6.8175e-05, 8.4389e-05], device='cuda:5') +2023-03-26 09:34:04,484 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43514.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:34:05,106 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43515.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:34:15,395 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.089e+02 1.653e+02 1.866e+02 2.233e+02 4.638e+02, threshold=3.733e+02, percent-clipped=1.0 +2023-03-26 09:34:18,542 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-03-26 09:34:19,630 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43535.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:34:25,009 INFO [finetune.py:976] (5/7) Epoch 8, batch 3450, loss[loss=0.174, simple_loss=0.2485, pruned_loss=0.04975, over 4810.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2716, pruned_loss=0.07171, over 954693.46 frames. ], batch size: 41, lr: 3.83e-03, grad_scale: 32.0 +2023-03-26 09:34:43,317 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=43563.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:34:45,252 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2868, 1.7833, 2.1912, 2.2551, 1.9257, 1.9340, 2.1195, 2.0263], + device='cuda:5'), covar=tensor([0.4620, 0.5971, 0.4508, 0.5000, 0.6499, 0.4544, 0.6726, 0.4447], + device='cuda:5'), in_proj_covar=tensor([0.0232, 0.0241, 0.0254, 0.0254, 0.0246, 0.0223, 0.0273, 0.0227], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:34:55,926 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43575.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:35:02,198 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=43583.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:35:08,910 INFO [finetune.py:976] (5/7) Epoch 8, batch 3500, loss[loss=0.2658, simple_loss=0.3084, pruned_loss=0.1116, over 4864.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2678, pruned_loss=0.07084, over 956744.51 frames. 
], batch size: 34, lr: 3.83e-03, grad_scale: 32.0 +2023-03-26 09:35:34,552 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.199e+02 1.655e+02 2.028e+02 2.395e+02 4.370e+02, threshold=4.057e+02, percent-clipped=4.0 +2023-03-26 09:35:44,700 INFO [finetune.py:976] (5/7) Epoch 8, batch 3550, loss[loss=0.2023, simple_loss=0.2591, pruned_loss=0.07268, over 4919.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2653, pruned_loss=0.0703, over 957639.43 frames. ], batch size: 37, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:36:18,004 INFO [finetune.py:976] (5/7) Epoch 8, batch 3600, loss[loss=0.184, simple_loss=0.2518, pruned_loss=0.05806, over 4715.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2614, pruned_loss=0.06879, over 957903.74 frames. ], batch size: 54, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:36:40,280 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.156e+02 1.641e+02 1.836e+02 2.142e+02 3.900e+02, threshold=3.673e+02, percent-clipped=0.0 +2023-03-26 09:37:03,253 INFO [finetune.py:976] (5/7) Epoch 8, batch 3650, loss[loss=0.2115, simple_loss=0.284, pruned_loss=0.06947, over 4899.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2649, pruned_loss=0.07086, over 957197.75 frames. ], batch size: 37, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:37:41,727 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43787.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:37:50,873 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9579, 1.3839, 1.9058, 1.8823, 1.6455, 1.6286, 1.7558, 1.7699], + device='cuda:5'), covar=tensor([0.4718, 0.6055, 0.4643, 0.5305, 0.6114, 0.5158, 0.6805, 0.4556], + device='cuda:5'), in_proj_covar=tensor([0.0232, 0.0241, 0.0254, 0.0254, 0.0245, 0.0222, 0.0273, 0.0227], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:37:53,055 INFO [finetune.py:976] (5/7) Epoch 8, batch 3700, loss[loss=0.2112, simple_loss=0.2625, pruned_loss=0.08001, over 3973.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2691, pruned_loss=0.07241, over 954403.30 frames. ], batch size: 17, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:38:04,065 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0466, 0.9328, 0.8918, 1.1567, 1.1948, 1.1290, 0.9751, 0.9470], + device='cuda:5'), covar=tensor([0.0323, 0.0292, 0.0560, 0.0276, 0.0257, 0.0475, 0.0333, 0.0402], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0112, 0.0141, 0.0116, 0.0104, 0.0102, 0.0091, 0.0110], + device='cuda:5'), out_proj_covar=tensor([7.1259e-05, 8.7503e-05, 1.1242e-04, 9.1602e-05, 8.1743e-05, 7.5606e-05, + 6.9217e-05, 8.5343e-05], device='cuda:5') +2023-03-26 09:38:24,488 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-03-26 09:38:37,142 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.066e+01 1.637e+02 2.055e+02 2.501e+02 4.825e+02, threshold=4.110e+02, percent-clipped=4.0 +2023-03-26 09:38:45,757 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-03-26 09:38:56,862 INFO [finetune.py:976] (5/7) Epoch 8, batch 3750, loss[loss=0.1946, simple_loss=0.2578, pruned_loss=0.06566, over 4153.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.271, pruned_loss=0.07335, over 954795.00 frames. 
], batch size: 66, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:39:00,423 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43848.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:39:13,801 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43870.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:39:22,132 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1188, 1.6701, 1.9319, 1.9605, 1.7042, 1.7103, 1.8495, 1.7505], + device='cuda:5'), covar=tensor([0.6761, 0.7306, 0.6552, 0.7154, 0.8895, 0.6958, 1.0013, 0.6373], + device='cuda:5'), in_proj_covar=tensor([0.0233, 0.0242, 0.0255, 0.0255, 0.0247, 0.0223, 0.0274, 0.0228], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:39:30,698 INFO [finetune.py:976] (5/7) Epoch 8, batch 3800, loss[loss=0.2282, simple_loss=0.2911, pruned_loss=0.08267, over 4903.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2723, pruned_loss=0.07352, over 956273.99 frames. ], batch size: 38, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:39:40,902 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43909.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:39:42,917 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-03-26 09:40:01,519 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.167e+02 1.597e+02 1.980e+02 2.453e+02 5.062e+02, threshold=3.959e+02, percent-clipped=3.0 +2023-03-26 09:40:09,154 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0410, 1.3762, 1.9594, 1.9326, 1.7210, 1.7014, 1.7855, 1.8166], + device='cuda:5'), covar=tensor([0.4333, 0.5516, 0.4534, 0.4648, 0.6149, 0.4627, 0.6264, 0.4360], + device='cuda:5'), in_proj_covar=tensor([0.0232, 0.0241, 0.0254, 0.0254, 0.0246, 0.0223, 0.0273, 0.0227], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:40:16,146 INFO [finetune.py:976] (5/7) Epoch 8, batch 3850, loss[loss=0.1948, simple_loss=0.2455, pruned_loss=0.07205, over 4717.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2698, pruned_loss=0.07153, over 958468.33 frames. ], batch size: 23, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:40:26,751 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43959.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:40:33,732 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43970.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:40:54,212 INFO [finetune.py:976] (5/7) Epoch 8, batch 3900, loss[loss=0.1751, simple_loss=0.2314, pruned_loss=0.05942, over 4706.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.265, pruned_loss=0.06939, over 955133.79 frames. ], batch size: 23, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:41:14,981 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. 
limit=2.0 +2023-03-26 09:41:17,070 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44020.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:41:22,369 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.038e+02 1.530e+02 1.807e+02 2.225e+02 3.627e+02, threshold=3.614e+02, percent-clipped=0.0 +2023-03-26 09:41:32,537 INFO [finetune.py:976] (5/7) Epoch 8, batch 3950, loss[loss=0.1734, simple_loss=0.2376, pruned_loss=0.05462, over 4772.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2617, pruned_loss=0.06781, over 956249.67 frames. ], batch size: 28, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:42:06,493 INFO [finetune.py:976] (5/7) Epoch 8, batch 4000, loss[loss=0.1739, simple_loss=0.2254, pruned_loss=0.06124, over 4076.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2618, pruned_loss=0.06843, over 955984.95 frames. ], batch size: 17, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:42:28,360 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9947, 1.9642, 1.5708, 1.9234, 1.7999, 1.7552, 1.8623, 2.5260], + device='cuda:5'), covar=tensor([0.5072, 0.5747, 0.4135, 0.5052, 0.5399, 0.3087, 0.4792, 0.1964], + device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0260, 0.0221, 0.0280, 0.0241, 0.0206, 0.0244, 0.0206], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:42:37,514 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.105e+02 1.793e+02 2.145e+02 2.588e+02 4.712e+02, threshold=4.291e+02, percent-clipped=10.0 +2023-03-26 09:42:56,505 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=44143.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:42:57,046 INFO [finetune.py:976] (5/7) Epoch 8, batch 4050, loss[loss=0.2266, simple_loss=0.285, pruned_loss=0.08409, over 4888.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2652, pruned_loss=0.06972, over 955291.18 frames. ], batch size: 32, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:43:31,064 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=44170.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:43:49,920 INFO [finetune.py:976] (5/7) Epoch 8, batch 4100, loss[loss=0.2261, simple_loss=0.2751, pruned_loss=0.08854, over 4102.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2696, pruned_loss=0.07198, over 954106.12 frames. ], batch size: 18, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:43:50,545 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8757, 1.7113, 1.4586, 1.7188, 1.6416, 1.6258, 1.6422, 2.3263], + device='cuda:5'), covar=tensor([0.5398, 0.6326, 0.4168, 0.5846, 0.5435, 0.3220, 0.5714, 0.2278], + device='cuda:5'), in_proj_covar=tensor([0.0285, 0.0259, 0.0220, 0.0279, 0.0240, 0.0205, 0.0244, 0.0206], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:44:15,791 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=44218.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:44:22,433 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.086e+02 1.696e+02 1.895e+02 2.360e+02 4.949e+02, threshold=3.791e+02, percent-clipped=1.0 +2023-03-26 09:44:31,523 INFO [finetune.py:976] (5/7) Epoch 8, batch 4150, loss[loss=0.1822, simple_loss=0.2488, pruned_loss=0.05782, over 4740.00 frames. 
], tot_loss[loss=0.2076, simple_loss=0.2705, pruned_loss=0.07241, over 952367.39 frames. ], batch size: 27, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:44:46,754 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=44265.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:45:07,273 INFO [finetune.py:976] (5/7) Epoch 8, batch 4200, loss[loss=0.2385, simple_loss=0.2957, pruned_loss=0.09069, over 4889.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2704, pruned_loss=0.07215, over 951815.25 frames. ], batch size: 43, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:45:30,591 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=44315.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:45:37,069 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.59 vs. limit=5.0 +2023-03-26 09:45:39,446 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-03-26 09:45:40,453 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.242e+02 1.746e+02 1.975e+02 2.337e+02 5.106e+02, threshold=3.951e+02, percent-clipped=1.0 +2023-03-26 09:45:54,907 INFO [finetune.py:976] (5/7) Epoch 8, batch 4250, loss[loss=0.182, simple_loss=0.2438, pruned_loss=0.06007, over 4928.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2684, pruned_loss=0.07146, over 954632.15 frames. ], batch size: 33, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:45:56,853 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9688, 2.0102, 2.3171, 2.1791, 2.1626, 3.8189, 1.8818, 2.1149], + device='cuda:5'), covar=tensor([0.0827, 0.1354, 0.0822, 0.0838, 0.1297, 0.0338, 0.1294, 0.1443], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0081, 0.0076, 0.0079, 0.0092, 0.0083, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 09:46:32,567 INFO [finetune.py:976] (5/7) Epoch 8, batch 4300, loss[loss=0.1904, simple_loss=0.2527, pruned_loss=0.06401, over 4817.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2654, pruned_loss=0.0706, over 955503.04 frames. ], batch size: 39, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:46:56,828 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.088e+02 1.550e+02 1.851e+02 2.365e+02 4.860e+02, threshold=3.701e+02, percent-clipped=2.0 +2023-03-26 09:46:58,130 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8823, 1.1739, 0.9142, 1.8991, 2.2879, 1.8919, 1.5210, 2.0045], + device='cuda:5'), covar=tensor([0.1452, 0.2186, 0.2079, 0.1124, 0.1918, 0.1875, 0.1414, 0.1749], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0097, 0.0113, 0.0091, 0.0122, 0.0095, 0.0099, 0.0092], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 09:47:05,832 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=44443.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:47:06,356 INFO [finetune.py:976] (5/7) Epoch 8, batch 4350, loss[loss=0.2149, simple_loss=0.2638, pruned_loss=0.083, over 4898.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2619, pruned_loss=0.06903, over 954813.22 frames. 
], batch size: 32, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:47:37,687 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=44491.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:47:39,433 INFO [finetune.py:976] (5/7) Epoch 8, batch 4400, loss[loss=0.2093, simple_loss=0.2699, pruned_loss=0.07434, over 4811.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2637, pruned_loss=0.07024, over 954873.56 frames. ], batch size: 51, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:47:44,262 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-03-26 09:48:17,056 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=44527.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:48:18,612 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.261e+02 1.746e+02 1.995e+02 2.515e+02 6.158e+02, threshold=3.991e+02, percent-clipped=2.0 +2023-03-26 09:48:28,721 INFO [finetune.py:976] (5/7) Epoch 8, batch 4450, loss[loss=0.2247, simple_loss=0.2976, pruned_loss=0.07587, over 4755.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2677, pruned_loss=0.07162, over 954529.50 frames. ], batch size: 59, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:48:29,520 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-26 09:48:37,006 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=44550.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:48:51,542 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=44565.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:49:19,364 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44588.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:49:28,022 INFO [finetune.py:976] (5/7) Epoch 8, batch 4500, loss[loss=0.2976, simple_loss=0.3391, pruned_loss=0.128, over 4178.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2691, pruned_loss=0.0724, over 953701.29 frames. ], batch size: 65, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:49:37,978 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.62 vs. limit=2.0 +2023-03-26 09:49:47,673 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44611.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:49:48,882 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=44613.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:49:50,664 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=44615.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:49:57,843 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0901, 1.8248, 1.4230, 0.5368, 1.6165, 1.6732, 1.5310, 1.7258], + device='cuda:5'), covar=tensor([0.0833, 0.0793, 0.1524, 0.2171, 0.1402, 0.2279, 0.2196, 0.0846], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0200, 0.0201, 0.0188, 0.0216, 0.0206, 0.0221, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:50:00,000 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.305e+02 1.720e+02 2.026e+02 2.465e+02 5.780e+02, threshold=4.053e+02, percent-clipped=2.0 +2023-03-26 09:50:10,542 INFO [finetune.py:976] (5/7) Epoch 8, batch 4550, loss[loss=0.2052, simple_loss=0.2786, pruned_loss=0.06592, over 4805.00 frames. 
], tot_loss[loss=0.2083, simple_loss=0.2709, pruned_loss=0.07282, over 953739.99 frames. ], batch size: 29, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:50:26,317 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.69 vs. limit=5.0 +2023-03-26 09:50:27,736 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=44663.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:50:52,809 INFO [finetune.py:976] (5/7) Epoch 8, batch 4600, loss[loss=0.1842, simple_loss=0.2485, pruned_loss=0.05995, over 4106.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2683, pruned_loss=0.07078, over 953045.14 frames. ], batch size: 18, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:51:15,459 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.778e+01 1.566e+02 1.839e+02 2.114e+02 3.234e+02, threshold=3.678e+02, percent-clipped=0.0 +2023-03-26 09:51:25,982 INFO [finetune.py:976] (5/7) Epoch 8, batch 4650, loss[loss=0.2165, simple_loss=0.2549, pruned_loss=0.08905, over 4405.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2665, pruned_loss=0.07051, over 953148.61 frames. ], batch size: 19, lr: 3.82e-03, grad_scale: 32.0 +2023-03-26 09:51:57,920 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2127, 2.0123, 1.6697, 2.1346, 2.1622, 1.8160, 2.4616, 2.1892], + device='cuda:5'), covar=tensor([0.1522, 0.2626, 0.3669, 0.2904, 0.2824, 0.1982, 0.3312, 0.2019], + device='cuda:5'), in_proj_covar=tensor([0.0172, 0.0189, 0.0233, 0.0253, 0.0235, 0.0193, 0.0211, 0.0193], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:51:59,446 INFO [finetune.py:976] (5/7) Epoch 8, batch 4700, loss[loss=0.1743, simple_loss=0.2385, pruned_loss=0.055, over 4828.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.263, pruned_loss=0.06883, over 954685.08 frames. ], batch size: 40, lr: 3.82e-03, grad_scale: 16.0 +2023-03-26 09:52:03,076 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1435, 1.9576, 1.7625, 2.1733, 1.9001, 1.9136, 1.8973, 2.7847], + device='cuda:5'), covar=tensor([0.5167, 0.6964, 0.4456, 0.5936, 0.6225, 0.3115, 0.6449, 0.2021], + device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0260, 0.0221, 0.0280, 0.0241, 0.0206, 0.0244, 0.0207], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:52:22,742 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.068e+02 1.499e+02 1.877e+02 2.319e+02 4.193e+02, threshold=3.754e+02, percent-clipped=1.0 +2023-03-26 09:52:32,228 INFO [finetune.py:976] (5/7) Epoch 8, batch 4750, loss[loss=0.228, simple_loss=0.2806, pruned_loss=0.08772, over 4903.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2623, pruned_loss=0.06878, over 955378.96 frames. ], batch size: 32, lr: 3.82e-03, grad_scale: 16.0 +2023-03-26 09:52:58,204 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=44883.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:53:05,275 INFO [finetune.py:976] (5/7) Epoch 8, batch 4800, loss[loss=0.1836, simple_loss=0.2528, pruned_loss=0.05715, over 4692.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.265, pruned_loss=0.07, over 955202.26 frames. 
], batch size: 23, lr: 3.82e-03, grad_scale: 16.0 +2023-03-26 09:53:15,870 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=44906.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:53:41,287 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.083e+02 1.589e+02 1.932e+02 2.383e+02 4.430e+02, threshold=3.864e+02, percent-clipped=2.0 +2023-03-26 09:53:55,403 INFO [finetune.py:976] (5/7) Epoch 8, batch 4850, loss[loss=0.1702, simple_loss=0.2424, pruned_loss=0.04894, over 4780.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2675, pruned_loss=0.07062, over 955190.86 frames. ], batch size: 29, lr: 3.82e-03, grad_scale: 16.0 +2023-03-26 09:54:38,922 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9712, 2.0688, 2.0348, 1.6024, 2.1950, 2.2197, 2.1246, 1.7852], + device='cuda:5'), covar=tensor([0.0636, 0.0609, 0.0801, 0.0854, 0.0571, 0.0682, 0.0633, 0.1097], + device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0132, 0.0144, 0.0125, 0.0115, 0.0144, 0.0144, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:54:57,805 INFO [finetune.py:976] (5/7) Epoch 8, batch 4900, loss[loss=0.2194, simple_loss=0.2897, pruned_loss=0.07455, over 4817.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2699, pruned_loss=0.07165, over 956911.91 frames. ], batch size: 47, lr: 3.82e-03, grad_scale: 16.0 +2023-03-26 09:55:07,255 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7518, 1.7113, 1.5474, 2.0246, 2.2246, 1.8934, 1.6249, 1.4482], + device='cuda:5'), covar=tensor([0.2195, 0.2134, 0.1798, 0.1523, 0.1898, 0.1185, 0.2468, 0.1913], + device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0209, 0.0205, 0.0187, 0.0240, 0.0178, 0.0214, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:55:25,593 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.182e+02 1.694e+02 2.008e+02 2.325e+02 4.035e+02, threshold=4.016e+02, percent-clipped=1.0 +2023-03-26 09:55:44,370 INFO [finetune.py:976] (5/7) Epoch 8, batch 4950, loss[loss=0.178, simple_loss=0.2567, pruned_loss=0.04972, over 4839.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2703, pruned_loss=0.0713, over 956176.55 frames. ], batch size: 47, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 09:55:45,764 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.1154, 2.7158, 2.5635, 1.4184, 2.6604, 2.1837, 1.9844, 2.2621], + device='cuda:5'), covar=tensor([0.0979, 0.0936, 0.1731, 0.2420, 0.1785, 0.2397, 0.2327, 0.1362], + device='cuda:5'), in_proj_covar=tensor([0.0167, 0.0201, 0.0201, 0.0188, 0.0216, 0.0205, 0.0221, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:55:45,960 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. 
limit=2.0 +2023-03-26 09:55:57,052 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45056.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:56:00,122 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5084, 1.3690, 1.4468, 1.4119, 0.9077, 2.3214, 0.8326, 1.4246], + device='cuda:5'), covar=tensor([0.3538, 0.2532, 0.2185, 0.2456, 0.2027, 0.0394, 0.2688, 0.1348], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0115, 0.0120, 0.0122, 0.0117, 0.0098, 0.0100, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 09:56:04,183 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2868, 2.8958, 2.7536, 1.2035, 2.9816, 2.2200, 0.6730, 1.9673], + device='cuda:5'), covar=tensor([0.2561, 0.2067, 0.1801, 0.3456, 0.1379, 0.1159, 0.4079, 0.1657], + device='cuda:5'), in_proj_covar=tensor([0.0154, 0.0172, 0.0161, 0.0129, 0.0156, 0.0122, 0.0147, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 09:56:13,224 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45081.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:56:21,543 INFO [finetune.py:976] (5/7) Epoch 8, batch 5000, loss[loss=0.1665, simple_loss=0.229, pruned_loss=0.05197, over 4837.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2686, pruned_loss=0.07034, over 956466.95 frames. ], batch size: 33, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 09:56:24,123 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9497, 1.7995, 1.5165, 1.7389, 1.6638, 1.6524, 1.6665, 2.4155], + device='cuda:5'), covar=tensor([0.5166, 0.5478, 0.4233, 0.5049, 0.5062, 0.2990, 0.5157, 0.2089], + device='cuda:5'), in_proj_covar=tensor([0.0289, 0.0261, 0.0223, 0.0282, 0.0242, 0.0208, 0.0247, 0.0209], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:56:34,160 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1618, 1.9227, 1.5045, 0.5773, 1.6545, 1.7679, 1.5997, 1.7547], + device='cuda:5'), covar=tensor([0.0916, 0.0819, 0.1562, 0.2155, 0.1417, 0.2342, 0.2460, 0.0947], + device='cuda:5'), in_proj_covar=tensor([0.0167, 0.0201, 0.0201, 0.0188, 0.0217, 0.0206, 0.0221, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:56:37,064 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45117.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 09:56:45,213 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.085e+02 1.621e+02 1.919e+02 2.483e+02 3.797e+02, threshold=3.837e+02, percent-clipped=0.0 +2023-03-26 09:56:48,405 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7222, 1.6046, 1.3938, 1.3822, 1.7738, 1.4566, 1.8069, 1.7037], + device='cuda:5'), covar=tensor([0.1578, 0.2536, 0.3569, 0.2815, 0.2974, 0.1873, 0.3305, 0.2082], + device='cuda:5'), in_proj_covar=tensor([0.0173, 0.0189, 0.0234, 0.0255, 0.0236, 0.0194, 0.0212, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 09:56:52,683 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45142.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:56:53,773 INFO 
[finetune.py:976] (5/7) Epoch 8, batch 5050, loss[loss=0.1712, simple_loss=0.242, pruned_loss=0.05019, over 4793.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2647, pruned_loss=0.06882, over 956471.40 frames. ], batch size: 51, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 09:57:19,963 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45183.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:57:26,575 INFO [finetune.py:976] (5/7) Epoch 8, batch 5100, loss[loss=0.2119, simple_loss=0.2724, pruned_loss=0.07567, over 4753.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2623, pruned_loss=0.06805, over 957350.57 frames. ], batch size: 54, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 09:57:34,987 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45206.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:57:55,113 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.141e+02 1.630e+02 1.903e+02 2.262e+02 3.588e+02, threshold=3.806e+02, percent-clipped=0.0 +2023-03-26 09:57:55,795 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=45231.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:58:04,088 INFO [finetune.py:976] (5/7) Epoch 8, batch 5150, loss[loss=0.1801, simple_loss=0.2473, pruned_loss=0.05647, over 4821.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2638, pruned_loss=0.06928, over 957653.15 frames. ], batch size: 40, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 09:58:11,269 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=45254.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 09:58:16,530 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-03-26 09:58:40,359 INFO [finetune.py:976] (5/7) Epoch 8, batch 5200, loss[loss=0.2029, simple_loss=0.2756, pruned_loss=0.06511, over 4873.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2682, pruned_loss=0.07072, over 957678.93 frames. ], batch size: 34, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 09:59:01,005 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.64 vs. limit=5.0 +2023-03-26 09:59:09,675 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.225e+02 1.659e+02 1.911e+02 2.326e+02 4.760e+02, threshold=3.822e+02, percent-clipped=1.0 +2023-03-26 09:59:18,770 INFO [finetune.py:976] (5/7) Epoch 8, batch 5250, loss[loss=0.2285, simple_loss=0.2824, pruned_loss=0.08731, over 4823.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2703, pruned_loss=0.07165, over 955367.87 frames. ], batch size: 40, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 09:59:27,650 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45348.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 10:00:03,243 INFO [finetune.py:976] (5/7) Epoch 8, batch 5300, loss[loss=0.18, simple_loss=0.2635, pruned_loss=0.04821, over 4777.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2705, pruned_loss=0.0714, over 952665.33 frames. 
], batch size: 29, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 10:00:16,221 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45409.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 10:00:18,474 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45412.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 10:00:30,691 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.820e+01 1.516e+02 1.858e+02 2.399e+02 4.469e+02, threshold=3.716e+02, percent-clipped=2.0 +2023-03-26 10:00:39,971 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45437.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:00:40,236 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-03-26 10:00:49,415 INFO [finetune.py:976] (5/7) Epoch 8, batch 5350, loss[loss=0.2057, simple_loss=0.2742, pruned_loss=0.06864, over 4752.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2695, pruned_loss=0.07071, over 954416.66 frames. ], batch size: 26, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 10:00:51,238 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1355, 2.2006, 2.1764, 1.5625, 2.2908, 2.2735, 2.1712, 1.9120], + device='cuda:5'), covar=tensor([0.0578, 0.0544, 0.0677, 0.0833, 0.0482, 0.0643, 0.0650, 0.0994], + device='cuda:5'), in_proj_covar=tensor([0.0136, 0.0133, 0.0145, 0.0125, 0.0116, 0.0145, 0.0145, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:01:54,088 INFO [finetune.py:976] (5/7) Epoch 8, batch 5400, loss[loss=0.2115, simple_loss=0.2604, pruned_loss=0.08134, over 4898.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2669, pruned_loss=0.06993, over 953671.23 frames. 
], batch size: 35, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 10:02:14,570 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4627, 2.8798, 2.8101, 1.2742, 3.0412, 2.1730, 0.7823, 1.9115], + device='cuda:5'), covar=tensor([0.2115, 0.2002, 0.1720, 0.3834, 0.1250, 0.1211, 0.4317, 0.1708], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0171, 0.0159, 0.0127, 0.0155, 0.0121, 0.0145, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 10:02:24,136 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0036, 1.4349, 1.1676, 1.8169, 2.3967, 1.5054, 1.7009, 1.8110], + device='cuda:5'), covar=tensor([0.1268, 0.2014, 0.1830, 0.1065, 0.1647, 0.1708, 0.1386, 0.1719], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0097, 0.0114, 0.0092, 0.0123, 0.0096, 0.0101, 0.0093], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 10:02:26,608 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6169, 1.5403, 1.5138, 1.5574, 1.0417, 2.7697, 1.1336, 1.5557], + device='cuda:5'), covar=tensor([0.3016, 0.2210, 0.1843, 0.2168, 0.1759, 0.0270, 0.2356, 0.1166], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0115, 0.0119, 0.0123, 0.0116, 0.0098, 0.0100, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 10:02:34,314 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.106e+02 1.640e+02 2.019e+02 2.439e+02 4.086e+02, threshold=4.037e+02, percent-clipped=1.0 +2023-03-26 10:02:45,332 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9054, 3.7605, 3.7476, 2.0141, 4.0025, 2.9082, 1.3663, 2.8217], + device='cuda:5'), covar=tensor([0.3063, 0.1651, 0.1356, 0.3045, 0.0869, 0.0962, 0.3664, 0.1454], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0173, 0.0160, 0.0129, 0.0156, 0.0122, 0.0146, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 10:02:54,209 INFO [finetune.py:976] (5/7) Epoch 8, batch 5450, loss[loss=0.2113, simple_loss=0.2734, pruned_loss=0.07456, over 4770.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2638, pruned_loss=0.06893, over 952306.32 frames. ], batch size: 54, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 10:03:54,239 INFO [finetune.py:976] (5/7) Epoch 8, batch 5500, loss[loss=0.1659, simple_loss=0.2306, pruned_loss=0.05055, over 4743.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.261, pruned_loss=0.06806, over 954290.77 frames. ], batch size: 27, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 10:04:12,211 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-03-26 10:04:14,847 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.71 vs. limit=5.0 +2023-03-26 10:04:18,373 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.004e+02 1.550e+02 1.829e+02 2.210e+02 3.729e+02, threshold=3.658e+02, percent-clipped=0.0 +2023-03-26 10:04:28,411 INFO [finetune.py:976] (5/7) Epoch 8, batch 5550, loss[loss=0.1692, simple_loss=0.2383, pruned_loss=0.05007, over 4862.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2639, pruned_loss=0.06927, over 954952.64 frames. 
], batch size: 31, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 10:04:45,336 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.08 vs. limit=5.0 +2023-03-26 10:05:06,262 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7673, 1.7410, 1.5307, 1.8988, 2.2961, 1.8571, 1.5069, 1.4549], + device='cuda:5'), covar=tensor([0.2166, 0.2019, 0.1864, 0.1630, 0.1837, 0.1111, 0.2551, 0.1892], + device='cuda:5'), in_proj_covar=tensor([0.0238, 0.0209, 0.0205, 0.0188, 0.0241, 0.0179, 0.0214, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:05:19,424 INFO [finetune.py:976] (5/7) Epoch 8, batch 5600, loss[loss=0.2278, simple_loss=0.2885, pruned_loss=0.08353, over 4745.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2672, pruned_loss=0.06972, over 954461.96 frames. ], batch size: 54, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 10:05:24,108 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45702.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:05:25,261 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45704.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 10:05:29,918 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45712.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 10:05:40,353 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.219e+02 1.739e+02 1.993e+02 2.505e+02 5.014e+02, threshold=3.987e+02, percent-clipped=3.0 +2023-03-26 10:05:44,493 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45737.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:05:48,899 INFO [finetune.py:976] (5/7) Epoch 8, batch 5650, loss[loss=0.2143, simple_loss=0.2849, pruned_loss=0.07187, over 4791.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2717, pruned_loss=0.07106, over 954897.81 frames. 
], batch size: 29, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 10:05:50,205 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9505, 1.3694, 1.8992, 1.8578, 1.6564, 1.6300, 1.7144, 1.7520], + device='cuda:5'), covar=tensor([0.4465, 0.5067, 0.4454, 0.4795, 0.5954, 0.4671, 0.6311, 0.4222], + device='cuda:5'), in_proj_covar=tensor([0.0232, 0.0241, 0.0254, 0.0255, 0.0247, 0.0224, 0.0273, 0.0229], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:05:58,593 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=45760.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:06:00,381 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45763.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:06:06,737 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45774.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:06:11,449 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5617, 0.6656, 1.4985, 1.3980, 1.3714, 1.2872, 1.2415, 1.4519], + device='cuda:5'), covar=tensor([0.3235, 0.4173, 0.3834, 0.3325, 0.4569, 0.3294, 0.4518, 0.3326], + device='cuda:5'), in_proj_covar=tensor([0.0232, 0.0241, 0.0253, 0.0255, 0.0247, 0.0223, 0.0273, 0.0229], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:06:13,164 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=45785.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:06:18,811 INFO [finetune.py:976] (5/7) Epoch 8, batch 5700, loss[loss=0.1757, simple_loss=0.2332, pruned_loss=0.0591, over 4276.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2658, pruned_loss=0.06938, over 937051.03 frames. ], batch size: 18, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 10:06:54,870 INFO [finetune.py:976] (5/7) Epoch 9, batch 0, loss[loss=0.1969, simple_loss=0.264, pruned_loss=0.06488, over 4818.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.264, pruned_loss=0.06488, over 4818.00 frames. ], batch size: 30, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 10:06:54,870 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 10:07:06,238 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8427, 1.6215, 2.1271, 2.7038, 2.0182, 2.2548, 1.2433, 2.1988], + device='cuda:5'), covar=tensor([0.1344, 0.1225, 0.0899, 0.0597, 0.0769, 0.1217, 0.1373, 0.0618], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0118, 0.0135, 0.0166, 0.0103, 0.0140, 0.0127, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 10:07:11,030 INFO [finetune.py:1010] (5/7) Epoch 9, validation: loss=0.1616, simple_loss=0.233, pruned_loss=0.04515, over 2265189.00 frames. 
+2023-03-26 10:07:11,031 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 10:07:17,598 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.132e+01 1.600e+02 1.914e+02 2.307e+02 4.538e+02, threshold=3.829e+02, percent-clipped=2.0 +2023-03-26 10:07:22,769 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45835.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:07:33,764 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5670, 1.1024, 0.8112, 1.5686, 2.0141, 1.3299, 1.3242, 1.6036], + device='cuda:5'), covar=tensor([0.1441, 0.1996, 0.2067, 0.1128, 0.1960, 0.2110, 0.1403, 0.1745], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0097, 0.0114, 0.0092, 0.0123, 0.0096, 0.0100, 0.0093], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 10:07:46,513 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45859.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:07:55,656 INFO [finetune.py:976] (5/7) Epoch 9, batch 50, loss[loss=0.1882, simple_loss=0.2417, pruned_loss=0.06733, over 4748.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2748, pruned_loss=0.07282, over 217791.63 frames. ], batch size: 26, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 10:08:13,656 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3863, 1.4755, 1.5790, 0.8266, 1.5734, 1.7216, 1.7897, 1.3361], + device='cuda:5'), covar=tensor([0.1005, 0.0580, 0.0507, 0.0624, 0.0416, 0.0682, 0.0350, 0.0953], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0157, 0.0121, 0.0136, 0.0132, 0.0125, 0.0147, 0.0148], + device='cuda:5'), out_proj_covar=tensor([9.6434e-05, 1.1582e-04, 8.7317e-05, 9.9096e-05, 9.4817e-05, 9.1873e-05, + 1.0774e-04, 1.0883e-04], device='cuda:5') +2023-03-26 10:08:20,520 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-03-26 10:08:28,788 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9168, 1.5641, 2.2377, 2.1136, 1.7620, 4.3390, 1.4385, 1.9891], + device='cuda:5'), covar=tensor([0.0897, 0.1818, 0.1035, 0.0946, 0.1567, 0.0259, 0.1534, 0.1700], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0076, 0.0079, 0.0093, 0.0084, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 10:08:35,695 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45920.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:08:36,814 INFO [finetune.py:976] (5/7) Epoch 9, batch 100, loss[loss=0.1704, simple_loss=0.2419, pruned_loss=0.04944, over 4821.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.263, pruned_loss=0.06805, over 381626.67 frames. ], batch size: 33, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 10:08:42,570 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.088e+02 1.760e+02 2.008e+02 2.423e+02 3.807e+02, threshold=4.016e+02, percent-clipped=0.0 +2023-03-26 10:08:43,049 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.30 vs. limit=5.0 +2023-03-26 10:09:10,376 INFO [finetune.py:976] (5/7) Epoch 9, batch 150, loss[loss=0.2135, simple_loss=0.2622, pruned_loss=0.08245, over 4904.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2587, pruned_loss=0.0669, over 508801.42 frames. 
], batch size: 32, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 10:09:26,823 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8996, 1.1881, 1.8164, 1.7920, 1.5919, 1.5532, 1.6574, 1.5812], + device='cuda:5'), covar=tensor([0.3972, 0.5041, 0.3986, 0.4332, 0.5618, 0.4140, 0.5344, 0.3954], + device='cuda:5'), in_proj_covar=tensor([0.0232, 0.0242, 0.0254, 0.0255, 0.0247, 0.0224, 0.0274, 0.0229], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:09:32,041 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46004.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 10:09:49,161 INFO [finetune.py:976] (5/7) Epoch 9, batch 200, loss[loss=0.1746, simple_loss=0.2419, pruned_loss=0.05372, over 4821.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2588, pruned_loss=0.06776, over 608655.46 frames. ], batch size: 39, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 10:09:58,662 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.086e+02 1.678e+02 2.056e+02 2.461e+02 4.455e+02, threshold=4.113e+02, percent-clipped=4.0 +2023-03-26 10:10:22,927 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=46052.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 10:10:22,988 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5664, 1.4099, 1.3610, 1.6159, 1.7138, 1.6207, 0.9290, 1.3810], + device='cuda:5'), covar=tensor([0.2095, 0.1993, 0.1909, 0.1533, 0.1501, 0.1152, 0.2648, 0.1771], + device='cuda:5'), in_proj_covar=tensor([0.0238, 0.0209, 0.0206, 0.0188, 0.0242, 0.0179, 0.0214, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:10:26,561 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46058.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:10:36,493 INFO [finetune.py:976] (5/7) Epoch 9, batch 250, loss[loss=0.1826, simple_loss=0.2544, pruned_loss=0.05538, over 4905.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2635, pruned_loss=0.0698, over 685477.86 frames. ], batch size: 35, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 10:10:38,716 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-03-26 10:11:09,154 INFO [finetune.py:976] (5/7) Epoch 9, batch 300, loss[loss=0.1835, simple_loss=0.2452, pruned_loss=0.06093, over 4832.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2666, pruned_loss=0.07062, over 744973.26 frames. ], batch size: 33, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 10:11:13,074 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. 
limit=2.0 +2023-03-26 10:11:14,968 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.304e+02 1.737e+02 2.021e+02 2.354e+02 3.684e+02, threshold=4.042e+02, percent-clipped=0.0 +2023-03-26 10:11:15,063 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46130.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:11:22,168 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9017, 1.9523, 1.9368, 1.2859, 2.0226, 2.0541, 2.0695, 1.6406], + device='cuda:5'), covar=tensor([0.0574, 0.0579, 0.0696, 0.0924, 0.0566, 0.0663, 0.0604, 0.1036], + device='cuda:5'), in_proj_covar=tensor([0.0136, 0.0132, 0.0144, 0.0124, 0.0116, 0.0144, 0.0144, 0.0159], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:11:22,233 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.13 vs. limit=2.0 +2023-03-26 10:11:41,958 INFO [finetune.py:976] (5/7) Epoch 9, batch 350, loss[loss=0.2455, simple_loss=0.3033, pruned_loss=0.09386, over 4927.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2693, pruned_loss=0.07102, over 793047.20 frames. ], batch size: 33, lr: 3.81e-03, grad_scale: 16.0 +2023-03-26 10:11:52,844 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7210, 3.6146, 3.3220, 1.6321, 3.7023, 2.8058, 0.6884, 2.4413], + device='cuda:5'), covar=tensor([0.2443, 0.1692, 0.1546, 0.3341, 0.1001, 0.1018, 0.4473, 0.1490], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0172, 0.0160, 0.0128, 0.0156, 0.0122, 0.0145, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 10:12:11,377 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46215.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:12:17,565 INFO [finetune.py:976] (5/7) Epoch 9, batch 400, loss[loss=0.2097, simple_loss=0.265, pruned_loss=0.0772, over 4859.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2698, pruned_loss=0.07115, over 830198.58 frames. ], batch size: 34, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:12:23,441 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.951e+01 1.676e+02 2.053e+02 2.418e+02 4.627e+02, threshold=4.106e+02, percent-clipped=2.0 +2023-03-26 10:13:00,669 INFO [finetune.py:976] (5/7) Epoch 9, batch 450, loss[loss=0.2047, simple_loss=0.2639, pruned_loss=0.07274, over 4849.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2672, pruned_loss=0.06982, over 858467.54 frames. ], batch size: 49, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:13:03,239 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46276.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:13:35,970 INFO [finetune.py:976] (5/7) Epoch 9, batch 500, loss[loss=0.2026, simple_loss=0.2733, pruned_loss=0.066, over 4881.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2654, pruned_loss=0.06955, over 879315.39 frames. ], batch size: 43, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:13:45,335 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.146e+02 1.586e+02 1.900e+02 2.408e+02 3.619e+02, threshold=3.799e+02, percent-clipped=0.0 +2023-03-26 10:13:54,715 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46337.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:13:59,936 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. 
limit=2.0 +2023-03-26 10:14:08,395 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46358.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:14:09,079 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0 +2023-03-26 10:14:17,261 INFO [finetune.py:976] (5/7) Epoch 9, batch 550, loss[loss=0.2393, simple_loss=0.2914, pruned_loss=0.09362, over 4854.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2626, pruned_loss=0.06863, over 897628.30 frames. ], batch size: 44, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:14:39,979 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=46406.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:14:43,746 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.6075, 1.7019, 1.7562, 1.0277, 1.7259, 2.0673, 1.9562, 1.5174], + device='cuda:5'), covar=tensor([0.0882, 0.0482, 0.0402, 0.0502, 0.0362, 0.0402, 0.0280, 0.0509], + device='cuda:5'), in_proj_covar=tensor([0.0129, 0.0156, 0.0121, 0.0136, 0.0132, 0.0125, 0.0146, 0.0147], + device='cuda:5'), out_proj_covar=tensor([9.6270e-05, 1.1497e-04, 8.7723e-05, 9.8639e-05, 9.4303e-05, 9.1793e-05, + 1.0692e-04, 1.0814e-04], device='cuda:5') +2023-03-26 10:14:50,108 INFO [finetune.py:976] (5/7) Epoch 9, batch 600, loss[loss=0.1883, simple_loss=0.2596, pruned_loss=0.05856, over 4889.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2628, pruned_loss=0.06848, over 911289.09 frames. ], batch size: 32, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:14:54,848 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.149e+02 1.702e+02 1.969e+02 2.390e+02 4.680e+02, threshold=3.938e+02, percent-clipped=3.0 +2023-03-26 10:14:54,941 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46430.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:15:36,213 INFO [finetune.py:976] (5/7) Epoch 9, batch 650, loss[loss=0.2003, simple_loss=0.2749, pruned_loss=0.06288, over 4808.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2654, pruned_loss=0.06902, over 920435.31 frames. ], batch size: 45, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:15:40,463 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=46478.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:15:41,716 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5354, 1.5023, 1.5695, 1.7189, 1.5372, 3.2077, 1.2748, 1.5428], + device='cuda:5'), covar=tensor([0.0963, 0.1777, 0.1143, 0.0993, 0.1638, 0.0240, 0.1645, 0.1843], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0076, 0.0079, 0.0093, 0.0083, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 10:15:52,022 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-03-26 10:16:05,402 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46515.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:16:09,544 INFO [finetune.py:976] (5/7) Epoch 9, batch 700, loss[loss=0.2279, simple_loss=0.2907, pruned_loss=0.08258, over 4922.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2674, pruned_loss=0.06976, over 929019.25 frames. 
], batch size: 42, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:16:14,888 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.046e+02 1.666e+02 2.010e+02 2.529e+02 4.289e+02, threshold=4.019e+02, percent-clipped=2.0 +2023-03-26 10:16:26,205 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6677, 1.5858, 1.4921, 1.5240, 1.9890, 1.8877, 1.7522, 1.4057], + device='cuda:5'), covar=tensor([0.0299, 0.0317, 0.0523, 0.0300, 0.0184, 0.0473, 0.0281, 0.0396], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0110, 0.0139, 0.0115, 0.0102, 0.0101, 0.0091, 0.0108], + device='cuda:5'), out_proj_covar=tensor([6.9912e-05, 8.6186e-05, 1.1125e-04, 9.0722e-05, 8.0368e-05, 7.5266e-05, + 6.8636e-05, 8.3810e-05], device='cuda:5') +2023-03-26 10:16:37,473 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=46563.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:16:42,851 INFO [finetune.py:976] (5/7) Epoch 9, batch 750, loss[loss=0.1783, simple_loss=0.2504, pruned_loss=0.05308, over 4799.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2686, pruned_loss=0.06998, over 935352.16 frames. ], batch size: 45, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:16:58,833 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46595.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:17:01,747 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2334, 1.2909, 1.6124, 0.9904, 1.2572, 1.3989, 1.2749, 1.6080], + device='cuda:5'), covar=tensor([0.1214, 0.2137, 0.1258, 0.1589, 0.1004, 0.1286, 0.2784, 0.0843], + device='cuda:5'), in_proj_covar=tensor([0.0202, 0.0205, 0.0198, 0.0195, 0.0182, 0.0220, 0.0219, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:17:03,704 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46602.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:17:15,993 INFO [finetune.py:976] (5/7) Epoch 9, batch 800, loss[loss=0.1765, simple_loss=0.2421, pruned_loss=0.05549, over 4868.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2675, pruned_loss=0.06881, over 939961.29 frames. ], batch size: 34, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:17:20,814 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.216e+01 1.561e+02 1.863e+02 2.153e+02 3.377e+02, threshold=3.726e+02, percent-clipped=0.0 +2023-03-26 10:17:22,505 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46632.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:17:39,098 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46656.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 10:17:40,244 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-26 10:17:43,773 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46663.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:17:49,096 INFO [finetune.py:976] (5/7) Epoch 9, batch 850, loss[loss=0.2146, simple_loss=0.2783, pruned_loss=0.07541, over 4790.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2652, pruned_loss=0.06786, over 945380.41 frames. ], batch size: 29, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:18:34,821 INFO [finetune.py:976] (5/7) Epoch 9, batch 900, loss[loss=0.2066, simple_loss=0.2583, pruned_loss=0.07743, over 4902.00 frames. 
], tot_loss[loss=0.1983, simple_loss=0.2622, pruned_loss=0.06719, over 949126.46 frames. ], batch size: 36, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:18:39,658 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.365e+01 1.501e+02 1.768e+02 2.130e+02 3.855e+02, threshold=3.537e+02, percent-clipped=1.0 +2023-03-26 10:19:05,946 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5649, 1.4211, 1.8059, 2.9044, 2.0306, 2.1214, 1.0232, 2.3401], + device='cuda:5'), covar=tensor([0.1748, 0.1495, 0.1302, 0.0707, 0.0805, 0.1295, 0.1737, 0.0682], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0117, 0.0133, 0.0165, 0.0102, 0.0138, 0.0126, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 10:19:09,050 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6501, 1.5098, 1.7053, 1.8536, 1.5552, 3.1960, 1.3372, 1.5875], + device='cuda:5'), covar=tensor([0.0849, 0.1642, 0.1233, 0.0882, 0.1533, 0.0234, 0.1452, 0.1694], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0076, 0.0079, 0.0093, 0.0084, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 10:19:10,159 INFO [finetune.py:976] (5/7) Epoch 9, batch 950, loss[loss=0.173, simple_loss=0.2427, pruned_loss=0.05161, over 4763.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2602, pruned_loss=0.06652, over 951976.86 frames. ], batch size: 27, lr: 3.80e-03, grad_scale: 32.0 +2023-03-26 10:19:33,609 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.1813, 1.2971, 1.4519, 0.7082, 1.3491, 1.6125, 1.6146, 1.2700], + device='cuda:5'), covar=tensor([0.1054, 0.0789, 0.0528, 0.0624, 0.0535, 0.0604, 0.0459, 0.0766], + device='cuda:5'), in_proj_covar=tensor([0.0129, 0.0156, 0.0121, 0.0135, 0.0131, 0.0125, 0.0145, 0.0147], + device='cuda:5'), out_proj_covar=tensor([9.5887e-05, 1.1509e-04, 8.7053e-05, 9.7972e-05, 9.4031e-05, 9.1729e-05, + 1.0676e-04, 1.0816e-04], device='cuda:5') +2023-03-26 10:19:36,025 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46810.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:19:44,265 INFO [finetune.py:976] (5/7) Epoch 9, batch 1000, loss[loss=0.2316, simple_loss=0.2946, pruned_loss=0.08426, over 4803.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2627, pruned_loss=0.06773, over 953914.04 frames. ], batch size: 51, lr: 3.80e-03, grad_scale: 32.0 +2023-03-26 10:19:49,058 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.128e+02 1.690e+02 1.992e+02 2.479e+02 5.334e+02, threshold=3.983e+02, percent-clipped=2.0 +2023-03-26 10:20:22,585 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46871.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:20:23,064 INFO [finetune.py:976] (5/7) Epoch 9, batch 1050, loss[loss=0.1675, simple_loss=0.2337, pruned_loss=0.05061, over 4890.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2651, pruned_loss=0.06805, over 954052.91 frames. 
], batch size: 32, lr: 3.80e-03, grad_scale: 32.0 +2023-03-26 10:20:56,964 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1095, 1.6071, 2.0160, 1.9627, 1.7063, 1.7281, 1.9065, 1.8340], + device='cuda:5'), covar=tensor([0.4664, 0.5562, 0.4222, 0.5046, 0.6151, 0.4598, 0.6354, 0.4066], + device='cuda:5'), in_proj_covar=tensor([0.0233, 0.0241, 0.0254, 0.0256, 0.0248, 0.0224, 0.0274, 0.0230], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:21:05,010 INFO [finetune.py:976] (5/7) Epoch 9, batch 1100, loss[loss=0.1585, simple_loss=0.2282, pruned_loss=0.04437, over 4748.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2673, pruned_loss=0.06954, over 953911.09 frames. ], batch size: 26, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:21:10,488 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.274e+02 1.654e+02 2.044e+02 2.534e+02 3.814e+02, threshold=4.089e+02, percent-clipped=0.0 +2023-03-26 10:21:11,166 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46932.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:21:23,058 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46951.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 10:21:25,535 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1888, 1.2989, 1.6652, 0.9891, 1.2058, 1.4100, 1.2730, 1.5991], + device='cuda:5'), covar=tensor([0.1361, 0.2238, 0.1292, 0.1795, 0.1152, 0.1422, 0.2843, 0.0964], + device='cuda:5'), in_proj_covar=tensor([0.0201, 0.0206, 0.0197, 0.0195, 0.0182, 0.0220, 0.0219, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:21:28,254 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46958.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:21:37,702 INFO [finetune.py:976] (5/7) Epoch 9, batch 1150, loss[loss=0.168, simple_loss=0.2368, pruned_loss=0.04955, over 4776.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2679, pruned_loss=0.06925, over 955710.29 frames. ], batch size: 26, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:21:42,583 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=46980.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:21:57,315 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.4411, 2.8112, 2.6016, 1.3897, 2.8595, 2.4016, 2.2001, 2.4709], + device='cuda:5'), covar=tensor([0.0815, 0.1060, 0.1771, 0.2315, 0.1916, 0.2138, 0.2083, 0.1259], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0200, 0.0201, 0.0188, 0.0218, 0.0206, 0.0222, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:22:10,619 INFO [finetune.py:976] (5/7) Epoch 9, batch 1200, loss[loss=0.1739, simple_loss=0.2471, pruned_loss=0.05033, over 4843.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2658, pruned_loss=0.06837, over 955802.00 frames. 
], batch size: 44, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:22:15,010 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0711, 1.7695, 2.5522, 4.1752, 2.9268, 2.6766, 0.9180, 3.3770], + device='cuda:5'), covar=tensor([0.1925, 0.1719, 0.1574, 0.0542, 0.0808, 0.1574, 0.2236, 0.0475], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0118, 0.0135, 0.0167, 0.0102, 0.0139, 0.0127, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 10:22:16,139 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.814e+01 1.625e+02 2.018e+02 2.406e+02 3.989e+02, threshold=4.036e+02, percent-clipped=0.0 +2023-03-26 10:22:42,198 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-03-26 10:22:43,585 INFO [finetune.py:976] (5/7) Epoch 9, batch 1250, loss[loss=0.2266, simple_loss=0.2763, pruned_loss=0.08847, over 4911.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2633, pruned_loss=0.06768, over 953865.60 frames. ], batch size: 36, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:23:04,123 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7437, 1.7350, 2.2466, 1.3797, 2.0189, 2.1497, 1.7471, 2.3960], + device='cuda:5'), covar=tensor([0.1483, 0.1907, 0.1420, 0.2120, 0.0917, 0.1504, 0.2492, 0.0783], + device='cuda:5'), in_proj_covar=tensor([0.0200, 0.0204, 0.0196, 0.0193, 0.0181, 0.0218, 0.0217, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:23:21,439 INFO [finetune.py:976] (5/7) Epoch 9, batch 1300, loss[loss=0.2193, simple_loss=0.27, pruned_loss=0.08435, over 4758.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2602, pruned_loss=0.06666, over 954630.65 frames. ], batch size: 28, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:23:31,784 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.169e+01 1.658e+02 1.956e+02 2.404e+02 5.414e+02, threshold=3.912e+02, percent-clipped=2.0 +2023-03-26 10:23:38,574 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4387, 1.3155, 1.5336, 0.9204, 1.5224, 1.5823, 1.4775, 1.1459], + device='cuda:5'), covar=tensor([0.0593, 0.0819, 0.0629, 0.0900, 0.0733, 0.0583, 0.0671, 0.1519], + device='cuda:5'), in_proj_covar=tensor([0.0137, 0.0133, 0.0145, 0.0125, 0.0117, 0.0145, 0.0145, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:23:58,427 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47166.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:23:59,282 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.81 vs. limit=5.0 +2023-03-26 10:24:03,031 INFO [finetune.py:976] (5/7) Epoch 9, batch 1350, loss[loss=0.2246, simple_loss=0.2724, pruned_loss=0.08844, over 3954.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2614, pruned_loss=0.06765, over 954693.29 frames. ], batch size: 17, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:24:09,705 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-03-26 10:24:20,019 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.60 vs. limit=2.0 +2023-03-26 10:24:20,171 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.91 vs. 
limit=5.0 +2023-03-26 10:24:33,925 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-03-26 10:24:36,409 INFO [finetune.py:976] (5/7) Epoch 9, batch 1400, loss[loss=0.2077, simple_loss=0.2848, pruned_loss=0.06531, over 4758.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2657, pruned_loss=0.0693, over 956401.15 frames. ], batch size: 59, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:24:42,786 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.151e+02 1.739e+02 2.002e+02 2.347e+02 4.228e+02, threshold=4.005e+02, percent-clipped=2.0 +2023-03-26 10:24:49,030 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47241.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:24:49,195 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-03-26 10:24:55,110 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47251.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:24:59,421 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47258.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:25:09,253 INFO [finetune.py:976] (5/7) Epoch 9, batch 1450, loss[loss=0.2187, simple_loss=0.2839, pruned_loss=0.07678, over 4811.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2679, pruned_loss=0.06969, over 955051.36 frames. ], batch size: 40, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:25:13,986 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7384, 1.6630, 1.4770, 1.8137, 2.2601, 1.8575, 1.4746, 1.4565], + device='cuda:5'), covar=tensor([0.2028, 0.1987, 0.1802, 0.1607, 0.1637, 0.1125, 0.2445, 0.1849], + device='cuda:5'), in_proj_covar=tensor([0.0238, 0.0208, 0.0205, 0.0188, 0.0241, 0.0179, 0.0215, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:25:27,570 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=47299.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:25:29,411 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47302.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:25:35,611 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2892, 2.8702, 2.7316, 1.1297, 2.8948, 2.1691, 0.6639, 1.8438], + device='cuda:5'), covar=tensor([0.2646, 0.2142, 0.1755, 0.3684, 0.1419, 0.1253, 0.4210, 0.1678], + device='cuda:5'), in_proj_covar=tensor([0.0154, 0.0173, 0.0160, 0.0129, 0.0156, 0.0122, 0.0146, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 10:25:36,207 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=47306.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:25:53,870 INFO [finetune.py:976] (5/7) Epoch 9, batch 1500, loss[loss=0.2416, simple_loss=0.3024, pruned_loss=0.09034, over 4834.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2704, pruned_loss=0.0713, over 953869.64 frames. ], batch size: 47, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:26:00,803 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.103e+02 1.648e+02 1.983e+02 2.305e+02 4.092e+02, threshold=3.967e+02, percent-clipped=1.0 +2023-03-26 10:26:26,876 INFO [finetune.py:976] (5/7) Epoch 9, batch 1550, loss[loss=0.1851, simple_loss=0.251, pruned_loss=0.05962, over 4782.00 frames. 
], tot_loss[loss=0.2049, simple_loss=0.2691, pruned_loss=0.07038, over 954241.83 frames. ], batch size: 51, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:26:33,205 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47379.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:26:50,627 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-03-26 10:26:51,166 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0567, 2.1363, 2.1189, 1.4598, 2.2218, 2.2296, 2.2385, 1.9206], + device='cuda:5'), covar=tensor([0.0648, 0.0600, 0.0791, 0.0968, 0.0616, 0.0821, 0.0684, 0.1029], + device='cuda:5'), in_proj_covar=tensor([0.0137, 0.0132, 0.0145, 0.0125, 0.0117, 0.0145, 0.0145, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:26:51,595 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0 +2023-03-26 10:27:00,841 INFO [finetune.py:976] (5/7) Epoch 9, batch 1600, loss[loss=0.2496, simple_loss=0.2974, pruned_loss=0.1009, over 4821.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2669, pruned_loss=0.06976, over 954086.38 frames. ], batch size: 38, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:27:06,413 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-03-26 10:27:07,814 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.155e+02 1.632e+02 1.991e+02 2.321e+02 5.028e+02, threshold=3.982e+02, percent-clipped=1.0 +2023-03-26 10:27:14,443 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47440.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:27:15,529 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1329, 1.9193, 1.6381, 1.9125, 2.0264, 1.7714, 2.3165, 2.0811], + device='cuda:5'), covar=tensor([0.1555, 0.2623, 0.3537, 0.2952, 0.2959, 0.1972, 0.3723, 0.2123], + device='cuda:5'), in_proj_covar=tensor([0.0172, 0.0188, 0.0233, 0.0253, 0.0237, 0.0194, 0.0211, 0.0193], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:27:30,690 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47466.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:27:34,230 INFO [finetune.py:976] (5/7) Epoch 9, batch 1650, loss[loss=0.1796, simple_loss=0.2556, pruned_loss=0.05184, over 4771.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2643, pruned_loss=0.06897, over 954786.26 frames. ], batch size: 28, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:27:50,624 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-03-26 10:28:02,973 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=47514.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:28:07,772 INFO [finetune.py:976] (5/7) Epoch 9, batch 1700, loss[loss=0.1702, simple_loss=0.2373, pruned_loss=0.05155, over 4873.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2623, pruned_loss=0.06812, over 955103.18 frames. 
], batch size: 31, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:28:13,224 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.017e+02 1.617e+02 1.864e+02 2.209e+02 5.015e+02, threshold=3.728e+02, percent-clipped=2.0 +2023-03-26 10:28:18,461 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0825, 0.9782, 1.0264, 0.4370, 0.8183, 1.1892, 1.2439, 1.0190], + device='cuda:5'), covar=tensor([0.0898, 0.0553, 0.0480, 0.0509, 0.0533, 0.0545, 0.0332, 0.0714], + device='cuda:5'), in_proj_covar=tensor([0.0129, 0.0156, 0.0121, 0.0135, 0.0132, 0.0125, 0.0146, 0.0147], + device='cuda:5'), out_proj_covar=tensor([9.5666e-05, 1.1515e-04, 8.7450e-05, 9.8036e-05, 9.4347e-05, 9.1648e-05, + 1.0683e-04, 1.0838e-04], device='cuda:5') +2023-03-26 10:28:47,730 INFO [finetune.py:976] (5/7) Epoch 9, batch 1750, loss[loss=0.1998, simple_loss=0.2745, pruned_loss=0.06249, over 4791.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2652, pruned_loss=0.06946, over 956840.93 frames. ], batch size: 51, lr: 3.80e-03, grad_scale: 16.0 +2023-03-26 10:29:08,524 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7520, 1.2854, 0.9384, 1.5805, 2.0847, 1.1238, 1.5318, 1.5419], + device='cuda:5'), covar=tensor([0.1546, 0.2218, 0.1971, 0.1214, 0.2069, 0.2108, 0.1475, 0.2112], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0096, 0.0113, 0.0092, 0.0122, 0.0096, 0.0100, 0.0092], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 10:29:13,719 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47597.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:29:29,377 INFO [finetune.py:976] (5/7) Epoch 9, batch 1800, loss[loss=0.1895, simple_loss=0.2612, pruned_loss=0.05889, over 4910.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2672, pruned_loss=0.06963, over 955721.73 frames. ], batch size: 43, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:29:33,769 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6068, 1.5570, 1.4449, 1.6432, 1.0690, 3.6733, 1.4722, 2.0159], + device='cuda:5'), covar=tensor([0.3590, 0.2525, 0.2368, 0.2566, 0.2128, 0.0173, 0.2557, 0.1283], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0115, 0.0120, 0.0123, 0.0117, 0.0098, 0.0100, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 10:29:34,862 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.172e+02 1.736e+02 1.994e+02 2.573e+02 6.193e+02, threshold=3.989e+02, percent-clipped=4.0 +2023-03-26 10:29:55,775 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-03-26 10:30:02,647 INFO [finetune.py:976] (5/7) Epoch 9, batch 1850, loss[loss=0.236, simple_loss=0.29, pruned_loss=0.09101, over 4909.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2694, pruned_loss=0.07048, over 957818.88 frames. 
], batch size: 36, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:30:06,413 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.4883, 1.3301, 1.4308, 0.7517, 1.5007, 1.8125, 1.6449, 1.3236], + device='cuda:5'), covar=tensor([0.1291, 0.1156, 0.0556, 0.0795, 0.0504, 0.0636, 0.0469, 0.0869], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0158, 0.0123, 0.0136, 0.0133, 0.0127, 0.0147, 0.0149], + device='cuda:5'), out_proj_covar=tensor([9.6973e-05, 1.1639e-04, 8.8668e-05, 9.9142e-05, 9.4993e-05, 9.2878e-05, + 1.0797e-04, 1.0971e-04], device='cuda:5') +2023-03-26 10:30:35,893 INFO [finetune.py:976] (5/7) Epoch 9, batch 1900, loss[loss=0.1646, simple_loss=0.232, pruned_loss=0.04858, over 4806.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2688, pruned_loss=0.06986, over 957045.56 frames. ], batch size: 25, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:30:46,260 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.162e+02 1.608e+02 1.835e+02 2.236e+02 3.803e+02, threshold=3.670e+02, percent-clipped=0.0 +2023-03-26 10:30:53,008 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47735.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:30:54,872 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47738.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:31:03,052 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8780, 1.7636, 1.5412, 1.8595, 2.2581, 1.9124, 1.3578, 1.4745], + device='cuda:5'), covar=tensor([0.2103, 0.2032, 0.1893, 0.1616, 0.1721, 0.1154, 0.2635, 0.1906], + device='cuda:5'), in_proj_covar=tensor([0.0239, 0.0208, 0.0206, 0.0188, 0.0242, 0.0180, 0.0216, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:31:21,754 INFO [finetune.py:976] (5/7) Epoch 9, batch 1950, loss[loss=0.1977, simple_loss=0.2585, pruned_loss=0.06846, over 4938.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2669, pruned_loss=0.06896, over 954534.05 frames. ], batch size: 38, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:31:31,502 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47788.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:31:33,608 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.74 vs. limit=5.0 +2023-03-26 10:31:39,619 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47799.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:31:46,714 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47809.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 10:31:55,010 INFO [finetune.py:976] (5/7) Epoch 9, batch 2000, loss[loss=0.1651, simple_loss=0.2374, pruned_loss=0.04642, over 4836.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2638, pruned_loss=0.06816, over 954335.54 frames. ], batch size: 30, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:32:00,456 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.527e+02 1.824e+02 2.186e+02 3.277e+02, threshold=3.648e+02, percent-clipped=0.0 +2023-03-26 10:32:06,899 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. 
limit=2.0 +2023-03-26 10:32:11,908 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47849.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:32:21,721 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1724, 2.0083, 1.7070, 2.0631, 2.1192, 1.8283, 2.4663, 2.2081], + device='cuda:5'), covar=tensor([0.1459, 0.2613, 0.3404, 0.2879, 0.2757, 0.1973, 0.3447, 0.2052], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0187, 0.0231, 0.0252, 0.0236, 0.0193, 0.0210, 0.0192], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:32:35,764 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47870.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 10:32:36,817 INFO [finetune.py:976] (5/7) Epoch 9, batch 2050, loss[loss=0.2144, simple_loss=0.278, pruned_loss=0.07536, over 4916.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2608, pruned_loss=0.06737, over 952636.48 frames. ], batch size: 37, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:32:57,817 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47897.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:32:57,845 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4757, 1.3971, 1.5096, 0.8515, 1.5438, 1.5685, 1.4824, 1.2950], + device='cuda:5'), covar=tensor([0.0526, 0.0632, 0.0611, 0.0829, 0.0777, 0.0602, 0.0578, 0.1105], + device='cuda:5'), in_proj_covar=tensor([0.0136, 0.0131, 0.0143, 0.0124, 0.0116, 0.0143, 0.0144, 0.0160], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:33:15,823 INFO [finetune.py:976] (5/7) Epoch 9, batch 2100, loss[loss=0.2127, simple_loss=0.2855, pruned_loss=0.06997, over 4872.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2618, pruned_loss=0.06788, over 953457.69 frames. ], batch size: 44, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:33:21,286 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.115e+02 1.604e+02 1.917e+02 2.370e+02 5.169e+02, threshold=3.834e+02, percent-clipped=3.0 +2023-03-26 10:33:24,978 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7470, 1.4057, 1.7455, 1.8957, 1.4889, 3.2747, 1.2757, 1.5967], + device='cuda:5'), covar=tensor([0.0828, 0.1721, 0.1139, 0.0903, 0.1677, 0.0260, 0.1542, 0.1579], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0081, 0.0075, 0.0078, 0.0092, 0.0083, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 10:33:29,789 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=47945.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:33:54,984 INFO [finetune.py:976] (5/7) Epoch 9, batch 2150, loss[loss=0.2001, simple_loss=0.2594, pruned_loss=0.07044, over 4726.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2662, pruned_loss=0.06971, over 950901.23 frames. 
], batch size: 23, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:33:55,093 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7650, 1.7522, 1.8100, 1.0849, 1.8240, 1.8860, 1.8042, 1.5130], + device='cuda:5'), covar=tensor([0.0566, 0.0657, 0.0726, 0.0923, 0.0646, 0.0700, 0.0644, 0.1130], + device='cuda:5'), in_proj_covar=tensor([0.0136, 0.0131, 0.0143, 0.0124, 0.0116, 0.0144, 0.0144, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:34:50,619 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3218, 2.1572, 1.7674, 2.3288, 2.0824, 2.0446, 2.0076, 3.0832], + device='cuda:5'), covar=tensor([0.5006, 0.6238, 0.4557, 0.5811, 0.5231, 0.3134, 0.6039, 0.1946], + device='cuda:5'), in_proj_covar=tensor([0.0284, 0.0258, 0.0220, 0.0278, 0.0241, 0.0207, 0.0243, 0.0208], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:35:00,857 INFO [finetune.py:976] (5/7) Epoch 9, batch 2200, loss[loss=0.1885, simple_loss=0.2638, pruned_loss=0.0566, over 4910.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2686, pruned_loss=0.07022, over 951937.55 frames. ], batch size: 42, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:35:08,960 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2344, 1.9188, 2.6283, 1.5430, 2.4976, 2.3459, 1.8189, 2.5875], + device='cuda:5'), covar=tensor([0.1330, 0.1758, 0.1603, 0.2386, 0.0749, 0.1617, 0.2347, 0.0826], + device='cuda:5'), in_proj_covar=tensor([0.0200, 0.0205, 0.0196, 0.0194, 0.0180, 0.0219, 0.0218, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:35:11,848 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.338e+02 1.754e+02 2.052e+02 2.528e+02 4.321e+02, threshold=4.105e+02, percent-clipped=1.0 +2023-03-26 10:35:18,713 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48035.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:36:02,998 INFO [finetune.py:976] (5/7) Epoch 9, batch 2250, loss[loss=0.1838, simple_loss=0.2422, pruned_loss=0.06272, over 4731.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2695, pruned_loss=0.07083, over 952309.70 frames. ], batch size: 23, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:36:03,123 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48072.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:36:20,661 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=48083.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:36:32,288 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48094.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:37:02,695 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7289, 1.1587, 0.8634, 1.6503, 2.0221, 1.5188, 1.5141, 1.6599], + device='cuda:5'), covar=tensor([0.1479, 0.2060, 0.2143, 0.1130, 0.2112, 0.2394, 0.1358, 0.1931], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0096, 0.0113, 0.0092, 0.0121, 0.0095, 0.0099, 0.0092], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 10:37:05,452 INFO [finetune.py:976] (5/7) Epoch 9, batch 2300, loss[loss=0.167, simple_loss=0.2373, pruned_loss=0.0484, over 4806.00 frames. 
], tot_loss[loss=0.2046, simple_loss=0.2694, pruned_loss=0.06988, over 953251.72 frames. ], batch size: 39, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:37:13,687 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8016, 1.2016, 0.7131, 1.6964, 2.1835, 1.3766, 1.5810, 1.6259], + device='cuda:5'), covar=tensor([0.1525, 0.2196, 0.2306, 0.1219, 0.1985, 0.2202, 0.1456, 0.2153], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0096, 0.0113, 0.0092, 0.0121, 0.0095, 0.0099, 0.0092], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 10:37:16,655 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.508e+01 1.652e+02 1.874e+02 2.360e+02 5.580e+02, threshold=3.748e+02, percent-clipped=1.0 +2023-03-26 10:37:23,217 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48133.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:37:34,216 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48144.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:37:43,669 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-03-26 10:37:54,849 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48165.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 10:38:01,014 INFO [finetune.py:976] (5/7) Epoch 9, batch 2350, loss[loss=0.2096, simple_loss=0.2669, pruned_loss=0.07617, over 4895.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2662, pruned_loss=0.06901, over 953949.36 frames. ], batch size: 35, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:38:35,580 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48212.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 10:38:37,158 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-03-26 10:38:43,033 INFO [finetune.py:976] (5/7) Epoch 9, batch 2400, loss[loss=0.1723, simple_loss=0.2462, pruned_loss=0.04917, over 4813.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2629, pruned_loss=0.06773, over 954863.57 frames. ], batch size: 51, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:38:44,536 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-03-26 10:38:49,467 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.159e+02 1.602e+02 2.015e+02 2.421e+02 3.465e+02, threshold=4.031e+02, percent-clipped=0.0 +2023-03-26 10:38:53,705 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1173, 1.9777, 1.6671, 2.0369, 2.0529, 1.7348, 2.3763, 2.1130], + device='cuda:5'), covar=tensor([0.1457, 0.2524, 0.3457, 0.2774, 0.2809, 0.1767, 0.3353, 0.1905], + device='cuda:5'), in_proj_covar=tensor([0.0173, 0.0188, 0.0233, 0.0253, 0.0237, 0.0194, 0.0212, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:39:18,901 INFO [finetune.py:976] (5/7) Epoch 9, batch 2450, loss[loss=0.2294, simple_loss=0.2812, pruned_loss=0.08881, over 4735.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2601, pruned_loss=0.06726, over 955179.94 frames. 
], batch size: 23, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:39:19,636 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48273.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 10:40:01,691 INFO [finetune.py:976] (5/7) Epoch 9, batch 2500, loss[loss=0.1961, simple_loss=0.2608, pruned_loss=0.06571, over 4797.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2606, pruned_loss=0.06741, over 954547.29 frames. ], batch size: 29, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:40:03,384 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48324.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:40:09,019 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.093e+02 1.681e+02 1.962e+02 2.420e+02 5.026e+02, threshold=3.923e+02, percent-clipped=2.0 +2023-03-26 10:40:12,637 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9210, 1.7539, 1.4946, 1.6032, 1.6888, 1.6496, 1.6596, 2.4104], + device='cuda:5'), covar=tensor([0.5202, 0.5211, 0.4363, 0.5175, 0.5007, 0.2985, 0.5042, 0.2225], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0260, 0.0222, 0.0280, 0.0243, 0.0209, 0.0245, 0.0210], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:40:35,358 INFO [finetune.py:976] (5/7) Epoch 9, batch 2550, loss[loss=0.1991, simple_loss=0.2648, pruned_loss=0.06674, over 4868.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2649, pruned_loss=0.0684, over 956071.67 frames. ], batch size: 31, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:40:42,824 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-03-26 10:40:45,889 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7023, 1.6475, 2.0142, 1.2833, 1.6790, 1.9017, 1.5908, 2.1101], + device='cuda:5'), covar=tensor([0.1258, 0.1950, 0.1506, 0.1847, 0.0999, 0.1283, 0.2677, 0.0958], + device='cuda:5'), in_proj_covar=tensor([0.0201, 0.0205, 0.0196, 0.0194, 0.0180, 0.0219, 0.0218, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:40:45,905 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48385.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:40:51,866 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48394.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:40:53,640 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48397.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:40:58,640 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3368, 1.5146, 1.2718, 1.4257, 1.7429, 1.6224, 1.4513, 1.2875], + device='cuda:5'), covar=tensor([0.0350, 0.0247, 0.0514, 0.0300, 0.0215, 0.0476, 0.0287, 0.0336], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0109, 0.0139, 0.0114, 0.0102, 0.0101, 0.0091, 0.0107], + device='cuda:5'), out_proj_covar=tensor([7.0229e-05, 8.5601e-05, 1.1104e-04, 8.9734e-05, 8.0391e-05, 7.5266e-05, + 6.9029e-05, 8.2920e-05], device='cuda:5') +2023-03-26 10:41:05,394 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7645, 0.9860, 1.6925, 1.6523, 1.4447, 1.4157, 1.5513, 1.5028], + device='cuda:5'), covar=tensor([0.3838, 0.4891, 0.4161, 0.4197, 0.5526, 0.3990, 0.5177, 0.3939], + device='cuda:5'), in_proj_covar=tensor([0.0233, 0.0240, 0.0254, 0.0256, 0.0250, 0.0225, 0.0274, 0.0229], + 
device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:41:05,699 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.70 vs. limit=5.0 +2023-03-26 10:41:07,199 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6135, 1.5472, 1.3463, 1.7319, 1.9251, 1.6167, 1.2601, 1.3393], + device='cuda:5'), covar=tensor([0.2334, 0.2180, 0.2016, 0.1640, 0.1698, 0.1267, 0.2797, 0.1907], + device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0207, 0.0203, 0.0185, 0.0239, 0.0178, 0.0212, 0.0193], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:41:08,895 INFO [finetune.py:976] (5/7) Epoch 9, batch 2600, loss[loss=0.2151, simple_loss=0.2874, pruned_loss=0.07139, over 4918.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2656, pruned_loss=0.06824, over 955437.85 frames. ], batch size: 38, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:41:09,180 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-03-26 10:41:09,601 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48423.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:41:12,598 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48428.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:41:15,191 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.754e+02 2.041e+02 2.553e+02 4.015e+02, threshold=4.083e+02, percent-clipped=1.0 +2023-03-26 10:41:21,738 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-03-26 10:41:23,187 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6189, 1.5825, 1.5520, 1.6394, 1.2816, 2.7340, 1.3307, 1.7392], + device='cuda:5'), covar=tensor([0.2826, 0.2150, 0.1843, 0.2097, 0.1622, 0.0341, 0.2879, 0.1176], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0120, 0.0123, 0.0117, 0.0098, 0.0101, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 10:41:23,751 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=48442.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:41:25,446 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48444.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:41:29,455 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.16 vs. limit=5.0 +2023-03-26 10:41:34,024 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48458.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:41:38,234 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48465.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 10:41:42,380 INFO [finetune.py:976] (5/7) Epoch 9, batch 2650, loss[loss=0.1934, simple_loss=0.2631, pruned_loss=0.06182, over 4771.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2669, pruned_loss=0.06871, over 955207.51 frames. 
], batch size: 51, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:41:56,017 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48484.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:42:06,173 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=48492.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:42:19,479 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=48513.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 10:42:20,451 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-03-26 10:42:24,898 INFO [finetune.py:976] (5/7) Epoch 9, batch 2700, loss[loss=0.1715, simple_loss=0.2388, pruned_loss=0.05212, over 4812.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2668, pruned_loss=0.06867, over 956839.36 frames. ], batch size: 25, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:42:30,333 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.052e+02 1.590e+02 1.873e+02 2.487e+02 4.580e+02, threshold=3.745e+02, percent-clipped=2.0 +2023-03-26 10:43:07,970 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48568.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 10:43:10,321 INFO [finetune.py:976] (5/7) Epoch 9, batch 2750, loss[loss=0.2122, simple_loss=0.2715, pruned_loss=0.07643, over 4876.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2632, pruned_loss=0.0677, over 957799.78 frames. ], batch size: 31, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:43:22,293 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8932, 1.3663, 1.8778, 1.8440, 1.6773, 1.6168, 1.7626, 1.7045], + device='cuda:5'), covar=tensor([0.4547, 0.4988, 0.4213, 0.4753, 0.5677, 0.4360, 0.5740, 0.4026], + device='cuda:5'), in_proj_covar=tensor([0.0233, 0.0240, 0.0254, 0.0256, 0.0249, 0.0225, 0.0273, 0.0229], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:43:45,670 INFO [finetune.py:976] (5/7) Epoch 9, batch 2800, loss[loss=0.1582, simple_loss=0.2091, pruned_loss=0.05363, over 3999.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2587, pruned_loss=0.06548, over 958079.66 frames. ], batch size: 17, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:43:51,108 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.018e+02 1.496e+02 1.797e+02 2.211e+02 4.995e+02, threshold=3.593e+02, percent-clipped=1.0 +2023-03-26 10:44:04,889 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9336, 1.9501, 1.9730, 1.2654, 2.0923, 2.1543, 2.0262, 1.6636], + device='cuda:5'), covar=tensor([0.0639, 0.0690, 0.0737, 0.0984, 0.0582, 0.0715, 0.0642, 0.1162], + device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0131, 0.0142, 0.0123, 0.0116, 0.0142, 0.0143, 0.0159], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:44:19,115 INFO [finetune.py:976] (5/7) Epoch 9, batch 2850, loss[loss=0.2091, simple_loss=0.2728, pruned_loss=0.07269, over 4811.00 frames. ], tot_loss[loss=0.1947, simple_loss=0.2585, pruned_loss=0.06548, over 958938.81 frames. ], batch size: 41, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:44:24,043 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48680.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:44:26,028 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. 
limit=2.0 +2023-03-26 10:45:04,575 INFO [finetune.py:976] (5/7) Epoch 9, batch 2900, loss[loss=0.1931, simple_loss=0.2647, pruned_loss=0.06075, over 4746.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.263, pruned_loss=0.06715, over 958238.92 frames. ], batch size: 26, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:45:08,315 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48728.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:45:10,030 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.274e+02 1.660e+02 1.926e+02 2.355e+02 4.281e+02, threshold=3.853e+02, percent-clipped=2.0 +2023-03-26 10:45:15,061 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2112, 2.2493, 2.2604, 1.7822, 2.0488, 2.5269, 2.3319, 1.9888], + device='cuda:5'), covar=tensor([0.0504, 0.0561, 0.0634, 0.0834, 0.0931, 0.0598, 0.0527, 0.0930], + device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0131, 0.0142, 0.0122, 0.0116, 0.0142, 0.0142, 0.0159], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:45:25,494 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48753.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:45:38,483 INFO [finetune.py:976] (5/7) Epoch 9, batch 2950, loss[loss=0.2579, simple_loss=0.3163, pruned_loss=0.09974, over 4893.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2659, pruned_loss=0.06784, over 954867.08 frames. ], batch size: 43, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:45:40,984 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=48776.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:45:42,839 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48779.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:45:51,904 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5525, 3.4342, 3.3284, 1.5985, 3.5604, 2.6839, 0.8817, 2.4526], + device='cuda:5'), covar=tensor([0.2154, 0.1741, 0.1525, 0.3341, 0.1113, 0.0965, 0.4386, 0.1398], + device='cuda:5'), in_proj_covar=tensor([0.0154, 0.0174, 0.0159, 0.0129, 0.0157, 0.0122, 0.0147, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 10:46:11,321 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48821.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:46:11,795 INFO [finetune.py:976] (5/7) Epoch 9, batch 3000, loss[loss=0.2466, simple_loss=0.3064, pruned_loss=0.09343, over 4716.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2684, pruned_loss=0.069, over 955275.81 frames. ], batch size: 54, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:46:11,795 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 10:46:22,396 INFO [finetune.py:1010] (5/7) Epoch 9, validation: loss=0.159, simple_loss=0.2302, pruned_loss=0.04393, over 2265189.00 frames. 
+2023-03-26 10:46:22,396 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 10:46:27,909 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.053e+02 1.631e+02 1.894e+02 2.277e+02 3.777e+02, threshold=3.789e+02, percent-clipped=0.0 +2023-03-26 10:46:32,268 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48838.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:46:52,191 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48868.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 10:46:54,487 INFO [finetune.py:976] (5/7) Epoch 9, batch 3050, loss[loss=0.2121, simple_loss=0.2723, pruned_loss=0.07589, over 4764.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2686, pruned_loss=0.06889, over 954977.31 frames. ], batch size: 59, lr: 3.79e-03, grad_scale: 16.0 +2023-03-26 10:47:03,426 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48882.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:47:04,009 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.8748, 4.2612, 4.4790, 4.6442, 4.5863, 4.3130, 4.9579, 1.4832], + device='cuda:5'), covar=tensor([0.0597, 0.0736, 0.0694, 0.0710, 0.1044, 0.1481, 0.0507, 0.5338], + device='cuda:5'), in_proj_covar=tensor([0.0349, 0.0244, 0.0275, 0.0291, 0.0330, 0.0281, 0.0300, 0.0293], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:47:13,612 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48899.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:47:21,497 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0109, 1.7911, 1.5811, 1.7311, 1.7203, 1.6832, 1.7483, 2.4594], + device='cuda:5'), covar=tensor([0.4594, 0.4890, 0.3596, 0.4565, 0.4471, 0.2621, 0.4434, 0.1832], + device='cuda:5'), in_proj_covar=tensor([0.0284, 0.0258, 0.0220, 0.0278, 0.0241, 0.0207, 0.0244, 0.0208], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:47:25,231 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=48916.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 10:47:29,279 INFO [finetune.py:976] (5/7) Epoch 9, batch 3100, loss[loss=0.1964, simple_loss=0.261, pruned_loss=0.06592, over 4910.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2654, pruned_loss=0.06828, over 954867.00 frames. ], batch size: 36, lr: 3.79e-03, grad_scale: 32.0 +2023-03-26 10:47:36,135 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.216e+02 1.624e+02 1.916e+02 2.206e+02 4.881e+02, threshold=3.833e+02, percent-clipped=2.0 +2023-03-26 10:47:58,891 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0521, 1.2469, 1.8737, 1.8279, 1.6992, 1.6755, 1.7443, 1.7853], + device='cuda:5'), covar=tensor([0.4398, 0.5251, 0.4217, 0.4999, 0.6196, 0.4634, 0.5928, 0.4151], + device='cuda:5'), in_proj_covar=tensor([0.0232, 0.0240, 0.0254, 0.0256, 0.0249, 0.0225, 0.0273, 0.0229], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:48:07,637 INFO [finetune.py:976] (5/7) Epoch 9, batch 3150, loss[loss=0.165, simple_loss=0.2359, pruned_loss=0.04699, over 4915.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2625, pruned_loss=0.06709, over 955255.79 frames. 
], batch size: 43, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 10:48:14,461 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8864, 0.9559, 1.8127, 1.6983, 1.5606, 1.5015, 1.5245, 1.6596], + device='cuda:5'), covar=tensor([0.3754, 0.4648, 0.3877, 0.4039, 0.4982, 0.4077, 0.5054, 0.3772], + device='cuda:5'), in_proj_covar=tensor([0.0232, 0.0240, 0.0254, 0.0256, 0.0249, 0.0225, 0.0273, 0.0229], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:48:16,687 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48980.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:48:48,386 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-03-26 10:48:51,003 INFO [finetune.py:976] (5/7) Epoch 9, batch 3200, loss[loss=0.2294, simple_loss=0.2858, pruned_loss=0.08649, over 4892.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.26, pruned_loss=0.06644, over 956284.04 frames. ], batch size: 43, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 10:48:55,660 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=49028.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:48:57,863 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.147e+02 1.665e+02 1.973e+02 2.326e+02 6.022e+02, threshold=3.945e+02, percent-clipped=4.0 +2023-03-26 10:48:59,075 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5518, 1.5069, 1.8662, 1.9682, 1.6551, 3.4181, 1.4424, 1.6286], + device='cuda:5'), covar=tensor([0.0965, 0.1719, 0.1139, 0.0911, 0.1449, 0.0243, 0.1443, 0.1636], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0081, 0.0075, 0.0078, 0.0091, 0.0082, 0.0084, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 10:49:11,010 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-03-26 10:49:12,725 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49053.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:49:30,123 INFO [finetune.py:976] (5/7) Epoch 9, batch 3250, loss[loss=0.2282, simple_loss=0.3121, pruned_loss=0.07217, over 4859.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2614, pruned_loss=0.06686, over 956516.87 frames. ], batch size: 49, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 10:49:40,139 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49079.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:50:05,718 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=49101.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:50:21,990 INFO [finetune.py:976] (5/7) Epoch 9, batch 3300, loss[loss=0.2448, simple_loss=0.301, pruned_loss=0.09427, over 4895.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2659, pruned_loss=0.06849, over 955866.06 frames. 
], batch size: 43, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 10:50:26,598 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=49127.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:50:28,940 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.155e+02 1.707e+02 1.914e+02 2.346e+02 3.542e+02, threshold=3.827e+02, percent-clipped=0.0 +2023-03-26 10:50:30,170 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6397, 1.3931, 2.0793, 3.2462, 2.1102, 2.4593, 0.9359, 2.4699], + device='cuda:5'), covar=tensor([0.1836, 0.1567, 0.1306, 0.0583, 0.0884, 0.1285, 0.1983, 0.0653], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0117, 0.0133, 0.0165, 0.0102, 0.0138, 0.0126, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 10:50:46,339 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.4379, 3.8798, 4.1152, 4.1856, 4.1880, 3.9216, 4.4853, 1.9414], + device='cuda:5'), covar=tensor([0.0668, 0.0764, 0.0674, 0.0793, 0.1016, 0.1126, 0.0566, 0.4433], + device='cuda:5'), in_proj_covar=tensor([0.0352, 0.0246, 0.0277, 0.0293, 0.0334, 0.0284, 0.0302, 0.0297], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:50:56,005 INFO [finetune.py:976] (5/7) Epoch 9, batch 3350, loss[loss=0.1681, simple_loss=0.2458, pruned_loss=0.04522, over 4921.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2665, pruned_loss=0.06891, over 952913.79 frames. ], batch size: 42, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 10:50:59,115 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49177.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:51:17,583 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49194.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:51:50,172 INFO [finetune.py:976] (5/7) Epoch 9, batch 3400, loss[loss=0.1757, simple_loss=0.2624, pruned_loss=0.04453, over 4792.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2666, pruned_loss=0.06899, over 952176.34 frames. ], batch size: 51, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 10:51:59,826 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.353e+01 1.606e+02 1.878e+02 2.295e+02 4.525e+02, threshold=3.756e+02, percent-clipped=2.0 +2023-03-26 10:52:10,030 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49236.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:52:55,267 INFO [finetune.py:976] (5/7) Epoch 9, batch 3450, loss[loss=0.17, simple_loss=0.2411, pruned_loss=0.04943, over 4729.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2665, pruned_loss=0.06872, over 951969.84 frames. ], batch size: 27, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 10:52:56,670 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2218, 1.2965, 1.5691, 1.0159, 1.1361, 1.4102, 1.2526, 1.5184], + device='cuda:5'), covar=tensor([0.1180, 0.2197, 0.1313, 0.1640, 0.0980, 0.1228, 0.2897, 0.0836], + device='cuda:5'), in_proj_covar=tensor([0.0199, 0.0205, 0.0194, 0.0193, 0.0181, 0.0218, 0.0218, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:53:11,849 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. 
limit=2.0 +2023-03-26 10:53:32,535 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49297.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:53:53,352 INFO [finetune.py:976] (5/7) Epoch 9, batch 3500, loss[loss=0.2081, simple_loss=0.2586, pruned_loss=0.07879, over 4911.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2629, pruned_loss=0.06723, over 950829.84 frames. ], batch size: 46, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 10:53:58,769 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.331e+01 1.641e+02 1.916e+02 2.289e+02 6.335e+02, threshold=3.833e+02, percent-clipped=2.0 +2023-03-26 10:54:13,547 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-03-26 10:54:34,214 INFO [finetune.py:976] (5/7) Epoch 9, batch 3550, loss[loss=0.1615, simple_loss=0.2303, pruned_loss=0.04634, over 4904.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2594, pruned_loss=0.06599, over 951597.90 frames. ], batch size: 46, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 10:55:09,813 INFO [finetune.py:976] (5/7) Epoch 9, batch 3600, loss[loss=0.2062, simple_loss=0.2608, pruned_loss=0.07584, over 4786.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2584, pruned_loss=0.0656, over 953574.90 frames. ], batch size: 26, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 10:55:09,917 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49422.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:55:11,104 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6394, 1.5831, 1.6774, 1.8020, 1.6504, 3.3322, 1.5003, 1.6343], + device='cuda:5'), covar=tensor([0.0969, 0.1737, 0.1100, 0.0983, 0.1556, 0.0334, 0.1448, 0.1656], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0075, 0.0078, 0.0091, 0.0082, 0.0084, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 10:55:15,219 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.012e+02 1.662e+02 2.002e+02 2.382e+02 4.044e+02, threshold=4.004e+02, percent-clipped=1.0 +2023-03-26 10:55:15,946 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49432.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 10:55:23,263 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0270, 1.9553, 1.6068, 1.8974, 2.0191, 1.7136, 2.3203, 2.0016], + device='cuda:5'), covar=tensor([0.1475, 0.2406, 0.3382, 0.3009, 0.2703, 0.1763, 0.3875, 0.1974], + device='cuda:5'), in_proj_covar=tensor([0.0174, 0.0189, 0.0235, 0.0255, 0.0239, 0.0196, 0.0214, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:55:43,178 INFO [finetune.py:976] (5/7) Epoch 9, batch 3650, loss[loss=0.2342, simple_loss=0.3076, pruned_loss=0.08035, over 4764.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2635, pruned_loss=0.06779, over 954035.25 frames. 
], batch size: 59, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 10:55:46,396 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49477.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:55:50,112 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49483.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:55:56,199 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49493.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 10:55:56,770 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49494.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:56:13,508 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8352, 1.7108, 1.5915, 1.5794, 2.0906, 2.0983, 1.7716, 1.5683], + device='cuda:5'), covar=tensor([0.0328, 0.0353, 0.0549, 0.0349, 0.0245, 0.0542, 0.0272, 0.0439], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0109, 0.0139, 0.0115, 0.0102, 0.0102, 0.0091, 0.0108], + device='cuda:5'), out_proj_covar=tensor([7.0282e-05, 8.5445e-05, 1.1107e-04, 9.0304e-05, 8.0160e-05, 7.5519e-05, + 6.8626e-05, 8.3614e-05], device='cuda:5') +2023-03-26 10:56:14,178 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.17 vs. limit=5.0 +2023-03-26 10:56:17,057 INFO [finetune.py:976] (5/7) Epoch 9, batch 3700, loss[loss=0.1799, simple_loss=0.2547, pruned_loss=0.05252, over 4826.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2665, pruned_loss=0.06889, over 952153.95 frames. ], batch size: 30, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 10:56:18,956 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=49525.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:56:22,120 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-26 10:56:22,512 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.088e+02 1.782e+02 2.076e+02 2.384e+02 4.659e+02, threshold=4.152e+02, percent-clipped=5.0 +2023-03-26 10:56:29,221 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=49542.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:56:44,864 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0098, 1.8667, 1.7170, 2.1198, 2.5699, 2.1726, 1.6946, 1.6642], + device='cuda:5'), covar=tensor([0.2179, 0.2028, 0.1923, 0.1646, 0.1600, 0.1079, 0.2443, 0.1854], + device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0208, 0.0205, 0.0187, 0.0239, 0.0178, 0.0212, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:56:50,563 INFO [finetune.py:976] (5/7) Epoch 9, batch 3750, loss[loss=0.208, simple_loss=0.2694, pruned_loss=0.07329, over 4819.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2671, pruned_loss=0.06927, over 951951.13 frames. 
], batch size: 30, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 10:57:02,709 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49592.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:57:06,422 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.1041, 1.0227, 1.0524, 0.4193, 0.9954, 1.1679, 1.2315, 1.0003], + device='cuda:5'), covar=tensor([0.0850, 0.0613, 0.0539, 0.0580, 0.0577, 0.0716, 0.0434, 0.0723], + device='cuda:5'), in_proj_covar=tensor([0.0129, 0.0156, 0.0121, 0.0135, 0.0132, 0.0126, 0.0146, 0.0148], + device='cuda:5'), out_proj_covar=tensor([9.6063e-05, 1.1446e-04, 8.7487e-05, 9.7864e-05, 9.4404e-05, 9.2107e-05, + 1.0698e-04, 1.0845e-04], device='cuda:5') +2023-03-26 10:57:28,152 INFO [finetune.py:976] (5/7) Epoch 9, batch 3800, loss[loss=0.2187, simple_loss=0.2906, pruned_loss=0.07342, over 4838.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2679, pruned_loss=0.0696, over 952551.81 frames. ], batch size: 44, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 10:57:39,046 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.087e+02 1.635e+02 1.863e+02 2.259e+02 4.048e+02, threshold=3.725e+02, percent-clipped=0.0 +2023-03-26 10:57:50,512 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-03-26 10:58:12,409 INFO [finetune.py:976] (5/7) Epoch 9, batch 3850, loss[loss=0.1999, simple_loss=0.26, pruned_loss=0.06992, over 4896.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2661, pruned_loss=0.06881, over 952905.36 frames. ], batch size: 35, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 10:58:17,296 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3102, 2.1576, 1.6871, 0.8045, 1.8730, 1.8672, 1.5786, 1.8964], + device='cuda:5'), covar=tensor([0.0961, 0.0707, 0.1319, 0.1920, 0.1312, 0.2096, 0.2302, 0.0876], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0199, 0.0200, 0.0187, 0.0215, 0.0206, 0.0222, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:58:27,579 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7770, 2.1180, 1.1870, 2.6341, 2.9966, 2.3208, 2.5072, 2.4467], + device='cuda:5'), covar=tensor([0.1138, 0.1857, 0.2046, 0.0998, 0.1500, 0.1599, 0.1282, 0.1858], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0097, 0.0114, 0.0093, 0.0122, 0.0096, 0.0100, 0.0092], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 10:58:28,201 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0336, 1.4380, 0.8316, 2.0238, 2.4445, 1.7581, 1.7982, 2.0121], + device='cuda:5'), covar=tensor([0.1377, 0.2004, 0.2184, 0.1149, 0.1838, 0.2110, 0.1354, 0.2008], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0097, 0.0114, 0.0093, 0.0122, 0.0096, 0.0100, 0.0092], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 10:58:48,067 INFO [finetune.py:976] (5/7) Epoch 9, batch 3900, loss[loss=0.175, simple_loss=0.2407, pruned_loss=0.05465, over 4831.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2628, pruned_loss=0.06737, over 953561.85 frames. 
], batch size: 39, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 10:58:58,263 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.078e+02 1.638e+02 1.913e+02 2.415e+02 4.821e+02, threshold=3.825e+02, percent-clipped=2.0 +2023-03-26 10:59:31,480 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49766.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:59:34,349 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.96 vs. limit=5.0 +2023-03-26 10:59:36,464 INFO [finetune.py:976] (5/7) Epoch 9, batch 3950, loss[loss=0.2155, simple_loss=0.2619, pruned_loss=0.08455, over 4816.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2596, pruned_loss=0.06645, over 956285.34 frames. ], batch size: 25, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 10:59:40,656 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49778.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 10:59:40,705 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2109, 2.2021, 2.2309, 1.5411, 2.2200, 2.3799, 2.1918, 1.9251], + device='cuda:5'), covar=tensor([0.0647, 0.0726, 0.0748, 0.0978, 0.0605, 0.0758, 0.0737, 0.1029], + device='cuda:5'), in_proj_covar=tensor([0.0137, 0.0133, 0.0144, 0.0124, 0.0118, 0.0144, 0.0144, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 10:59:47,185 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49788.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 11:00:09,537 INFO [finetune.py:976] (5/7) Epoch 9, batch 4000, loss[loss=0.18, simple_loss=0.2357, pruned_loss=0.06212, over 4747.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2597, pruned_loss=0.06727, over 955037.13 frames. ], batch size: 23, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 11:00:12,494 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2340, 2.0709, 1.6273, 0.8141, 1.7533, 1.7971, 1.5253, 1.8203], + device='cuda:5'), covar=tensor([0.0848, 0.0623, 0.1272, 0.1648, 0.1249, 0.1845, 0.1910, 0.0820], + device='cuda:5'), in_proj_covar=tensor([0.0166, 0.0198, 0.0199, 0.0186, 0.0214, 0.0205, 0.0221, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 11:00:12,552 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-03-26 11:00:13,721 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49827.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 11:00:16,514 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.645e+02 2.017e+02 2.376e+02 6.319e+02, threshold=4.034e+02, percent-clipped=3.0 +2023-03-26 11:00:42,825 INFO [finetune.py:976] (5/7) Epoch 9, batch 4050, loss[loss=0.2128, simple_loss=0.2875, pruned_loss=0.06903, over 4896.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2637, pruned_loss=0.06893, over 955141.14 frames. 
], batch size: 37, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 11:00:56,960 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49892.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 11:00:57,008 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5685, 1.4915, 1.3489, 1.4281, 1.5942, 1.3771, 1.7674, 1.5631], + device='cuda:5'), covar=tensor([0.1311, 0.2121, 0.2826, 0.2337, 0.2347, 0.1621, 0.2486, 0.1722], + device='cuda:5'), in_proj_covar=tensor([0.0173, 0.0188, 0.0232, 0.0253, 0.0237, 0.0195, 0.0211, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 11:00:58,215 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4422, 1.4181, 1.3204, 1.6929, 1.5170, 2.9902, 1.2964, 1.5120], + device='cuda:5'), covar=tensor([0.1020, 0.1860, 0.1298, 0.0958, 0.1704, 0.0275, 0.1648, 0.1924], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0075, 0.0078, 0.0091, 0.0081, 0.0084, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004], + device='cuda:5') +2023-03-26 11:01:05,502 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9169, 1.8094, 1.4633, 1.7842, 1.8820, 1.5947, 2.1220, 1.8966], + device='cuda:5'), covar=tensor([0.1446, 0.2375, 0.3619, 0.2751, 0.2873, 0.1914, 0.3007, 0.2013], + device='cuda:5'), in_proj_covar=tensor([0.0173, 0.0188, 0.0233, 0.0253, 0.0238, 0.0195, 0.0212, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 11:01:15,998 INFO [finetune.py:976] (5/7) Epoch 9, batch 4100, loss[loss=0.2095, simple_loss=0.2838, pruned_loss=0.06755, over 4919.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2668, pruned_loss=0.0697, over 954738.31 frames. ], batch size: 42, lr: 3.78e-03, grad_scale: 32.0 +2023-03-26 11:01:22,951 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.254e+02 1.718e+02 2.083e+02 2.512e+02 3.689e+02, threshold=4.166e+02, percent-clipped=0.0 +2023-03-26 11:01:28,935 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=49940.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 11:01:48,772 INFO [finetune.py:976] (5/7) Epoch 9, batch 4150, loss[loss=0.2053, simple_loss=0.2784, pruned_loss=0.06604, over 4814.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2685, pruned_loss=0.07011, over 954412.80 frames. ], batch size: 39, lr: 3.78e-03, grad_scale: 16.0 +2023-03-26 11:02:23,458 INFO [finetune.py:976] (5/7) Epoch 9, batch 4200, loss[loss=0.1654, simple_loss=0.2401, pruned_loss=0.04538, over 4904.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2677, pruned_loss=0.06943, over 954707.76 frames. 
], batch size: 37, lr: 3.78e-03, grad_scale: 16.0 +2023-03-26 11:02:31,376 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.147e+02 1.706e+02 2.002e+02 2.506e+02 6.230e+02, threshold=4.003e+02, percent-clipped=2.0 +2023-03-26 11:02:35,551 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1453, 1.8985, 2.0524, 0.8517, 2.2946, 2.3685, 1.9909, 1.7983], + device='cuda:5'), covar=tensor([0.1259, 0.0957, 0.0729, 0.0908, 0.0459, 0.0892, 0.0606, 0.0986], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0157, 0.0122, 0.0136, 0.0133, 0.0127, 0.0148, 0.0149], + device='cuda:5'), out_proj_covar=tensor([9.6949e-05, 1.1560e-04, 8.8293e-05, 9.8841e-05, 9.5275e-05, 9.3194e-05, + 1.0840e-04, 1.0960e-04], device='cuda:5') +2023-03-26 11:02:57,297 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-03-26 11:03:01,009 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.8928, 3.3983, 3.5623, 3.7808, 3.6778, 3.4297, 3.9333, 1.2484], + device='cuda:5'), covar=tensor([0.0821, 0.0836, 0.0747, 0.0853, 0.1290, 0.1359, 0.0786, 0.4934], + device='cuda:5'), in_proj_covar=tensor([0.0349, 0.0244, 0.0276, 0.0292, 0.0329, 0.0281, 0.0300, 0.0294], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 11:03:12,369 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.45 vs. limit=5.0 +2023-03-26 11:03:15,796 INFO [finetune.py:976] (5/7) Epoch 9, batch 4250, loss[loss=0.2426, simple_loss=0.2897, pruned_loss=0.09771, over 4893.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2657, pruned_loss=0.06884, over 953369.19 frames. ], batch size: 36, lr: 3.78e-03, grad_scale: 16.0 +2023-03-26 11:03:24,161 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=50078.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 11:03:32,333 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=50088.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 11:03:45,151 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-03-26 11:03:53,925 INFO [finetune.py:976] (5/7) Epoch 9, batch 4300, loss[loss=0.1389, simple_loss=0.2093, pruned_loss=0.0343, over 4761.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2619, pruned_loss=0.06689, over 953116.99 frames. ], batch size: 28, lr: 3.78e-03, grad_scale: 16.0 +2023-03-26 11:03:53,998 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50122.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 11:03:56,368 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=50126.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 11:03:57,637 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50128.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 11:04:00,427 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.003e+02 1.608e+02 1.917e+02 2.228e+02 4.011e+02, threshold=3.835e+02, percent-clipped=1.0 +2023-03-26 11:04:03,883 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=50136.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 11:04:49,371 INFO [finetune.py:976] (5/7) Epoch 9, batch 4350, loss[loss=0.2213, simple_loss=0.2677, pruned_loss=0.08744, over 4823.00 frames. ], tot_loss[loss=0.1946, simple_loss=0.2583, pruned_loss=0.06542, over 955582.80 frames. 
], batch size: 30, lr: 3.78e-03, grad_scale: 16.0
+2023-03-26 11:05:18,373 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50189.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:05:30,444 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50197.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 11:05:41,557 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7041, 1.9636, 1.6505, 1.4931, 2.1403, 2.3180, 1.8671, 1.8500],
+ device='cuda:5'), covar=tensor([0.0398, 0.0329, 0.0527, 0.0411, 0.0359, 0.0466, 0.0347, 0.0345],
+ device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0108, 0.0138, 0.0114, 0.0102, 0.0101, 0.0091, 0.0108],
+ device='cuda:5'), out_proj_covar=tensor([6.9587e-05, 8.4884e-05, 1.1026e-04, 8.9278e-05, 7.9656e-05, 7.5223e-05,
+ 6.8646e-05, 8.2996e-05], device='cuda:5')
+2023-03-26 11:06:02,456 INFO [finetune.py:976] (5/7) Epoch 9, batch 4400, loss[loss=0.2223, simple_loss=0.3034, pruned_loss=0.0706, over 4855.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2609, pruned_loss=0.06722, over 953157.52 frames. ], batch size: 44, lr: 3.78e-03, grad_scale: 16.0
+2023-03-26 11:06:02,589 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3156, 1.3885, 1.1716, 1.4061, 1.6175, 1.5248, 1.3808, 1.2201],
+ device='cuda:5'), covar=tensor([0.0360, 0.0274, 0.0636, 0.0275, 0.0229, 0.0459, 0.0316, 0.0392],
+ device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0109, 0.0139, 0.0114, 0.0102, 0.0101, 0.0091, 0.0108],
+ device='cuda:5'), out_proj_covar=tensor([6.9767e-05, 8.5019e-05, 1.1048e-04, 8.9436e-05, 7.9808e-05, 7.5305e-05,
+ 6.8755e-05, 8.3104e-05], device='cuda:5')
+2023-03-26 11:06:14,078 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.827e+01 1.714e+02 1.989e+02 2.480e+02 5.028e+02, threshold=3.977e+02, percent-clipped=2.0
+2023-03-26 11:06:48,136 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50258.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 11:06:58,871 INFO [finetune.py:976] (5/7) Epoch 9, batch 4450, loss[loss=0.1975, simple_loss=0.2654, pruned_loss=0.0648, over 4899.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2645, pruned_loss=0.0679, over 953930.97 frames. ], batch size: 35, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:07:17,513 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0
+2023-03-26 11:07:32,435 INFO [finetune.py:976] (5/7) Epoch 9, batch 4500, loss[loss=0.2157, simple_loss=0.2716, pruned_loss=0.07986, over 4901.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2663, pruned_loss=0.06864, over 953361.85 frames. ], batch size: 37, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:07:38,448 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.049e+02 1.523e+02 1.896e+02 2.480e+02 4.445e+02, threshold=3.793e+02, percent-clipped=2.0
+2023-03-26 11:07:53,473 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.37 vs. limit=5.0
+2023-03-26 11:08:05,986 INFO [finetune.py:976] (5/7) Epoch 9, batch 4550, loss[loss=0.1977, simple_loss=0.2645, pruned_loss=0.06545, over 4754.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2682, pruned_loss=0.06941, over 954494.99 frames. ], batch size: 59, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:08:10,985 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8704, 1.7663, 1.8793, 1.2974, 1.8888, 1.9344, 1.8510, 1.5796],
+ device='cuda:5'), covar=tensor([0.0544, 0.0612, 0.0663, 0.0794, 0.0601, 0.0638, 0.0553, 0.1037],
+ device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0132, 0.0144, 0.0123, 0.0117, 0.0143, 0.0143, 0.0160],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:08:58,492 INFO [finetune.py:976] (5/7) Epoch 9, batch 4600, loss[loss=0.1676, simple_loss=0.2389, pruned_loss=0.04815, over 4805.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2664, pruned_loss=0.06793, over 955242.94 frames. ], batch size: 29, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:08:58,606 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=50422.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:09:03,958 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.2723, 2.8390, 2.5384, 1.2684, 2.6567, 2.2902, 2.0203, 2.3514],
+ device='cuda:5'), covar=tensor([0.0902, 0.0838, 0.1955, 0.2291, 0.1859, 0.2065, 0.2220, 0.1286],
+ device='cuda:5'), in_proj_covar=tensor([0.0167, 0.0199, 0.0200, 0.0186, 0.0214, 0.0206, 0.0222, 0.0196],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:09:05,737 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.3151, 2.9463, 3.0698, 3.2401, 3.0978, 2.9175, 3.3459, 1.0019],
+ device='cuda:5'), covar=tensor([0.0999, 0.0879, 0.1023, 0.1068, 0.1566, 0.1680, 0.0988, 0.5188],
+ device='cuda:5'), in_proj_covar=tensor([0.0351, 0.0244, 0.0276, 0.0291, 0.0330, 0.0282, 0.0301, 0.0296],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:09:06,262 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.176e+02 1.671e+02 1.983e+02 2.416e+02 3.848e+02, threshold=3.965e+02, percent-clipped=1.0
+2023-03-26 11:09:25,361 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5916, 1.5002, 1.4057, 1.5031, 1.7822, 1.7532, 1.5947, 1.3632],
+ device='cuda:5'), covar=tensor([0.0310, 0.0295, 0.0716, 0.0295, 0.0210, 0.0409, 0.0293, 0.0403],
+ device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0109, 0.0139, 0.0114, 0.0102, 0.0102, 0.0091, 0.0108],
+ device='cuda:5'), out_proj_covar=tensor([7.0100e-05, 8.5571e-05, 1.1108e-04, 8.9782e-05, 7.9973e-05, 7.5724e-05,
+ 6.8961e-05, 8.3563e-05], device='cuda:5')
+2023-03-26 11:09:39,358 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=50470.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:09:40,500 INFO [finetune.py:976] (5/7) Epoch 9, batch 4650, loss[loss=0.1996, simple_loss=0.2575, pruned_loss=0.07082, over 4897.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2636, pruned_loss=0.06735, over 956038.22 frames. ], batch size: 32, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:09:50,494 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50484.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:10:15,811 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-03-26 11:10:18,410 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-03-26 11:10:22,781 INFO [finetune.py:976] (5/7) Epoch 9, batch 4700, loss[loss=0.1965, simple_loss=0.2427, pruned_loss=0.07519, over 4055.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2608, pruned_loss=0.06711, over 954395.00 frames. ], batch size: 17, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:10:29,351 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.001e+02 1.527e+02 1.850e+02 2.224e+02 3.838e+02, threshold=3.699e+02, percent-clipped=0.0
+2023-03-26 11:10:38,045 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50546.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:10:42,729 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50553.0, num_to_drop=1, layers_to_drop={3}
+2023-03-26 11:10:56,120 INFO [finetune.py:976] (5/7) Epoch 9, batch 4750, loss[loss=0.1906, simple_loss=0.263, pruned_loss=0.05911, over 4936.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2593, pruned_loss=0.0666, over 955705.42 frames. ], batch size: 33, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:11:18,581 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50607.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:11:29,494 INFO [finetune.py:976] (5/7) Epoch 9, batch 4800, loss[loss=0.2982, simple_loss=0.3358, pruned_loss=0.1303, over 4133.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2625, pruned_loss=0.06803, over 952761.86 frames. ], batch size: 65, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:11:36,103 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.695e+01 1.587e+02 1.907e+02 2.189e+02 3.978e+02, threshold=3.813e+02, percent-clipped=2.0
+2023-03-26 11:11:52,568 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0
+2023-03-26 11:12:03,076 INFO [finetune.py:976] (5/7) Epoch 9, batch 4850, loss[loss=0.2049, simple_loss=0.2652, pruned_loss=0.07227, over 4857.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2657, pruned_loss=0.06855, over 954068.89 frames. ], batch size: 31, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:12:23,761 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0
+2023-03-26 11:12:36,218 INFO [finetune.py:976] (5/7) Epoch 9, batch 4900, loss[loss=0.2724, simple_loss=0.3214, pruned_loss=0.1116, over 4870.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2679, pruned_loss=0.06956, over 955864.25 frames. ], batch size: 34, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:12:42,295 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.028e+02 1.610e+02 1.915e+02 2.289e+02 4.400e+02, threshold=3.830e+02, percent-clipped=2.0
+2023-03-26 11:13:08,690 INFO [finetune.py:976] (5/7) Epoch 9, batch 4950, loss[loss=0.1861, simple_loss=0.2636, pruned_loss=0.0543, over 4917.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2682, pruned_loss=0.06919, over 956312.56 frames. ], batch size: 33, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:13:15,369 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50782.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:13:16,531 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=50784.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:13:23,025 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7118, 2.3841, 2.9587, 1.8290, 2.8211, 3.0058, 2.3049, 3.1537],
+ device='cuda:5'), covar=tensor([0.1385, 0.2073, 0.1473, 0.2636, 0.0893, 0.1714, 0.2629, 0.0821],
+ device='cuda:5'), in_proj_covar=tensor([0.0200, 0.0206, 0.0195, 0.0193, 0.0180, 0.0219, 0.0219, 0.0202],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:13:24,891 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0241, 1.8675, 1.6019, 1.8305, 1.8361, 1.7932, 1.7794, 2.4653],
+ device='cuda:5'), covar=tensor([0.4212, 0.4636, 0.3785, 0.4670, 0.4594, 0.2697, 0.4415, 0.1927],
+ device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0260, 0.0223, 0.0282, 0.0244, 0.0208, 0.0247, 0.0212],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:13:33,490 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50808.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 11:13:51,971 INFO [finetune.py:976] (5/7) Epoch 9, batch 5000, loss[loss=0.21, simple_loss=0.2741, pruned_loss=0.07294, over 4902.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2663, pruned_loss=0.06861, over 956883.35 frames. ], batch size: 43, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:14:03,064 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.212e+02 1.752e+02 2.044e+02 2.444e+02 6.074e+02, threshold=4.089e+02, percent-clipped=4.0
+2023-03-26 11:14:03,135 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=50832.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:14:13,396 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50843.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:14:23,647 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3558, 1.4540, 1.4397, 1.5096, 1.6340, 3.0058, 1.3373, 1.6588],
+ device='cuda:5'), covar=tensor([0.1110, 0.1901, 0.1150, 0.1079, 0.1621, 0.0295, 0.1628, 0.1732],
+ device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0081, 0.0075, 0.0078, 0.0091, 0.0082, 0.0085, 0.0079],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 11:14:23,675 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4353, 1.1940, 1.3031, 1.3730, 1.5946, 1.5186, 1.4228, 1.2793],
+ device='cuda:5'), covar=tensor([0.0331, 0.0333, 0.0541, 0.0304, 0.0215, 0.0398, 0.0318, 0.0386],
+ device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0108, 0.0138, 0.0114, 0.0101, 0.0100, 0.0091, 0.0107],
+ device='cuda:5'), out_proj_covar=tensor([6.9673e-05, 8.4737e-05, 1.0994e-04, 8.9383e-05, 7.9231e-05, 7.4590e-05,
+ 6.8474e-05, 8.2687e-05], device='cuda:5')
+2023-03-26 11:14:24,243 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=50853.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 11:14:37,873 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50869.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 11:14:40,057 INFO [finetune.py:976] (5/7) Epoch 9, batch 5050, loss[loss=0.1607, simple_loss=0.2315, pruned_loss=0.04488, over 4915.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2637, pruned_loss=0.06755, over 957688.16 frames. ], batch size: 36, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:15:00,245 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=50901.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 11:15:00,855 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50902.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:15:13,785 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1460, 1.7162, 2.0070, 2.0264, 1.7431, 1.7332, 1.9754, 1.9277],
+ device='cuda:5'), covar=tensor([0.4684, 0.5219, 0.4076, 0.5077, 0.6114, 0.4854, 0.6243, 0.3909],
+ device='cuda:5'), in_proj_covar=tensor([0.0235, 0.0242, 0.0254, 0.0257, 0.0252, 0.0227, 0.0275, 0.0231],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:15:21,261 INFO [finetune.py:976] (5/7) Epoch 9, batch 5100, loss[loss=0.1874, simple_loss=0.2447, pruned_loss=0.06498, over 4939.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2594, pruned_loss=0.06569, over 958099.48 frames. ], batch size: 33, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:15:29,782 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.571e+02 1.989e+02 2.366e+02 5.072e+02, threshold=3.977e+02, percent-clipped=2.0
+2023-03-26 11:15:38,844 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50946.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:15:54,096 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.33 vs. limit=5.0
+2023-03-26 11:15:55,128 INFO [finetune.py:976] (5/7) Epoch 9, batch 5150, loss[loss=0.2415, simple_loss=0.2943, pruned_loss=0.09432, over 4912.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2596, pruned_loss=0.06594, over 954166.51 frames. ], batch size: 36, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:16:19,675 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51007.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:16:29,116 INFO [finetune.py:976] (5/7) Epoch 9, batch 5200, loss[loss=0.2249, simple_loss=0.2885, pruned_loss=0.08061, over 4882.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2641, pruned_loss=0.06813, over 952188.80 frames. ], batch size: 31, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:16:37,435 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.247e+02 1.691e+02 2.095e+02 2.506e+02 4.401e+02, threshold=4.191e+02, percent-clipped=2.0
+2023-03-26 11:16:38,000 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3636, 2.0222, 2.8222, 4.3298, 3.2066, 2.9767, 1.1987, 3.3978],
+ device='cuda:5'), covar=tensor([0.1697, 0.1458, 0.1380, 0.0569, 0.0659, 0.1383, 0.1923, 0.0569],
+ device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0118, 0.0134, 0.0165, 0.0102, 0.0139, 0.0126, 0.0101],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-26 11:17:12,602 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51062.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:17:18,570 INFO [finetune.py:976] (5/7) Epoch 9, batch 5250, loss[loss=0.2179, simple_loss=0.2938, pruned_loss=0.07102, over 4920.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2664, pruned_loss=0.06895, over 952031.95 frames. ], batch size: 38, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:17:18,697 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6117, 2.8729, 2.4638, 2.1007, 2.6734, 2.8852, 2.7157, 2.4773],
+ device='cuda:5'), covar=tensor([0.0586, 0.0488, 0.0724, 0.0762, 0.0488, 0.0653, 0.0654, 0.0815],
+ device='cuda:5'), in_proj_covar=tensor([0.0136, 0.0133, 0.0146, 0.0124, 0.0118, 0.0144, 0.0144, 0.0161],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:17:32,340 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9818, 1.8993, 1.6156, 1.8061, 1.9975, 1.6882, 2.1993, 1.9065],
+ device='cuda:5'), covar=tensor([0.1543, 0.2301, 0.3404, 0.2892, 0.2873, 0.1864, 0.3097, 0.2036],
+ device='cuda:5'), in_proj_covar=tensor([0.0174, 0.0188, 0.0233, 0.0255, 0.0239, 0.0196, 0.0212, 0.0195],
+ device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:17:50,764 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.61 vs. limit=2.0
+2023-03-26 11:17:51,200 INFO [finetune.py:976] (5/7) Epoch 9, batch 5300, loss[loss=0.2226, simple_loss=0.2913, pruned_loss=0.07689, over 4918.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.269, pruned_loss=0.07087, over 951743.74 frames. ], batch size: 36, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:17:51,956 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51123.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:17:57,263 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.167e+02 1.689e+02 2.023e+02 2.414e+02 5.734e+02, threshold=4.045e+02, percent-clipped=1.0
+2023-03-26 11:18:01,863 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51138.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:18:19,584 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51164.0, num_to_drop=1, layers_to_drop={3}
+2023-03-26 11:18:24,359 INFO [finetune.py:976] (5/7) Epoch 9, batch 5350, loss[loss=0.1834, simple_loss=0.2532, pruned_loss=0.05684, over 4720.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2691, pruned_loss=0.07056, over 953416.02 frames. ], batch size: 23, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:18:24,453 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51172.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 11:18:56,194 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51202.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:19:18,527 INFO [finetune.py:976] (5/7) Epoch 9, batch 5400, loss[loss=0.1917, simple_loss=0.2468, pruned_loss=0.06829, over 4825.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2657, pruned_loss=0.06923, over 955133.02 frames. ], batch size: 30, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:19:26,736 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.913e+01 1.608e+02 1.826e+02 2.251e+02 3.272e+02, threshold=3.651e+02, percent-clipped=0.0
+2023-03-26 11:19:32,716 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51233.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 11:19:48,900 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=51250.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:20:03,230 INFO [finetune.py:976] (5/7) Epoch 9, batch 5450, loss[loss=0.1782, simple_loss=0.2547, pruned_loss=0.05085, over 4785.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2613, pruned_loss=0.06718, over 954809.34 frames. ], batch size: 29, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:20:07,005 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6534, 2.4147, 1.9596, 0.9649, 2.0742, 2.0168, 1.7981, 2.1995],
+ device='cuda:5'), covar=tensor([0.0831, 0.0877, 0.1805, 0.2281, 0.1837, 0.2537, 0.2270, 0.0994],
+ device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0199, 0.0201, 0.0187, 0.0215, 0.0207, 0.0223, 0.0196],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:20:31,733 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51302.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:20:48,635 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0
+2023-03-26 11:20:50,272 INFO [finetune.py:976] (5/7) Epoch 9, batch 5500, loss[loss=0.1996, simple_loss=0.2629, pruned_loss=0.06821, over 4820.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2579, pruned_loss=0.06582, over 954345.16 frames. ], batch size: 41, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:20:56,821 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.040e+02 1.581e+02 1.869e+02 2.249e+02 3.902e+02, threshold=3.738e+02, percent-clipped=2.0
+2023-03-26 11:21:48,487 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0096, 1.2055, 1.9004, 1.8470, 1.6849, 1.6222, 1.7431, 1.7583],
+ device='cuda:5'), covar=tensor([0.4058, 0.5080, 0.4201, 0.4445, 0.5628, 0.4220, 0.5632, 0.3897],
+ device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0242, 0.0254, 0.0257, 0.0251, 0.0226, 0.0276, 0.0230],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:21:48,956 INFO [finetune.py:976] (5/7) Epoch 9, batch 5550, loss[loss=0.1671, simple_loss=0.2357, pruned_loss=0.04921, over 4779.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2594, pruned_loss=0.06589, over 953169.62 frames. ], batch size: 25, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:21:59,548 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51382.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:22:07,982 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51392.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:22:09,484 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0
+2023-03-26 11:22:14,364 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.57 vs. limit=2.0
+2023-03-26 11:22:24,896 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51418.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:22:27,577 INFO [finetune.py:976] (5/7) Epoch 9, batch 5600, loss[loss=0.2253, simple_loss=0.2828, pruned_loss=0.08395, over 4843.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2638, pruned_loss=0.06788, over 952759.30 frames. ], batch size: 47, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:22:33,286 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.626e+02 2.005e+02 2.362e+02 4.096e+02, threshold=4.011e+02, percent-clipped=2.0
+2023-03-26 11:22:36,873 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51438.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:22:39,797 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51443.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:22:46,030 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51453.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:22:52,445 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51464.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 11:22:57,077 INFO [finetune.py:976] (5/7) Epoch 9, batch 5650, loss[loss=0.1659, simple_loss=0.2321, pruned_loss=0.04978, over 4745.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2664, pruned_loss=0.06832, over 952644.32 frames. ], batch size: 26, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:23:05,307 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=51486.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:23:05,978 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2458, 1.9157, 2.5321, 1.8405, 2.3230, 2.3647, 1.9477, 2.6303],
+ device='cuda:5'), covar=tensor([0.1374, 0.1884, 0.1425, 0.1917, 0.1000, 0.1455, 0.2431, 0.0897],
+ device='cuda:5'), in_proj_covar=tensor([0.0196, 0.0203, 0.0193, 0.0190, 0.0178, 0.0215, 0.0216, 0.0200],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:23:10,710 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51495.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:23:16,663 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5281, 2.2760, 2.1910, 1.7333, 2.6909, 2.8463, 2.4292, 2.3715],
+ device='cuda:5'), covar=tensor([0.0274, 0.0323, 0.0442, 0.0388, 0.0249, 0.0474, 0.0276, 0.0312],
+ device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0110, 0.0141, 0.0116, 0.0103, 0.0102, 0.0092, 0.0108],
+ device='cuda:5'), out_proj_covar=tensor([7.0860e-05, 8.5982e-05, 1.1196e-04, 9.0866e-05, 8.0882e-05, 7.5892e-05,
+ 6.9618e-05, 8.3524e-05], device='cuda:5')
+2023-03-26 11:23:20,779 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=51512.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 11:23:26,712 INFO [finetune.py:976] (5/7) Epoch 9, batch 5700, loss[loss=0.1453, simple_loss=0.2028, pruned_loss=0.04391, over 4009.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2611, pruned_loss=0.06748, over 930394.89 frames. ], batch size: 17, lr: 3.77e-03, grad_scale: 16.0
+2023-03-26 11:23:30,378 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51528.0, num_to_drop=1, layers_to_drop={3}
+2023-03-26 11:23:32,890 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.987e+01 1.624e+02 1.963e+02 2.341e+02 6.572e+02, threshold=3.927e+02, percent-clipped=1.0
+2023-03-26 11:23:57,292 INFO [finetune.py:976] (5/7) Epoch 10, batch 0, loss[loss=0.2432, simple_loss=0.2972, pruned_loss=0.09462, over 4829.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.2972, pruned_loss=0.09462, over 4829.00 frames. ], batch size: 47, lr: 3.76e-03, grad_scale: 16.0
+2023-03-26 11:23:57,293 INFO [finetune.py:1001] (5/7) Computing validation loss
+2023-03-26 11:24:16,170 INFO [finetune.py:1010] (5/7) Epoch 10, validation: loss=0.1604, simple_loss=0.2317, pruned_loss=0.04451, over 2265189.00 frames.
+2023-03-26 11:24:16,170 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB
+2023-03-26 11:24:22,510 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51556.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:24:40,654 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0
+2023-03-26 11:24:46,067 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2742, 2.1557, 1.7595, 2.1936, 2.2469, 1.9239, 2.5808, 2.2184],
+ device='cuda:5'), covar=tensor([0.1456, 0.2421, 0.3497, 0.2986, 0.2715, 0.1830, 0.3489, 0.2058],
+ device='cuda:5'), in_proj_covar=tensor([0.0173, 0.0188, 0.0232, 0.0253, 0.0238, 0.0195, 0.0211, 0.0194],
+ device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:24:58,293 INFO [finetune.py:976] (5/7) Epoch 10, batch 50, loss[loss=0.1944, simple_loss=0.2569, pruned_loss=0.06596, over 4769.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2725, pruned_loss=0.0736, over 215780.74 frames. ], batch size: 26, lr: 3.76e-03, grad_scale: 16.0
+2023-03-26 11:25:01,623 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51602.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:25:20,208 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.916e+01 1.735e+02 2.131e+02 2.642e+02 7.480e+02, threshold=4.262e+02, percent-clipped=4.0
+2023-03-26 11:25:31,992 INFO [finetune.py:976] (5/7) Epoch 10, batch 100, loss[loss=0.1817, simple_loss=0.2447, pruned_loss=0.05941, over 4801.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2642, pruned_loss=0.06851, over 380231.43 frames. ], batch size: 39, lr: 3.76e-03, grad_scale: 16.0
+2023-03-26 11:25:33,107 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=51650.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:25:42,999 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6525, 2.4228, 3.0823, 1.9076, 2.9915, 2.8904, 2.4773, 3.0438],
+ device='cuda:5'), covar=tensor([0.1514, 0.1993, 0.1480, 0.2707, 0.0908, 0.1681, 0.2572, 0.1147],
+ device='cuda:5'), in_proj_covar=tensor([0.0199, 0.0206, 0.0196, 0.0193, 0.0180, 0.0217, 0.0219, 0.0202],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:26:04,769 INFO [finetune.py:976] (5/7) Epoch 10, batch 150, loss[loss=0.1791, simple_loss=0.2393, pruned_loss=0.05939, over 4810.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.258, pruned_loss=0.06586, over 508315.61 frames. ], batch size: 41, lr: 3.76e-03, grad_scale: 16.0
+2023-03-26 11:26:18,704 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51718.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:26:33,393 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.046e+02 1.594e+02 1.858e+02 2.240e+02 3.308e+02, threshold=3.716e+02, percent-clipped=0.0
+2023-03-26 11:26:37,116 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51738.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:26:47,598 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51747.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:26:48,176 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51748.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:26:52,006 INFO [finetune.py:976] (5/7) Epoch 10, batch 200, loss[loss=0.2103, simple_loss=0.2736, pruned_loss=0.07348, over 4762.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2573, pruned_loss=0.06639, over 607359.63 frames. ], batch size: 27, lr: 3.76e-03, grad_scale: 16.0
+2023-03-26 11:27:04,852 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=51766.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:27:25,426 INFO [finetune.py:976] (5/7) Epoch 10, batch 250, loss[loss=0.1884, simple_loss=0.2579, pruned_loss=0.05942, over 4927.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2607, pruned_loss=0.06699, over 684340.75 frames. ], batch size: 38, lr: 3.76e-03, grad_scale: 16.0
+2023-03-26 11:27:25,618 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0
+2023-03-26 11:27:33,015 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51808.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:27:45,688 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51828.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 11:27:48,004 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.605e+02 1.930e+02 2.331e+02 5.576e+02, threshold=3.861e+02, percent-clipped=5.0
+2023-03-26 11:27:58,882 INFO [finetune.py:976] (5/7) Epoch 10, batch 300, loss[loss=0.1732, simple_loss=0.255, pruned_loss=0.04567, over 4923.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2635, pruned_loss=0.0677, over 744775.76 frames. ], batch size: 42, lr: 3.76e-03, grad_scale: 16.0
+2023-03-26 11:28:00,160 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51851.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:28:02,625 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4466, 1.5346, 1.8398, 1.7879, 1.5259, 3.5028, 1.3669, 1.6464],
+ device='cuda:5'), covar=tensor([0.1002, 0.1727, 0.1217, 0.1018, 0.1663, 0.0231, 0.1469, 0.1678],
+ device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0076, 0.0078, 0.0091, 0.0082, 0.0085, 0.0079],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 11:28:17,690 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=51876.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 11:28:17,975 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.94 vs. limit=5.0
+2023-03-26 11:28:31,958 INFO [finetune.py:976] (5/7) Epoch 10, batch 350, loss[loss=0.1896, simple_loss=0.2583, pruned_loss=0.06051, over 4788.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2648, pruned_loss=0.06826, over 789034.20 frames. ], batch size: 29, lr: 3.76e-03, grad_scale: 16.0
+2023-03-26 11:28:54,280 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.052e+02 1.678e+02 2.044e+02 2.443e+02 3.814e+02, threshold=4.089e+02, percent-clipped=0.0
+2023-03-26 11:29:04,644 INFO [finetune.py:976] (5/7) Epoch 10, batch 400, loss[loss=0.1959, simple_loss=0.2623, pruned_loss=0.06476, over 4923.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2667, pruned_loss=0.06881, over 826771.32 frames. ], batch size: 33, lr: 3.76e-03, grad_scale: 16.0
+2023-03-26 11:29:56,981 INFO [finetune.py:976] (5/7) Epoch 10, batch 450, loss[loss=0.188, simple_loss=0.2513, pruned_loss=0.06241, over 4837.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2667, pruned_loss=0.06924, over 854918.57 frames. ], batch size: 47, lr: 3.76e-03, grad_scale: 32.0
+2023-03-26 11:30:21,164 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.068e+02 1.724e+02 2.025e+02 2.574e+02 4.346e+02, threshold=4.050e+02, percent-clipped=1.0
+2023-03-26 11:30:24,986 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52038.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:30:26,195 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52040.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:30:30,960 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52048.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:30:30,986 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8159, 2.0836, 1.5588, 1.5700, 2.1966, 2.1657, 1.9764, 1.8840],
+ device='cuda:5'), covar=tensor([0.0393, 0.0302, 0.0544, 0.0357, 0.0291, 0.0482, 0.0273, 0.0360],
+ device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0109, 0.0139, 0.0115, 0.0102, 0.0101, 0.0091, 0.0108],
+ device='cuda:5'), out_proj_covar=tensor([7.0280e-05, 8.4918e-05, 1.1057e-04, 8.9866e-05, 8.0185e-05, 7.5028e-05,
+ 6.8834e-05, 8.3125e-05], device='cuda:5')
+2023-03-26 11:30:31,463 INFO [finetune.py:976] (5/7) Epoch 10, batch 500, loss[loss=0.1611, simple_loss=0.221, pruned_loss=0.05062, over 4825.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.263, pruned_loss=0.06799, over 878896.31 frames. ], batch size: 41, lr: 3.76e-03, grad_scale: 32.0
+2023-03-26 11:30:56,806 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=52086.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:31:02,789 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=52096.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:31:03,434 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.7587, 3.2462, 3.4438, 3.5827, 3.5574, 3.3050, 3.7879, 1.3025],
+ device='cuda:5'), covar=tensor([0.0721, 0.0891, 0.0828, 0.0987, 0.1077, 0.1558, 0.0841, 0.5098],
+ device='cuda:5'), in_proj_covar=tensor([0.0352, 0.0248, 0.0279, 0.0295, 0.0332, 0.0283, 0.0304, 0.0297],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:31:04,604 INFO [finetune.py:976] (5/7) Epoch 10, batch 550, loss[loss=0.1846, simple_loss=0.2568, pruned_loss=0.05624, over 4820.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2602, pruned_loss=0.0668, over 894510.41 frames. ], batch size: 41, lr: 3.76e-03, grad_scale: 32.0
+2023-03-26 11:31:05,941 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52101.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:31:07,069 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52103.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:31:10,794 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7667, 1.6580, 1.5419, 1.7989, 2.0731, 1.8300, 1.5039, 1.5372],
+ device='cuda:5'), covar=tensor([0.1637, 0.1629, 0.1523, 0.1364, 0.1405, 0.1048, 0.2279, 0.1522],
+ device='cuda:5'), in_proj_covar=tensor([0.0237, 0.0208, 0.0207, 0.0188, 0.0240, 0.0180, 0.0213, 0.0194],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:31:14,215 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5905, 3.5609, 3.4138, 1.8267, 3.6585, 2.7633, 1.0228, 2.4133],
+ device='cuda:5'), covar=tensor([0.2754, 0.2330, 0.1718, 0.3544, 0.1155, 0.1150, 0.4404, 0.1839],
+ device='cuda:5'), in_proj_covar=tensor([0.0154, 0.0175, 0.0161, 0.0129, 0.0157, 0.0123, 0.0146, 0.0123],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-26 11:31:16,079 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0487, 2.0030, 1.5015, 0.6440, 1.6672, 1.7368, 1.5397, 1.8452],
+ device='cuda:5'), covar=tensor([0.0931, 0.0622, 0.1264, 0.1764, 0.1348, 0.2004, 0.1982, 0.0769],
+ device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0203, 0.0204, 0.0191, 0.0218, 0.0210, 0.0226, 0.0199],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:31:27,062 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.020e+02 1.591e+02 1.822e+02 2.163e+02 6.487e+02, threshold=3.643e+02, percent-clipped=1.0
+2023-03-26 11:31:27,846 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1643, 1.8366, 1.7480, 2.0276, 2.7048, 2.0728, 1.9662, 1.6509],
+ device='cuda:5'), covar=tensor([0.1859, 0.1927, 0.1669, 0.1547, 0.1665, 0.1034, 0.2027, 0.1744],
+ device='cuda:5'), in_proj_covar=tensor([0.0239, 0.0210, 0.0208, 0.0189, 0.0242, 0.0181, 0.0214, 0.0196],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:31:31,818 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0
+2023-03-26 11:31:37,962 INFO [finetune.py:976] (5/7) Epoch 10, batch 600, loss[loss=0.1589, simple_loss=0.2249, pruned_loss=0.04643, over 4769.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2604, pruned_loss=0.06725, over 907524.53 frames. ], batch size: 28, lr: 3.76e-03, grad_scale: 32.0
+2023-03-26 11:31:39,240 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52151.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:32:15,974 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52193.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:32:19,560 INFO [finetune.py:976] (5/7) Epoch 10, batch 650, loss[loss=0.198, simple_loss=0.2657, pruned_loss=0.0652, over 4710.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2636, pruned_loss=0.06795, over 920241.72 frames. ], batch size: 59, lr: 3.76e-03, grad_scale: 32.0
+2023-03-26 11:32:19,619 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=52199.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:32:35,489 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7553, 0.6299, 1.6031, 1.5663, 1.4490, 1.3792, 1.3639, 1.5833],
+ device='cuda:5'), covar=tensor([0.4541, 0.4938, 0.4345, 0.4558, 0.5384, 0.4300, 0.5468, 0.4210],
+ device='cuda:5'), in_proj_covar=tensor([0.0234, 0.0240, 0.0253, 0.0256, 0.0251, 0.0226, 0.0273, 0.0229],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:32:42,611 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.128e+02 1.681e+02 1.969e+02 2.336e+02 3.855e+02, threshold=3.938e+02, percent-clipped=2.0
+2023-03-26 11:32:53,489 INFO [finetune.py:976] (5/7) Epoch 10, batch 700, loss[loss=0.1843, simple_loss=0.2739, pruned_loss=0.04731, over 4910.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2661, pruned_loss=0.06873, over 928828.13 frames. ], batch size: 42, lr: 3.76e-03, grad_scale: 32.0
+2023-03-26 11:32:56,646 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52254.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:33:10,601 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0
+2023-03-26 11:33:10,786 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.71 vs. limit=5.0
+2023-03-26 11:33:26,704 INFO [finetune.py:976] (5/7) Epoch 10, batch 750, loss[loss=0.1878, simple_loss=0.2633, pruned_loss=0.05614, over 4805.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2666, pruned_loss=0.06833, over 935795.21 frames. ], batch size: 40, lr: 3.76e-03, grad_scale: 32.0
+2023-03-26 11:33:45,065 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52312.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:34:02,791 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.081e+02 1.612e+02 1.864e+02 2.364e+02 4.342e+02, threshold=3.728e+02, percent-clipped=1.0
+2023-03-26 11:34:15,213 INFO [finetune.py:976] (5/7) Epoch 10, batch 800, loss[loss=0.185, simple_loss=0.2491, pruned_loss=0.06046, over 4830.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2666, pruned_loss=0.06794, over 941071.89 frames. ], batch size: 39, lr: 3.76e-03, grad_scale: 32.0
+2023-03-26 11:34:30,569 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52373.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:34:49,110 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52396.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:34:50,910 INFO [finetune.py:976] (5/7) Epoch 10, batch 850, loss[loss=0.2287, simple_loss=0.2881, pruned_loss=0.08472, over 4902.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2653, pruned_loss=0.06808, over 944703.61 frames. ], batch size: 37, lr: 3.76e-03, grad_scale: 32.0
+2023-03-26 11:34:54,134 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52403.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:35:14,904 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.098e+02 1.556e+02 1.848e+02 2.239e+02 3.627e+02, threshold=3.695e+02, percent-clipped=0.0
+2023-03-26 11:35:26,401 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0
+2023-03-26 11:35:36,903 INFO [finetune.py:976] (5/7) Epoch 10, batch 900, loss[loss=0.1555, simple_loss=0.2209, pruned_loss=0.04502, over 4828.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.261, pruned_loss=0.06643, over 948304.99 frames. ], batch size: 33, lr: 3.76e-03, grad_scale: 32.0
+2023-03-26 11:35:38,214 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=52451.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:35:59,106 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4213, 1.4212, 1.4507, 0.7623, 1.5391, 1.5475, 1.3965, 1.3178],
+ device='cuda:5'), covar=tensor([0.0587, 0.0691, 0.0681, 0.0902, 0.0779, 0.0656, 0.0652, 0.1146],
+ device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0132, 0.0142, 0.0123, 0.0118, 0.0141, 0.0141, 0.0159],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:36:16,375 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-03-26 11:36:25,702 INFO [finetune.py:976] (5/7) Epoch 10, batch 950, loss[loss=0.2201, simple_loss=0.2909, pruned_loss=0.07461, over 4914.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2597, pruned_loss=0.06585, over 949914.45 frames. ], batch size: 36, lr: 3.76e-03, grad_scale: 32.0
+2023-03-26 11:36:45,068 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3163, 2.1244, 1.8851, 2.3666, 2.1809, 2.1647, 2.1637, 3.0384],
+ device='cuda:5'), covar=tensor([0.4666, 0.6276, 0.3858, 0.5182, 0.5428, 0.2846, 0.5027, 0.1876],
+ device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0259, 0.0222, 0.0279, 0.0243, 0.0208, 0.0245, 0.0212],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:36:46,734 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.124e+02 1.551e+02 1.918e+02 2.238e+02 5.409e+02, threshold=3.837e+02, percent-clipped=4.0
+2023-03-26 11:37:01,195 INFO [finetune.py:976] (5/7) Epoch 10, batch 1000, loss[loss=0.2452, simple_loss=0.3043, pruned_loss=0.09299, over 4827.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2619, pruned_loss=0.0667, over 951770.41 frames. ], batch size: 33, lr: 3.76e-03, grad_scale: 32.0
+2023-03-26 11:37:01,275 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52549.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:37:04,950 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52555.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 11:37:05,256 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0
+2023-03-26 11:37:07,383 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0
+2023-03-26 11:38:00,467 INFO [finetune.py:976] (5/7) Epoch 10, batch 1050, loss[loss=0.2315, simple_loss=0.2944, pruned_loss=0.0843, over 4816.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2646, pruned_loss=0.06693, over 951977.52 frames. ], batch size: 40, lr: 3.76e-03, grad_scale: 32.0
+2023-03-26 11:38:09,720 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3608, 1.4373, 1.2341, 1.4424, 1.6013, 1.5264, 1.4144, 1.2787],
+ device='cuda:5'), covar=tensor([0.0339, 0.0250, 0.0536, 0.0237, 0.0206, 0.0467, 0.0296, 0.0348],
+ device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0109, 0.0139, 0.0114, 0.0102, 0.0102, 0.0091, 0.0108],
+ device='cuda:5'), out_proj_covar=tensor([7.0390e-05, 8.4967e-05, 1.1073e-04, 8.9827e-05, 7.9787e-05, 7.5280e-05,
+ 6.8853e-05, 8.3132e-05], device='cuda:5')
+2023-03-26 11:38:19,738 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-03-26 11:38:21,490 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52616.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 11:38:31,486 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.240e+02 1.591e+02 1.928e+02 2.293e+02 3.930e+02, threshold=3.855e+02, percent-clipped=1.0
+2023-03-26 11:38:38,205 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0
+2023-03-26 11:38:44,945 INFO [finetune.py:976] (5/7) Epoch 10, batch 1100, loss[loss=0.2217, simple_loss=0.2779, pruned_loss=0.08272, over 4814.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2661, pruned_loss=0.06754, over 953759.23 frames. ], batch size: 38, lr: 3.76e-03, grad_scale: 32.0
+2023-03-26 11:38:59,647 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52668.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:39:17,761 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52696.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:39:19,453 INFO [finetune.py:976] (5/7) Epoch 10, batch 1150, loss[loss=0.1828, simple_loss=0.2642, pruned_loss=0.05071, over 4770.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2662, pruned_loss=0.06718, over 954081.07 frames. ], batch size: 28, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:39:40,840 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.071e+02 1.654e+02 1.930e+02 2.314e+02 4.484e+02, threshold=3.861e+02, percent-clipped=2.0
+2023-03-26 11:39:48,716 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=52744.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:39:52,581 INFO [finetune.py:976] (5/7) Epoch 10, batch 1200, loss[loss=0.2028, simple_loss=0.2703, pruned_loss=0.06765, over 4814.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2642, pruned_loss=0.06607, over 955344.34 frames. ], batch size: 39, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:39:57,316 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52752.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:40:35,761 INFO [finetune.py:976] (5/7) Epoch 10, batch 1250, loss[loss=0.1757, simple_loss=0.2354, pruned_loss=0.05795, over 4761.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2616, pruned_loss=0.06578, over 954548.74 frames. ], batch size: 28, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:40:47,086 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52813.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:41:05,427 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.045e+02 1.514e+02 1.794e+02 2.223e+02 4.744e+02, threshold=3.588e+02, percent-clipped=2.0
+2023-03-26 11:41:19,447 INFO [finetune.py:976] (5/7) Epoch 10, batch 1300, loss[loss=0.2062, simple_loss=0.2546, pruned_loss=0.07888, over 4714.00 frames. ], tot_loss[loss=0.1931, simple_loss=0.2577, pruned_loss=0.06426, over 956456.40 frames. ], batch size: 59, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:41:19,555 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52849.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:41:51,931 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=52897.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:41:53,108 INFO [finetune.py:976] (5/7) Epoch 10, batch 1350, loss[loss=0.1955, simple_loss=0.2605, pruned_loss=0.06527, over 4885.00 frames. ], tot_loss[loss=0.1947, simple_loss=0.2589, pruned_loss=0.06522, over 957018.86 frames. ], batch size: 32, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:42:00,051 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0
+2023-03-26 11:42:02,436 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52911.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 11:42:15,926 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.950e+01 1.660e+02 2.003e+02 2.564e+02 3.985e+02, threshold=4.006e+02, percent-clipped=2.0
+2023-03-26 11:42:22,670 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-03-26 11:42:30,879 INFO [finetune.py:976] (5/7) Epoch 10, batch 1400, loss[loss=0.2551, simple_loss=0.3172, pruned_loss=0.0965, over 4837.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2639, pruned_loss=0.0675, over 957478.88 frames. ], batch size: 47, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:42:48,560 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52968.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:42:52,257 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0230, 1.8291, 1.7599, 2.0809, 2.3536, 2.1998, 1.5517, 1.7524],
+ device='cuda:5'), covar=tensor([0.2156, 0.2065, 0.1851, 0.1560, 0.1566, 0.1014, 0.2438, 0.1875],
+ device='cuda:5'), in_proj_covar=tensor([0.0237, 0.0209, 0.0207, 0.0188, 0.0241, 0.0180, 0.0213, 0.0195],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:42:55,911 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8276, 1.3871, 1.8528, 1.7610, 1.5765, 1.5766, 1.6866, 1.6213],
+ device='cuda:5'), covar=tensor([0.4481, 0.4772, 0.3699, 0.4401, 0.5280, 0.3980, 0.5412, 0.3931],
+ device='cuda:5'), in_proj_covar=tensor([0.0234, 0.0239, 0.0253, 0.0256, 0.0251, 0.0227, 0.0273, 0.0229],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:43:14,419 INFO [finetune.py:976] (5/7) Epoch 10, batch 1450, loss[loss=0.1864, simple_loss=0.2607, pruned_loss=0.05603, over 4912.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2647, pruned_loss=0.06723, over 955909.82 frames. ], batch size: 42, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:43:22,376 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0
+2023-03-26 11:43:35,021 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=53016.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:43:45,115 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.022e+02 1.605e+02 1.913e+02 2.318e+02 4.347e+02, threshold=3.826e+02, percent-clipped=3.0
+2023-03-26 11:43:55,917 INFO [finetune.py:976] (5/7) Epoch 10, batch 1500, loss[loss=0.2186, simple_loss=0.2808, pruned_loss=0.07818, over 4894.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2675, pruned_loss=0.06872, over 955364.44 frames. ], batch size: 35, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:44:00,677 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53056.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:44:04,361 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6537, 1.5340, 1.3636, 1.4879, 1.9705, 1.8661, 1.5912, 1.4118],
+ device='cuda:5'), covar=tensor([0.0295, 0.0286, 0.0524, 0.0283, 0.0216, 0.0328, 0.0272, 0.0357],
+ device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0108, 0.0138, 0.0114, 0.0101, 0.0101, 0.0091, 0.0107],
+ device='cuda:5'), out_proj_covar=tensor([6.9878e-05, 8.4556e-05, 1.0953e-04, 8.9396e-05, 7.8924e-05, 7.4999e-05,
+ 6.8609e-05, 8.2518e-05], device='cuda:5')
+2023-03-26 11:44:16,364 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7947, 1.7263, 1.5673, 1.9073, 2.3192, 1.9539, 1.4835, 1.4772],
+ device='cuda:5'), covar=tensor([0.2333, 0.2138, 0.2034, 0.1648, 0.1758, 0.1209, 0.2551, 0.2037],
+ device='cuda:5'), in_proj_covar=tensor([0.0235, 0.0207, 0.0206, 0.0187, 0.0239, 0.0179, 0.0212, 0.0194],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:44:22,466 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1630, 2.0751, 1.7123, 2.1782, 2.0052, 1.9400, 1.9687, 2.8647],
+ device='cuda:5'), covar=tensor([0.4515, 0.6022, 0.4142, 0.5295, 0.5082, 0.2801, 0.5228, 0.1865],
+ device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0260, 0.0222, 0.0279, 0.0244, 0.0209, 0.0245, 0.0213],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:44:29,473 INFO [finetune.py:976] (5/7) Epoch 10, batch 1550, loss[loss=0.1946, simple_loss=0.2739, pruned_loss=0.05771, over 4687.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2669, pruned_loss=0.06814, over 955427.70 frames. ], batch size: 54, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:44:35,500 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53108.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:44:41,520 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53117.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:44:52,482 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.115e+02 1.652e+02 2.005e+02 2.543e+02 4.651e+02, threshold=4.009e+02, percent-clipped=4.0
+2023-03-26 11:45:03,286 INFO [finetune.py:976] (5/7) Epoch 10, batch 1600, loss[loss=0.1677, simple_loss=0.2292, pruned_loss=0.05312, over 4828.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2639, pruned_loss=0.06673, over 955043.43 frames. ], batch size: 30, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:45:19,298 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7854, 1.4876, 2.1325, 3.4155, 2.3923, 2.4223, 0.9004, 2.6300],
+ device='cuda:5'), covar=tensor([0.1601, 0.1517, 0.1319, 0.0557, 0.0751, 0.1490, 0.1995, 0.0536],
+ device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0118, 0.0134, 0.0165, 0.0102, 0.0139, 0.0127, 0.0102],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-26 11:45:34,430 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.67 vs. limit=2.0
+2023-03-26 11:45:48,100 INFO [finetune.py:976] (5/7) Epoch 10, batch 1650, loss[loss=0.2177, simple_loss=0.2747, pruned_loss=0.08041, over 4825.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2616, pruned_loss=0.06614, over 955568.25 frames. ], batch size: 33, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:45:48,526 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0
+2023-03-26 11:45:56,051 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53211.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 11:46:10,716 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.364e+01 1.592e+02 1.774e+02 2.189e+02 3.836e+02, threshold=3.549e+02, percent-clipped=0.0
+2023-03-26 11:46:16,362 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53241.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:46:23,558 INFO [finetune.py:976] (5/7) Epoch 10, batch 1700, loss[loss=0.21, simple_loss=0.269, pruned_loss=0.07551, over 4746.00 frames. ], tot_loss[loss=0.1947, simple_loss=0.2589, pruned_loss=0.06527, over 955863.07 frames. ], batch size: 26, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:46:29,715 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=53259.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 11:46:43,962 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9885, 1.9761, 1.6398, 1.9213, 1.9375, 1.6152, 2.2852, 2.0163],
+ device='cuda:5'), covar=tensor([0.1331, 0.2121, 0.3013, 0.2756, 0.2839, 0.1677, 0.3237, 0.1766],
+ device='cuda:5'), in_proj_covar=tensor([0.0173, 0.0187, 0.0231, 0.0252, 0.0238, 0.0194, 0.0210, 0.0193],
+ device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:46:50,503 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5916, 1.4525, 1.3361, 1.4648, 1.7901, 1.6648, 1.4645, 1.2890],
+ device='cuda:5'), covar=tensor([0.0285, 0.0250, 0.0558, 0.0241, 0.0176, 0.0467, 0.0317, 0.0345],
+ device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0109, 0.0139, 0.0114, 0.0101, 0.0102, 0.0091, 0.0108],
+ device='cuda:5'), out_proj_covar=tensor([7.0335e-05, 8.4783e-05, 1.1049e-04, 8.9657e-05, 7.9367e-05, 7.5638e-05,
+ 6.9155e-05, 8.3285e-05], device='cuda:5')
+2023-03-26 11:46:56,427 INFO [finetune.py:976] (5/7) Epoch 10, batch 1750, loss[loss=0.1837, simple_loss=0.2535, pruned_loss=0.05696, over 4911.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2613, pruned_loss=0.06625, over 956185.26 frames. ], batch size: 37, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:46:58,868 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53302.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:47:00,083 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53304.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:47:07,271 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53315.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:47:09,108 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9629, 1.7308, 1.4971, 1.7122, 1.6647, 1.7116, 1.6885, 2.4569],
+ device='cuda:5'), covar=tensor([0.4349, 0.5440, 0.3971, 0.4895, 0.4791, 0.2686, 0.4838, 0.1873],
+ device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0260, 0.0222, 0.0279, 0.0243, 0.0209, 0.0244, 0.0212],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:47:18,948 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.172e+02 1.605e+02 1.832e+02 2.176e+02 4.638e+02, threshold=3.664e+02, percent-clipped=2.0
+2023-03-26 11:47:29,900 INFO [finetune.py:976] (5/7) Epoch 10, batch 1800, loss[loss=0.2248, simple_loss=0.2888, pruned_loss=0.08035, over 4821.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2643, pruned_loss=0.06709, over 954787.58 frames. ], batch size: 39, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:47:45,407 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53365.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:47:46,669 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53367.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:47:56,036 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53376.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:48:26,357 INFO [finetune.py:976] (5/7) Epoch 10, batch 1850, loss[loss=0.2193, simple_loss=0.2894, pruned_loss=0.07455, over 4846.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2663, pruned_loss=0.06768, over 956635.37 frames. ], batch size: 47, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:48:32,693 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53408.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:48:35,084 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53412.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:48:51,324 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53428.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 11:48:58,642 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.045e+02 1.752e+02 2.111e+02 2.637e+02 7.323e+02, threshold=4.222e+02, percent-clipped=6.0
+2023-03-26 11:49:10,462 INFO [finetune.py:976] (5/7) Epoch 10, batch 1900, loss[loss=0.1986, simple_loss=0.2684, pruned_loss=0.0644, over 4783.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2676, pruned_loss=0.06819, over 957457.73 frames. ], batch size: 45, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:49:12,967 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9587, 4.3579, 4.2813, 2.3765, 4.5338, 3.4043, 0.8009, 3.0827],
+ device='cuda:5'), covar=tensor([0.2357, 0.1516, 0.1210, 0.2808, 0.0736, 0.0862, 0.4249, 0.1275],
+ device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0174, 0.0161, 0.0129, 0.0157, 0.0123, 0.0146, 0.0123],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-26 11:49:14,814 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=53456.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:49:18,801 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0
+2023-03-26 11:49:29,814 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9999, 1.2737, 0.7806, 1.9759, 2.2866, 1.7713, 1.7076, 1.9081],
+ device='cuda:5'), covar=tensor([0.1333, 0.2030, 0.2342, 0.1063, 0.1940, 0.2178, 0.1344, 0.1843],
+ device='cuda:5'), in_proj_covar=tensor([0.0088, 0.0094, 0.0111, 0.0092, 0.0121, 0.0094, 0.0098, 0.0091],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-26 11:49:36,181 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7243, 1.5232, 1.5680, 1.6299, 1.1429, 3.7128, 1.4226, 1.9133],
+ device='cuda:5'), covar=tensor([0.3365, 0.2477, 0.2161, 0.2310, 0.1866, 0.0161, 0.2656, 0.1355],
+ device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0120, 0.0123, 0.0117, 0.0099, 0.0100, 0.0099],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 11:49:38,155 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.44 vs. limit=5.0
+2023-03-26 11:49:43,867 INFO [finetune.py:976] (5/7) Epoch 10, batch 1950, loss[loss=0.1607, simple_loss=0.2286, pruned_loss=0.04641, over 4855.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2657, pruned_loss=0.06695, over 955868.15 frames. ], batch size: 31, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:49:45,137 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7474, 3.5125, 3.4152, 1.7818, 3.6855, 2.7320, 0.9701, 2.4246],
+ device='cuda:5'), covar=tensor([0.2776, 0.2073, 0.1593, 0.3189, 0.1177, 0.1056, 0.4179, 0.1688],
+ device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0174, 0.0161, 0.0129, 0.0157, 0.0123, 0.0146, 0.0123],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-26 11:49:56,148 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0
+2023-03-26 11:50:09,884 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.065e+02 1.535e+02 1.778e+02 2.101e+02 3.650e+02, threshold=3.555e+02, percent-clipped=0.0
+2023-03-26 11:50:29,329 INFO [finetune.py:976] (5/7) Epoch 10, batch 2000, loss[loss=0.1502, simple_loss=0.2064, pruned_loss=0.04698, over 4218.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2615, pruned_loss=0.06524, over 954991.04 frames. ], batch size: 18, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:51:22,348 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53597.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:51:23,505 INFO [finetune.py:976] (5/7) Epoch 10, batch 2050, loss[loss=0.1874, simple_loss=0.2544, pruned_loss=0.06015, over 4783.00 frames. ], tot_loss[loss=0.1937, simple_loss=0.2586, pruned_loss=0.0644, over 956082.26 frames. ], batch size: 28, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:51:35,947 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-03-26 11:51:44,829 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.079e+02 1.628e+02 1.908e+02 2.249e+02 5.707e+02, threshold=3.816e+02, percent-clipped=1.0
+2023-03-26 11:51:49,806 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0967, 1.5716, 1.2062, 2.0270, 2.3814, 1.5819, 1.9013, 1.9450],
+ device='cuda:5'), covar=tensor([0.1345, 0.2023, 0.1935, 0.1070, 0.1739, 0.1922, 0.1395, 0.1782],
+ device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0095, 0.0113, 0.0093, 0.0122, 0.0095, 0.0099, 0.0092],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-26 11:51:56,176 INFO [finetune.py:976] (5/7) Epoch 10, batch 2100, loss[loss=0.1802, simple_loss=0.2595, pruned_loss=0.05041, over 4818.00 frames. ], tot_loss[loss=0.1929, simple_loss=0.2575, pruned_loss=0.06414, over 955235.36 frames. ], batch size: 40, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:52:03,971 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53660.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:52:13,508 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53671.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:52:31,724 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0
+2023-03-26 11:52:37,542 INFO [finetune.py:976] (5/7) Epoch 10, batch 2150, loss[loss=0.2214, simple_loss=0.2931, pruned_loss=0.07484, over 4865.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.262, pruned_loss=0.06654, over 954867.86 frames. ], batch size: 34, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:52:52,714 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53712.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:52:58,876 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.57 vs. limit=2.0
+2023-03-26 11:53:08,711 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53723.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 11:53:19,767 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.135e+02 1.764e+02 2.057e+02 2.459e+02 5.535e+02, threshold=4.114e+02, percent-clipped=2.0
+2023-03-26 11:53:23,365 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7956, 3.6923, 3.5406, 1.9421, 3.7542, 2.8707, 0.9546, 2.5448],
+ device='cuda:5'), covar=tensor([0.2348, 0.1866, 0.1528, 0.3026, 0.1163, 0.0991, 0.4204, 0.1441],
+ device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0172, 0.0158, 0.0126, 0.0154, 0.0121, 0.0143, 0.0120],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-26 11:53:34,460 INFO [finetune.py:976] (5/7) Epoch 10, batch 2200, loss[loss=0.2066, simple_loss=0.2648, pruned_loss=0.07419, over 4897.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2654, pruned_loss=0.06805, over 955374.18 frames. ], batch size: 36, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:53:43,157 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=53760.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:53:48,153 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2381, 2.2024, 1.9540, 2.2488, 2.8923, 2.3122, 2.0997, 1.7403],
+ device='cuda:5'), covar=tensor([0.2190, 0.1988, 0.1954, 0.1796, 0.1682, 0.1064, 0.2161, 0.2038],
+ device='cuda:5'), in_proj_covar=tensor([0.0233, 0.0205, 0.0205, 0.0186, 0.0238, 0.0178, 0.0210, 0.0193],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:54:07,989 INFO [finetune.py:976] (5/7) Epoch 10, batch 2250, loss[loss=0.1994, simple_loss=0.2487, pruned_loss=0.07503, over 4705.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.266, pruned_loss=0.06841, over 954563.62 frames. ], batch size: 23, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:54:21,776 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0563, 1.7699, 1.5978, 1.7551, 1.7302, 1.6605, 1.7273, 2.4860],
+ device='cuda:5'), covar=tensor([0.4409, 0.5385, 0.3727, 0.4850, 0.4754, 0.2854, 0.4671, 0.1753],
+ device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0260, 0.0222, 0.0279, 0.0243, 0.0209, 0.0244, 0.0211],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:54:30,210 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.107e+02 1.658e+02 1.958e+02 2.430e+02 3.560e+02, threshold=3.915e+02, percent-clipped=0.0
+2023-03-26 11:54:41,559 INFO [finetune.py:976] (5/7) Epoch 10, batch 2300, loss[loss=0.1854, simple_loss=0.2583, pruned_loss=0.05629, over 4889.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2656, pruned_loss=0.06748, over 954321.48 frames. ], batch size: 35, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:55:15,981 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53897.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:55:17,110 INFO [finetune.py:976] (5/7) Epoch 10, batch 2350, loss[loss=0.2268, simple_loss=0.275, pruned_loss=0.08932, over 4754.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2638, pruned_loss=0.06663, over 953188.34 frames. ], batch size: 26, lr: 3.75e-03, grad_scale: 32.0
+2023-03-26 11:55:23,490 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5718, 2.3893, 1.9746, 1.0148, 2.2008, 1.9727, 1.8219, 2.2138],
+ device='cuda:5'), covar=tensor([0.0904, 0.0763, 0.1516, 0.2095, 0.1685, 0.2205, 0.2142, 0.0987],
+ device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0201, 0.0202, 0.0188, 0.0217, 0.0208, 0.0224, 0.0198],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 11:55:47,268 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.820e+01 1.627e+02 1.969e+02 2.442e+02 4.599e+02, threshold=3.938e+02, percent-clipped=2.0
+2023-03-26 11:55:58,285 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=53945.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 11:56:05,529 INFO [finetune.py:976] (5/7) Epoch 10, batch 2400, loss[loss=0.2221, simple_loss=0.28, pruned_loss=0.08207, over 4849.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2612, pruned_loss=0.0659, over 954417.65 frames. 
], batch size: 47, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 11:56:15,984 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53960.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 11:56:24,612 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53971.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 11:56:29,996 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.2962, 2.9226, 2.6407, 1.4867, 2.8479, 2.3678, 2.2446, 2.5690], + device='cuda:5'), covar=tensor([0.0919, 0.0902, 0.1933, 0.2238, 0.1807, 0.2088, 0.2229, 0.1286], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0201, 0.0203, 0.0188, 0.0216, 0.0208, 0.0225, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 11:56:41,931 INFO [finetune.py:976] (5/7) Epoch 10, batch 2450, loss[loss=0.1773, simple_loss=0.2481, pruned_loss=0.05323, over 4757.00 frames. ], tot_loss[loss=0.1945, simple_loss=0.2588, pruned_loss=0.06509, over 955668.47 frames. ], batch size: 54, lr: 3.74e-03, grad_scale: 64.0 +2023-03-26 11:56:49,155 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=54008.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 11:56:53,947 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8972, 4.0449, 3.7760, 2.1622, 4.1020, 3.0782, 0.9449, 2.8590], + device='cuda:5'), covar=tensor([0.2321, 0.1953, 0.1675, 0.3193, 0.0938, 0.1051, 0.4655, 0.1541], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0173, 0.0160, 0.0128, 0.0156, 0.0123, 0.0145, 0.0121], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 11:56:55,292 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-03-26 11:56:56,920 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=54019.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 11:56:59,870 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=54023.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 11:57:05,184 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.081e+02 1.615e+02 1.942e+02 2.268e+02 4.833e+02, threshold=3.884e+02, percent-clipped=2.0 +2023-03-26 11:57:16,019 INFO [finetune.py:976] (5/7) Epoch 10, batch 2500, loss[loss=0.2116, simple_loss=0.2768, pruned_loss=0.07326, over 4834.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2596, pruned_loss=0.06547, over 955873.87 frames. ], batch size: 47, lr: 3.74e-03, grad_scale: 64.0 +2023-03-26 11:57:34,829 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.54 vs. limit=5.0 +2023-03-26 11:57:42,139 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=54071.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 11:57:50,245 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. 
limit=2.0 +2023-03-26 11:57:50,642 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6721, 1.4803, 0.9950, 0.2354, 1.2450, 1.4621, 1.4037, 1.3923], + device='cuda:5'), covar=tensor([0.0886, 0.0892, 0.1482, 0.2066, 0.1521, 0.2556, 0.2378, 0.0903], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0199, 0.0201, 0.0187, 0.0215, 0.0206, 0.0223, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 11:58:00,118 INFO [finetune.py:976] (5/7) Epoch 10, batch 2550, loss[loss=0.1673, simple_loss=0.242, pruned_loss=0.04629, over 4723.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2628, pruned_loss=0.06608, over 955549.68 frames. ], batch size: 54, lr: 3.74e-03, grad_scale: 64.0 +2023-03-26 11:58:35,816 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.936e+01 1.671e+02 2.051e+02 2.356e+02 3.900e+02, threshold=4.103e+02, percent-clipped=1.0 +2023-03-26 11:58:38,362 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9005, 3.8858, 3.6009, 1.9772, 3.9546, 3.0361, 1.0054, 2.7514], + device='cuda:5'), covar=tensor([0.2126, 0.1786, 0.1557, 0.3167, 0.0895, 0.0965, 0.4525, 0.1305], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0175, 0.0161, 0.0129, 0.0157, 0.0123, 0.0146, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 11:58:46,744 INFO [finetune.py:976] (5/7) Epoch 10, batch 2600, loss[loss=0.1778, simple_loss=0.2417, pruned_loss=0.05695, over 4742.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2635, pruned_loss=0.06615, over 955686.57 frames. ], batch size: 27, lr: 3.74e-03, grad_scale: 64.0 +2023-03-26 11:59:19,477 INFO [finetune.py:976] (5/7) Epoch 10, batch 2650, loss[loss=0.1878, simple_loss=0.2635, pruned_loss=0.05607, over 4838.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2662, pruned_loss=0.06758, over 953656.86 frames. ], batch size: 47, lr: 3.74e-03, grad_scale: 64.0 +2023-03-26 11:59:43,744 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.585e+01 1.566e+02 1.779e+02 2.159e+02 3.883e+02, threshold=3.557e+02, percent-clipped=0.0 +2023-03-26 11:59:53,476 INFO [finetune.py:976] (5/7) Epoch 10, batch 2700, loss[loss=0.1877, simple_loss=0.2636, pruned_loss=0.05588, over 4750.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2651, pruned_loss=0.06645, over 955101.33 frames. ], batch size: 26, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:00:20,019 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7896, 1.6655, 2.2295, 1.9952, 1.8277, 4.3862, 1.5601, 1.8918], + device='cuda:5'), covar=tensor([0.1124, 0.1981, 0.1175, 0.1135, 0.1881, 0.0279, 0.1712, 0.1922], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0080, 0.0075, 0.0077, 0.0090, 0.0082, 0.0084, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 12:00:26,578 INFO [finetune.py:976] (5/7) Epoch 10, batch 2750, loss[loss=0.262, simple_loss=0.3027, pruned_loss=0.1106, over 4830.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.263, pruned_loss=0.06632, over 955516.40 frames. ], batch size: 33, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:00:32,435 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.63 vs. 
limit=5.0 +2023-03-26 12:00:50,902 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 1.545e+02 1.929e+02 2.415e+02 3.548e+02, threshold=3.859e+02, percent-clipped=0.0 +2023-03-26 12:01:01,571 INFO [finetune.py:976] (5/7) Epoch 10, batch 2800, loss[loss=0.1843, simple_loss=0.2484, pruned_loss=0.06016, over 4788.00 frames. ], tot_loss[loss=0.1947, simple_loss=0.2593, pruned_loss=0.06505, over 955665.16 frames. ], batch size: 51, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:01:48,151 INFO [finetune.py:976] (5/7) Epoch 10, batch 2850, loss[loss=0.2167, simple_loss=0.2763, pruned_loss=0.07855, over 4925.00 frames. ], tot_loss[loss=0.193, simple_loss=0.2572, pruned_loss=0.06442, over 955128.41 frames. ], batch size: 38, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:02:10,453 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.958e+01 1.628e+02 1.894e+02 2.190e+02 3.699e+02, threshold=3.787e+02, percent-clipped=0.0 +2023-03-26 12:02:22,199 INFO [finetune.py:976] (5/7) Epoch 10, batch 2900, loss[loss=0.1788, simple_loss=0.2369, pruned_loss=0.06037, over 4765.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2596, pruned_loss=0.06562, over 953916.41 frames. ], batch size: 28, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:02:57,316 INFO [finetune.py:976] (5/7) Epoch 10, batch 2950, loss[loss=0.2426, simple_loss=0.3022, pruned_loss=0.09149, over 4891.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2618, pruned_loss=0.06633, over 954301.01 frames. ], batch size: 32, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:03:09,670 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-03-26 12:03:18,744 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.146e+02 1.694e+02 2.010e+02 2.318e+02 4.609e+02, threshold=4.019e+02, percent-clipped=2.0 +2023-03-26 12:03:40,010 INFO [finetune.py:976] (5/7) Epoch 10, batch 3000, loss[loss=0.2145, simple_loss=0.2764, pruned_loss=0.07633, over 4731.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2635, pruned_loss=0.06669, over 953917.78 frames. ], batch size: 59, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:03:40,010 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 12:03:48,861 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6808, 1.5532, 1.6101, 1.6428, 1.0499, 3.0027, 1.1406, 1.5749], + device='cuda:5'), covar=tensor([0.3252, 0.2335, 0.1998, 0.2261, 0.1858, 0.0250, 0.2677, 0.1274], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0120, 0.0123, 0.0116, 0.0099, 0.0100, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 12:03:56,631 INFO [finetune.py:1010] (5/7) Epoch 10, validation: loss=0.1584, simple_loss=0.2295, pruned_loss=0.04366, over 2265189.00 frames. 
+2023-03-26 12:03:56,633 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 12:04:02,547 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3504, 1.2598, 1.6432, 1.1291, 1.3710, 1.4528, 1.2505, 1.6724], + device='cuda:5'), covar=tensor([0.1029, 0.1808, 0.0986, 0.1381, 0.0773, 0.1117, 0.2672, 0.0652], + device='cuda:5'), in_proj_covar=tensor([0.0195, 0.0203, 0.0190, 0.0189, 0.0176, 0.0213, 0.0215, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:04:03,102 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6217, 3.4430, 3.2562, 1.6602, 3.4996, 2.6010, 0.8067, 2.3029], + device='cuda:5'), covar=tensor([0.2161, 0.1619, 0.1341, 0.2898, 0.1006, 0.0944, 0.3980, 0.1245], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0173, 0.0159, 0.0128, 0.0155, 0.0122, 0.0145, 0.0121], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 12:04:29,070 INFO [finetune.py:976] (5/7) Epoch 10, batch 3050, loss[loss=0.1738, simple_loss=0.2498, pruned_loss=0.0489, over 4758.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.265, pruned_loss=0.0666, over 953430.92 frames. ], batch size: 28, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:04:32,735 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6213, 1.1387, 0.9245, 1.6255, 1.9552, 1.5536, 1.4499, 1.6652], + device='cuda:5'), covar=tensor([0.1449, 0.2070, 0.2100, 0.1139, 0.2215, 0.2160, 0.1517, 0.1758], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0095, 0.0112, 0.0092, 0.0120, 0.0094, 0.0099, 0.0091], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 12:04:52,094 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.154e+02 1.606e+02 1.839e+02 2.259e+02 4.011e+02, threshold=3.679e+02, percent-clipped=0.0 +2023-03-26 12:04:54,597 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-03-26 12:05:02,812 INFO [finetune.py:976] (5/7) Epoch 10, batch 3100, loss[loss=0.2139, simple_loss=0.2708, pruned_loss=0.07855, over 4757.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2641, pruned_loss=0.06654, over 954178.95 frames. ], batch size: 27, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:05:08,755 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5048, 1.4534, 1.3226, 1.4572, 1.7830, 1.7250, 1.5298, 1.3354], + device='cuda:5'), covar=tensor([0.0313, 0.0277, 0.0576, 0.0287, 0.0225, 0.0399, 0.0328, 0.0376], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0108, 0.0137, 0.0113, 0.0100, 0.0102, 0.0091, 0.0107], + device='cuda:5'), out_proj_covar=tensor([7.0437e-05, 8.4430e-05, 1.0933e-04, 8.8378e-05, 7.8553e-05, 7.5518e-05, + 6.8818e-05, 8.2035e-05], device='cuda:5') +2023-03-26 12:05:14,429 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=54664.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 12:05:24,936 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-03-26 12:05:36,461 INFO [finetune.py:976] (5/7) Epoch 10, batch 3150, loss[loss=0.1677, simple_loss=0.2303, pruned_loss=0.05249, over 4778.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2615, pruned_loss=0.06542, over 955086.62 frames. 
], batch size: 26, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:05:42,904 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2028, 1.8077, 2.4876, 1.4909, 2.1962, 2.3463, 1.7912, 2.6459], + device='cuda:5'), covar=tensor([0.1289, 0.2108, 0.1363, 0.2246, 0.1045, 0.1566, 0.2628, 0.0887], + device='cuda:5'), in_proj_covar=tensor([0.0196, 0.0203, 0.0191, 0.0189, 0.0176, 0.0213, 0.0214, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:05:54,671 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=54725.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 12:05:59,383 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.009e+02 1.653e+02 1.993e+02 2.298e+02 5.311e+02, threshold=3.986e+02, percent-clipped=2.0 +2023-03-26 12:06:10,109 INFO [finetune.py:976] (5/7) Epoch 10, batch 3200, loss[loss=0.1774, simple_loss=0.244, pruned_loss=0.05546, over 4910.00 frames. ], tot_loss[loss=0.1928, simple_loss=0.2577, pruned_loss=0.06391, over 955824.54 frames. ], batch size: 35, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:06:53,308 INFO [finetune.py:976] (5/7) Epoch 10, batch 3250, loss[loss=0.1987, simple_loss=0.2542, pruned_loss=0.07165, over 4810.00 frames. ], tot_loss[loss=0.1939, simple_loss=0.2589, pruned_loss=0.06451, over 956763.46 frames. ], batch size: 25, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:07:26,172 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.711e+02 2.094e+02 2.546e+02 5.601e+02, threshold=4.189e+02, percent-clipped=2.0 +2023-03-26 12:07:28,433 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6913, 1.4601, 2.1104, 3.1488, 2.1875, 2.1963, 1.0935, 2.4018], + device='cuda:5'), covar=tensor([0.1668, 0.1435, 0.1223, 0.0561, 0.0735, 0.1817, 0.1639, 0.0599], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0117, 0.0135, 0.0166, 0.0101, 0.0138, 0.0127, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 12:07:46,362 INFO [finetune.py:976] (5/7) Epoch 10, batch 3300, loss[loss=0.2277, simple_loss=0.2969, pruned_loss=0.0792, over 4929.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2629, pruned_loss=0.0661, over 956438.46 frames. ], batch size: 42, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:07:55,833 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=54862.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:07:57,398 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-03-26 12:08:06,191 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9491, 1.6643, 2.2152, 3.5693, 2.5674, 2.4640, 1.2041, 2.7493], + device='cuda:5'), covar=tensor([0.1622, 0.1436, 0.1327, 0.0538, 0.0694, 0.1386, 0.1708, 0.0617], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0117, 0.0134, 0.0165, 0.0101, 0.0137, 0.0126, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 12:08:20,114 INFO [finetune.py:976] (5/7) Epoch 10, batch 3350, loss[loss=0.1689, simple_loss=0.2355, pruned_loss=0.05112, over 4831.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2629, pruned_loss=0.06549, over 956096.94 frames. 
], batch size: 33, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:08:24,426 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1620, 1.9221, 1.6473, 1.9477, 1.8705, 1.7577, 1.8417, 2.6640], + device='cuda:5'), covar=tensor([0.4757, 0.5810, 0.4195, 0.5078, 0.4922, 0.3098, 0.5054, 0.1979], + device='cuda:5'), in_proj_covar=tensor([0.0284, 0.0259, 0.0222, 0.0278, 0.0242, 0.0208, 0.0245, 0.0211], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:08:47,104 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9093, 1.7445, 1.5742, 1.9213, 2.3510, 2.0064, 1.4103, 1.5127], + device='cuda:5'), covar=tensor([0.2120, 0.2043, 0.1888, 0.1706, 0.1686, 0.1154, 0.2606, 0.1963], + device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0207, 0.0207, 0.0189, 0.0239, 0.0180, 0.0213, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:08:48,201 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=54923.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 12:08:57,774 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.164e+02 1.693e+02 1.965e+02 2.442e+02 4.084e+02, threshold=3.930e+02, percent-clipped=0.0 +2023-03-26 12:09:07,544 INFO [finetune.py:976] (5/7) Epoch 10, batch 3400, loss[loss=0.243, simple_loss=0.3029, pruned_loss=0.09159, over 4867.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2651, pruned_loss=0.06688, over 954978.56 frames. ], batch size: 34, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:09:10,677 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7601, 2.8403, 2.7261, 2.1384, 2.9329, 3.0822, 3.0566, 2.2673], + device='cuda:5'), covar=tensor([0.0703, 0.0676, 0.0723, 0.0937, 0.0720, 0.0657, 0.0689, 0.1453], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0133, 0.0143, 0.0123, 0.0119, 0.0142, 0.0142, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:09:38,065 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9573, 1.8236, 1.5286, 1.9994, 2.3442, 2.0643, 1.5632, 1.4709], + device='cuda:5'), covar=tensor([0.2185, 0.2153, 0.2133, 0.1743, 0.1894, 0.1219, 0.2758, 0.2143], + device='cuda:5'), in_proj_covar=tensor([0.0235, 0.0207, 0.0206, 0.0188, 0.0239, 0.0180, 0.0213, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:09:56,947 INFO [finetune.py:976] (5/7) Epoch 10, batch 3450, loss[loss=0.1726, simple_loss=0.2414, pruned_loss=0.05193, over 4750.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2649, pruned_loss=0.0667, over 956709.42 frames. ], batch size: 27, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:10:16,048 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55020.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 12:10:18,024 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-03-26 12:10:29,360 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.59 vs. 
limit=5.0 +2023-03-26 12:10:30,757 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.349e+01 1.534e+02 1.957e+02 2.350e+02 5.428e+02, threshold=3.914e+02, percent-clipped=3.0 +2023-03-26 12:10:51,432 INFO [finetune.py:976] (5/7) Epoch 10, batch 3500, loss[loss=0.211, simple_loss=0.2642, pruned_loss=0.07886, over 4740.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2637, pruned_loss=0.06692, over 955341.11 frames. ], batch size: 54, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:11:09,002 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-03-26 12:11:22,115 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1477, 3.5321, 3.7706, 3.9680, 3.9328, 3.6548, 4.2320, 1.4228], + device='cuda:5'), covar=tensor([0.0711, 0.0848, 0.0794, 0.0890, 0.1020, 0.1415, 0.0599, 0.5060], + device='cuda:5'), in_proj_covar=tensor([0.0347, 0.0243, 0.0274, 0.0289, 0.0328, 0.0282, 0.0300, 0.0294], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:11:27,355 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3826, 2.0837, 1.5737, 0.7550, 1.7780, 1.8976, 1.7019, 1.9176], + device='cuda:5'), covar=tensor([0.0840, 0.0944, 0.1538, 0.2032, 0.1544, 0.2253, 0.2350, 0.0903], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0202, 0.0202, 0.0187, 0.0216, 0.0207, 0.0224, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:11:36,222 INFO [finetune.py:976] (5/7) Epoch 10, batch 3550, loss[loss=0.196, simple_loss=0.2583, pruned_loss=0.06685, over 4826.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2602, pruned_loss=0.06621, over 955971.76 frames. ], batch size: 30, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:11:37,526 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5814, 1.5321, 1.3053, 1.3578, 1.9282, 1.8598, 1.6417, 1.4150], + device='cuda:5'), covar=tensor([0.0354, 0.0380, 0.0613, 0.0423, 0.0214, 0.0483, 0.0371, 0.0442], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0109, 0.0139, 0.0114, 0.0101, 0.0103, 0.0092, 0.0108], + device='cuda:5'), out_proj_covar=tensor([7.0927e-05, 8.5410e-05, 1.1063e-04, 8.9626e-05, 7.9314e-05, 7.6471e-05, + 6.9713e-05, 8.2996e-05], device='cuda:5') +2023-03-26 12:11:58,140 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2145, 2.0157, 1.7865, 2.1349, 2.0649, 2.0038, 1.9916, 2.6378], + device='cuda:5'), covar=tensor([0.3708, 0.4444, 0.3407, 0.4163, 0.3956, 0.2362, 0.4185, 0.1515], + device='cuda:5'), in_proj_covar=tensor([0.0285, 0.0259, 0.0222, 0.0278, 0.0242, 0.0209, 0.0244, 0.0212], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:11:58,573 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.369e+01 1.530e+02 1.896e+02 2.280e+02 4.793e+02, threshold=3.791e+02, percent-clipped=5.0 +2023-03-26 12:12:09,224 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55148.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:12:09,744 INFO [finetune.py:976] (5/7) Epoch 10, batch 3600, loss[loss=0.1541, simple_loss=0.2141, pruned_loss=0.04707, over 4722.00 frames. ], tot_loss[loss=0.1936, simple_loss=0.257, pruned_loss=0.06504, over 955375.87 frames. 
], batch size: 23, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:12:43,416 INFO [finetune.py:976] (5/7) Epoch 10, batch 3650, loss[loss=0.264, simple_loss=0.3262, pruned_loss=0.1009, over 4835.00 frames. ], tot_loss[loss=0.1943, simple_loss=0.2579, pruned_loss=0.06537, over 955472.82 frames. ], batch size: 47, lr: 3.74e-03, grad_scale: 32.0 +2023-03-26 12:12:49,696 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55209.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:12:52,571 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55211.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:12:56,770 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55218.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 12:13:05,136 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2788, 1.4952, 1.2271, 1.4048, 1.7102, 1.5596, 1.3765, 1.2799], + device='cuda:5'), covar=tensor([0.0428, 0.0272, 0.0615, 0.0302, 0.0203, 0.0467, 0.0346, 0.0411], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0109, 0.0138, 0.0114, 0.0101, 0.0102, 0.0091, 0.0107], + device='cuda:5'), out_proj_covar=tensor([7.0504e-05, 8.4845e-05, 1.0998e-04, 8.9048e-05, 7.8876e-05, 7.5876e-05, + 6.9269e-05, 8.2553e-05], device='cuda:5') +2023-03-26 12:13:14,908 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.143e+02 1.616e+02 1.938e+02 2.270e+02 4.700e+02, threshold=3.875e+02, percent-clipped=1.0 +2023-03-26 12:13:26,509 INFO [finetune.py:976] (5/7) Epoch 10, batch 3700, loss[loss=0.19, simple_loss=0.2595, pruned_loss=0.06024, over 4821.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2622, pruned_loss=0.06633, over 955711.38 frames. ], batch size: 38, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:13:40,334 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55272.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:14:00,006 INFO [finetune.py:976] (5/7) Epoch 10, batch 3750, loss[loss=0.2055, simple_loss=0.2803, pruned_loss=0.0654, over 4895.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2649, pruned_loss=0.06774, over 956130.53 frames. ], batch size: 36, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:14:16,772 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55320.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 12:14:33,805 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.113e+02 1.583e+02 1.835e+02 2.150e+02 3.880e+02, threshold=3.669e+02, percent-clipped=1.0 +2023-03-26 12:14:45,564 INFO [finetune.py:976] (5/7) Epoch 10, batch 3800, loss[loss=0.2168, simple_loss=0.2731, pruned_loss=0.08028, over 4896.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2663, pruned_loss=0.06811, over 955263.30 frames. ], batch size: 36, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:14:57,818 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=55368.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 12:15:27,044 INFO [finetune.py:976] (5/7) Epoch 10, batch 3850, loss[loss=0.1725, simple_loss=0.2436, pruned_loss=0.05073, over 4811.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2642, pruned_loss=0.0673, over 956580.86 frames. ], batch size: 33, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:15:38,817 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=6.47 vs. 
limit=5.0 +2023-03-26 12:15:49,874 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.001e+02 1.578e+02 1.920e+02 2.344e+02 4.809e+02, threshold=3.839e+02, percent-clipped=3.0 +2023-03-26 12:16:01,516 INFO [finetune.py:976] (5/7) Epoch 10, batch 3900, loss[loss=0.2081, simple_loss=0.2559, pruned_loss=0.08015, over 4814.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2618, pruned_loss=0.06662, over 954649.59 frames. ], batch size: 25, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:16:40,760 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-03-26 12:16:44,730 INFO [finetune.py:976] (5/7) Epoch 10, batch 3950, loss[loss=0.1784, simple_loss=0.2298, pruned_loss=0.06352, over 4158.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2587, pruned_loss=0.06572, over 952503.80 frames. ], batch size: 18, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:16:48,765 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55504.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:16:58,367 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55518.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 12:17:13,740 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.106e+02 1.580e+02 1.857e+02 2.217e+02 3.906e+02, threshold=3.714e+02, percent-clipped=1.0 +2023-03-26 12:17:35,773 INFO [finetune.py:976] (5/7) Epoch 10, batch 4000, loss[loss=0.1771, simple_loss=0.2469, pruned_loss=0.05367, over 4780.00 frames. ], tot_loss[loss=0.1941, simple_loss=0.2576, pruned_loss=0.0653, over 952759.62 frames. ], batch size: 28, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:17:48,290 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=55566.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:17:48,924 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55567.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:17:51,400 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2301, 1.9402, 2.4978, 1.6843, 2.3495, 2.3744, 1.7958, 2.6586], + device='cuda:5'), covar=tensor([0.1287, 0.1972, 0.1478, 0.1981, 0.0852, 0.1486, 0.2687, 0.0922], + device='cuda:5'), in_proj_covar=tensor([0.0194, 0.0200, 0.0190, 0.0188, 0.0174, 0.0211, 0.0213, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:17:55,054 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1005, 2.0474, 1.7135, 2.1078, 2.8386, 2.1095, 1.9189, 1.4923], + device='cuda:5'), covar=tensor([0.2406, 0.2071, 0.2038, 0.1891, 0.1831, 0.1223, 0.2408, 0.2133], + device='cuda:5'), in_proj_covar=tensor([0.0238, 0.0210, 0.0209, 0.0191, 0.0242, 0.0182, 0.0215, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:18:01,635 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7488, 1.2039, 0.9765, 1.7275, 2.0491, 1.4381, 1.5491, 1.6092], + device='cuda:5'), covar=tensor([0.1277, 0.1930, 0.1865, 0.1007, 0.1808, 0.1951, 0.1230, 0.1688], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0097, 0.0114, 0.0093, 0.0122, 0.0096, 0.0100, 0.0092], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 12:18:09,101 INFO [finetune.py:976] (5/7) Epoch 10, batch 4050, loss[loss=0.2059, 
simple_loss=0.2763, pruned_loss=0.06773, over 4837.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2606, pruned_loss=0.06683, over 950456.16 frames. ], batch size: 33, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:18:29,975 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6723, 2.5112, 2.0493, 2.8016, 2.5670, 2.2187, 3.1338, 2.5714], + device='cuda:5'), covar=tensor([0.1306, 0.2673, 0.3417, 0.2820, 0.2938, 0.1711, 0.2958, 0.2099], + device='cuda:5'), in_proj_covar=tensor([0.0175, 0.0188, 0.0234, 0.0254, 0.0240, 0.0197, 0.0213, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:18:34,628 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.120e+02 1.736e+02 2.138e+02 2.510e+02 4.140e+02, threshold=4.276e+02, percent-clipped=4.0 +2023-03-26 12:18:44,822 INFO [finetune.py:976] (5/7) Epoch 10, batch 4100, loss[loss=0.1995, simple_loss=0.2638, pruned_loss=0.06758, over 4904.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2617, pruned_loss=0.06616, over 952058.64 frames. ], batch size: 37, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:19:17,498 INFO [finetune.py:976] (5/7) Epoch 10, batch 4150, loss[loss=0.1962, simple_loss=0.2706, pruned_loss=0.06092, over 4925.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2645, pruned_loss=0.06718, over 953309.44 frames. ], batch size: 42, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:19:49,996 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.152e+02 1.688e+02 2.035e+02 2.462e+02 3.895e+02, threshold=4.069e+02, percent-clipped=0.0 +2023-03-26 12:19:59,100 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55748.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:19:59,614 INFO [finetune.py:976] (5/7) Epoch 10, batch 4200, loss[loss=0.2059, simple_loss=0.2734, pruned_loss=0.06915, over 4810.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2646, pruned_loss=0.06674, over 953177.66 frames. ], batch size: 38, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:20:46,058 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8113, 1.7366, 1.5412, 2.0079, 2.4511, 2.0682, 1.6354, 1.4770], + device='cuda:5'), covar=tensor([0.2176, 0.2019, 0.1981, 0.1530, 0.1618, 0.1066, 0.2443, 0.1896], + device='cuda:5'), in_proj_covar=tensor([0.0238, 0.0209, 0.0209, 0.0190, 0.0241, 0.0181, 0.0214, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:20:53,374 INFO [finetune.py:976] (5/7) Epoch 10, batch 4250, loss[loss=0.1831, simple_loss=0.2463, pruned_loss=0.06001, over 4820.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2628, pruned_loss=0.06639, over 953350.21 frames. ], batch size: 33, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:20:57,626 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55804.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:21:06,141 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55809.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:21:17,762 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.98 vs. 
limit=5.0 +2023-03-26 12:21:36,493 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6647, 1.6079, 1.9997, 1.8994, 1.7782, 3.1476, 1.4472, 1.8189], + device='cuda:5'), covar=tensor([0.0853, 0.1510, 0.1239, 0.0838, 0.1265, 0.0301, 0.1266, 0.1373], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0081, 0.0075, 0.0077, 0.0091, 0.0082, 0.0084, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 12:21:38,841 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.929e+01 1.631e+02 1.908e+02 2.201e+02 4.056e+02, threshold=3.816e+02, percent-clipped=0.0 +2023-03-26 12:21:48,063 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55848.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:21:48,563 INFO [finetune.py:976] (5/7) Epoch 10, batch 4300, loss[loss=0.1919, simple_loss=0.2468, pruned_loss=0.06856, over 4781.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2598, pruned_loss=0.06565, over 954502.33 frames. ], batch size: 28, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:21:50,382 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=55852.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:22:10,864 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55867.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:22:28,645 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8188, 3.9117, 3.7659, 1.8110, 4.0256, 3.1175, 0.7968, 2.7833], + device='cuda:5'), covar=tensor([0.2385, 0.1987, 0.1428, 0.3166, 0.0984, 0.0929, 0.4416, 0.1331], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0175, 0.0159, 0.0128, 0.0156, 0.0122, 0.0146, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 12:22:36,975 INFO [finetune.py:976] (5/7) Epoch 10, batch 4350, loss[loss=0.1876, simple_loss=0.2633, pruned_loss=0.05591, over 4907.00 frames. ], tot_loss[loss=0.1923, simple_loss=0.2567, pruned_loss=0.06396, over 956490.05 frames. ], batch size: 37, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:22:48,848 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55909.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:22:58,947 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=55915.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:23:23,254 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.076e+02 1.622e+02 1.928e+02 2.410e+02 3.855e+02, threshold=3.856e+02, percent-clipped=1.0 +2023-03-26 12:23:37,719 INFO [finetune.py:976] (5/7) Epoch 10, batch 4400, loss[loss=0.2433, simple_loss=0.3061, pruned_loss=0.09027, over 4809.00 frames. ], tot_loss[loss=0.1933, simple_loss=0.2578, pruned_loss=0.06441, over 955099.91 frames. ], batch size: 45, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:24:11,807 INFO [finetune.py:976] (5/7) Epoch 10, batch 4450, loss[loss=0.2125, simple_loss=0.2826, pruned_loss=0.07117, over 4779.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2627, pruned_loss=0.0659, over 955995.36 frames. 
], batch size: 29, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:24:22,918 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5993, 3.8000, 3.6196, 1.8858, 3.9401, 2.9579, 0.7530, 2.7050], + device='cuda:5'), covar=tensor([0.2480, 0.1876, 0.1415, 0.2987, 0.0979, 0.0934, 0.4290, 0.1416], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0173, 0.0158, 0.0127, 0.0154, 0.0121, 0.0144, 0.0121], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 12:24:25,777 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8009, 2.0916, 1.5155, 1.6177, 2.2533, 2.1678, 2.0396, 1.8436], + device='cuda:5'), covar=tensor([0.0345, 0.0301, 0.0540, 0.0342, 0.0245, 0.0504, 0.0270, 0.0339], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0110, 0.0138, 0.0114, 0.0100, 0.0102, 0.0092, 0.0106], + device='cuda:5'), out_proj_covar=tensor([7.0465e-05, 8.5514e-05, 1.1005e-04, 8.9036e-05, 7.8511e-05, 7.5756e-05, + 6.9311e-05, 8.1889e-05], device='cuda:5') +2023-03-26 12:24:36,683 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.054e+02 1.562e+02 1.840e+02 2.258e+02 4.729e+02, threshold=3.681e+02, percent-clipped=2.0 +2023-03-26 12:24:43,419 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0570, 2.0417, 1.5879, 2.0416, 1.9541, 1.8749, 1.9336, 2.6169], + device='cuda:5'), covar=tensor([0.5402, 0.5185, 0.4355, 0.5166, 0.5039, 0.3219, 0.4780, 0.2302], + device='cuda:5'), in_proj_covar=tensor([0.0283, 0.0257, 0.0221, 0.0277, 0.0241, 0.0208, 0.0244, 0.0212], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:24:46,913 INFO [finetune.py:976] (5/7) Epoch 10, batch 4500, loss[loss=0.1814, simple_loss=0.2456, pruned_loss=0.0586, over 4808.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2647, pruned_loss=0.06659, over 955296.61 frames. ], batch size: 29, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:25:11,466 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.59 vs. limit=2.0 +2023-03-26 12:25:31,196 INFO [finetune.py:976] (5/7) Epoch 10, batch 4550, loss[loss=0.1801, simple_loss=0.2447, pruned_loss=0.05772, over 4808.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2665, pruned_loss=0.06781, over 955440.75 frames. 
], batch size: 25, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:25:34,312 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56104.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:25:34,989 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6478, 1.5439, 1.4499, 1.7250, 2.2717, 1.7800, 1.4936, 1.3744], + device='cuda:5'), covar=tensor([0.2488, 0.2249, 0.2183, 0.1830, 0.1754, 0.1258, 0.2619, 0.2078], + device='cuda:5'), in_proj_covar=tensor([0.0237, 0.0209, 0.0209, 0.0190, 0.0241, 0.0181, 0.0214, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:25:53,264 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.089e+02 1.686e+02 1.941e+02 2.447e+02 3.858e+02, threshold=3.882e+02, percent-clipped=3.0 +2023-03-26 12:25:59,481 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5962, 1.7536, 1.3999, 1.4891, 2.0118, 1.8848, 1.6634, 1.4514], + device='cuda:5'), covar=tensor([0.0364, 0.0312, 0.0507, 0.0315, 0.0190, 0.0393, 0.0344, 0.0351], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0109, 0.0138, 0.0113, 0.0100, 0.0102, 0.0091, 0.0106], + device='cuda:5'), out_proj_covar=tensor([7.0132e-05, 8.5165e-05, 1.0984e-04, 8.8504e-05, 7.8153e-05, 7.5806e-05, + 6.9090e-05, 8.1547e-05], device='cuda:5') +2023-03-26 12:26:02,519 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=56145.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:26:04,873 INFO [finetune.py:976] (5/7) Epoch 10, batch 4600, loss[loss=0.1848, simple_loss=0.2467, pruned_loss=0.06147, over 4818.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2657, pruned_loss=0.06726, over 956516.47 frames. ], batch size: 33, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:26:27,696 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.82 vs. limit=2.0 +2023-03-26 12:26:40,183 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-03-26 12:26:40,469 INFO [finetune.py:976] (5/7) Epoch 10, batch 4650, loss[loss=0.1806, simple_loss=0.2423, pruned_loss=0.05949, over 4809.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2629, pruned_loss=0.06658, over 957579.80 frames. ], batch size: 25, lr: 3.73e-03, grad_scale: 32.0 +2023-03-26 12:26:43,721 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56204.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:26:44,989 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=56206.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:27:11,820 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.027e+02 1.561e+02 1.851e+02 2.355e+02 3.865e+02, threshold=3.702e+02, percent-clipped=0.0 +2023-03-26 12:27:14,387 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8791, 1.5656, 2.2355, 1.4637, 2.0503, 2.0270, 1.5614, 2.3113], + device='cuda:5'), covar=tensor([0.1207, 0.2038, 0.1227, 0.1901, 0.0738, 0.1438, 0.2725, 0.0727], + device='cuda:5'), in_proj_covar=tensor([0.0196, 0.0204, 0.0193, 0.0190, 0.0176, 0.0214, 0.0216, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:27:23,136 INFO [finetune.py:976] (5/7) Epoch 10, batch 4700, loss[loss=0.1753, simple_loss=0.232, pruned_loss=0.05934, over 4905.00 frames. 
], tot_loss[loss=0.1937, simple_loss=0.2586, pruned_loss=0.06438, over 958572.82 frames. ], batch size: 35, lr: 3.73e-03, grad_scale: 64.0 +2023-03-26 12:28:05,713 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1117, 2.0786, 2.2576, 0.9675, 2.5577, 2.7266, 2.2445, 2.0115], + device='cuda:5'), covar=tensor([0.1067, 0.0808, 0.0478, 0.0790, 0.0573, 0.0675, 0.0485, 0.0814], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0156, 0.0123, 0.0134, 0.0132, 0.0126, 0.0146, 0.0148], + device='cuda:5'), out_proj_covar=tensor([9.6258e-05, 1.1457e-04, 8.8371e-05, 9.6940e-05, 9.4720e-05, 9.1562e-05, + 1.0704e-04, 1.0858e-04], device='cuda:5') +2023-03-26 12:28:09,016 INFO [finetune.py:976] (5/7) Epoch 10, batch 4750, loss[loss=0.173, simple_loss=0.2414, pruned_loss=0.05227, over 4839.00 frames. ], tot_loss[loss=0.1906, simple_loss=0.2555, pruned_loss=0.06287, over 957471.59 frames. ], batch size: 47, lr: 3.73e-03, grad_scale: 64.0 +2023-03-26 12:28:29,786 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1881, 2.2695, 1.9112, 1.7884, 2.5715, 2.5800, 2.3358, 2.0782], + device='cuda:5'), covar=tensor([0.0352, 0.0311, 0.0489, 0.0332, 0.0228, 0.0540, 0.0312, 0.0363], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0110, 0.0140, 0.0114, 0.0101, 0.0103, 0.0092, 0.0107], + device='cuda:5'), out_proj_covar=tensor([7.0920e-05, 8.5748e-05, 1.1104e-04, 8.9369e-05, 7.8791e-05, 7.6351e-05, + 6.9559e-05, 8.2673e-05], device='cuda:5') +2023-03-26 12:28:30,218 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.051e+02 1.456e+02 1.800e+02 2.273e+02 6.888e+02, threshold=3.601e+02, percent-clipped=2.0 +2023-03-26 12:28:42,335 INFO [finetune.py:976] (5/7) Epoch 10, batch 4800, loss[loss=0.2384, simple_loss=0.3059, pruned_loss=0.0854, over 4751.00 frames. ], tot_loss[loss=0.193, simple_loss=0.258, pruned_loss=0.064, over 956592.93 frames. ], batch size: 54, lr: 3.73e-03, grad_scale: 64.0 +2023-03-26 12:29:14,950 INFO [finetune.py:976] (5/7) Epoch 10, batch 4850, loss[loss=0.235, simple_loss=0.2961, pruned_loss=0.08691, over 4798.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2622, pruned_loss=0.06533, over 954037.75 frames. ], batch size: 51, lr: 3.73e-03, grad_scale: 64.0 +2023-03-26 12:29:19,700 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=56404.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:29:26,349 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8301, 1.6440, 1.4245, 1.1665, 1.6345, 1.5754, 1.6193, 2.1945], + device='cuda:5'), covar=tensor([0.4443, 0.4108, 0.3634, 0.4127, 0.3944, 0.2514, 0.3972, 0.1875], + device='cuda:5'), in_proj_covar=tensor([0.0283, 0.0257, 0.0221, 0.0277, 0.0241, 0.0208, 0.0245, 0.0212], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:29:37,025 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.167e+02 1.738e+02 2.004e+02 2.451e+02 5.164e+02, threshold=4.009e+02, percent-clipped=2.0 +2023-03-26 12:29:48,225 INFO [finetune.py:976] (5/7) Epoch 10, batch 4900, loss[loss=0.2264, simple_loss=0.2982, pruned_loss=0.07734, over 4831.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2654, pruned_loss=0.06697, over 954494.84 frames. 
], batch size: 30, lr: 3.73e-03, grad_scale: 64.0 +2023-03-26 12:29:50,495 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=56452.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:30:12,078 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0141, 1.9437, 2.1691, 1.4535, 2.0766, 2.1973, 2.0680, 1.6989], + device='cuda:5'), covar=tensor([0.0570, 0.0611, 0.0563, 0.0827, 0.0558, 0.0638, 0.0588, 0.1034], + device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0134, 0.0144, 0.0126, 0.0121, 0.0144, 0.0144, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:30:26,420 INFO [finetune.py:976] (5/7) Epoch 10, batch 4950, loss[loss=0.2344, simple_loss=0.2945, pruned_loss=0.08715, over 4836.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2677, pruned_loss=0.068, over 957009.32 frames. ], batch size: 47, lr: 3.72e-03, grad_scale: 64.0 +2023-03-26 12:30:32,460 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56501.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:30:34,875 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=56504.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:30:49,800 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=56523.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 12:30:55,720 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.081e+02 1.612e+02 1.965e+02 2.275e+02 4.231e+02, threshold=3.931e+02, percent-clipped=1.0 +2023-03-26 12:31:06,809 INFO [finetune.py:976] (5/7) Epoch 10, batch 5000, loss[loss=0.1564, simple_loss=0.2191, pruned_loss=0.04687, over 4152.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2648, pruned_loss=0.06682, over 956513.73 frames. ], batch size: 65, lr: 3.72e-03, grad_scale: 64.0 +2023-03-26 12:31:08,679 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=56552.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:31:08,709 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.7434, 4.1000, 4.3407, 4.5502, 4.4633, 4.1914, 4.7945, 2.0136], + device='cuda:5'), covar=tensor([0.0733, 0.0885, 0.0678, 0.0729, 0.1218, 0.1866, 0.0724, 0.4702], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0246, 0.0278, 0.0292, 0.0334, 0.0287, 0.0303, 0.0297], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:31:29,667 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=56584.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 12:31:31,504 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3536, 2.2727, 2.4431, 1.7149, 2.4440, 2.5782, 2.4316, 1.9521], + device='cuda:5'), covar=tensor([0.0584, 0.0662, 0.0668, 0.0896, 0.0629, 0.0666, 0.0619, 0.1032], + device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0134, 0.0144, 0.0125, 0.0120, 0.0144, 0.0143, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:31:39,222 INFO [finetune.py:976] (5/7) Epoch 10, batch 5050, loss[loss=0.1526, simple_loss=0.215, pruned_loss=0.04504, over 4820.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2604, pruned_loss=0.0655, over 955481.35 frames. 
], batch size: 33, lr: 3.72e-03, grad_scale: 64.0 +2023-03-26 12:32:04,806 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.243e+02 1.580e+02 1.790e+02 2.049e+02 5.062e+02, threshold=3.579e+02, percent-clipped=1.0 +2023-03-26 12:32:14,688 INFO [finetune.py:976] (5/7) Epoch 10, batch 5100, loss[loss=0.1622, simple_loss=0.2221, pruned_loss=0.05118, over 4003.00 frames. ], tot_loss[loss=0.1932, simple_loss=0.2573, pruned_loss=0.06449, over 956370.69 frames. ], batch size: 17, lr: 3.72e-03, grad_scale: 64.0 +2023-03-26 12:32:18,885 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.45 vs. limit=5.0 +2023-03-26 12:32:44,345 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3189, 1.3711, 1.1852, 1.3707, 1.6914, 1.4827, 1.3907, 1.2209], + device='cuda:5'), covar=tensor([0.0375, 0.0262, 0.0565, 0.0278, 0.0198, 0.0505, 0.0291, 0.0367], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0110, 0.0139, 0.0114, 0.0101, 0.0102, 0.0092, 0.0107], + device='cuda:5'), out_proj_covar=tensor([7.0921e-05, 8.5684e-05, 1.1064e-04, 8.9233e-05, 7.8822e-05, 7.5948e-05, + 6.9472e-05, 8.2705e-05], device='cuda:5') +2023-03-26 12:32:55,109 INFO [finetune.py:976] (5/7) Epoch 10, batch 5150, loss[loss=0.1346, simple_loss=0.2087, pruned_loss=0.03025, over 4774.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2588, pruned_loss=0.06582, over 954703.06 frames. ], batch size: 26, lr: 3.72e-03, grad_scale: 64.0 +2023-03-26 12:33:11,366 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2129, 1.7707, 2.0980, 2.0310, 1.7797, 1.8095, 1.9772, 2.0005], + device='cuda:5'), covar=tensor([0.3503, 0.4238, 0.3353, 0.4483, 0.5229, 0.4567, 0.5304, 0.3321], + device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0240, 0.0253, 0.0258, 0.0253, 0.0230, 0.0275, 0.0230], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:33:23,849 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-03-26 12:33:27,185 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.919e+01 1.632e+02 1.974e+02 2.331e+02 5.610e+02, threshold=3.948e+02, percent-clipped=3.0 +2023-03-26 12:33:35,169 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7379, 2.5248, 2.0929, 0.9194, 2.3331, 2.0751, 1.9032, 2.2966], + device='cuda:5'), covar=tensor([0.0792, 0.0764, 0.1583, 0.2081, 0.1398, 0.2041, 0.2068, 0.0885], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0200, 0.0202, 0.0187, 0.0215, 0.0208, 0.0224, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:33:36,884 INFO [finetune.py:976] (5/7) Epoch 10, batch 5200, loss[loss=0.2308, simple_loss=0.2973, pruned_loss=0.08217, over 4891.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2608, pruned_loss=0.06618, over 952196.94 frames. ], batch size: 32, lr: 3.72e-03, grad_scale: 64.0 +2023-03-26 12:34:10,224 INFO [finetune.py:976] (5/7) Epoch 10, batch 5250, loss[loss=0.1901, simple_loss=0.2655, pruned_loss=0.05738, over 4885.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2625, pruned_loss=0.06688, over 950855.85 frames. 
], batch size: 32, lr: 3.72e-03, grad_scale: 64.0 +2023-03-26 12:34:11,647 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=56801.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:34:12,865 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=56803.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:34:27,533 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5371, 1.5030, 2.2039, 1.9103, 1.8399, 3.5016, 1.4149, 1.7158], + device='cuda:5'), covar=tensor([0.1036, 0.1751, 0.1703, 0.1023, 0.1401, 0.0331, 0.1515, 0.1625], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0081, 0.0075, 0.0078, 0.0091, 0.0082, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 12:34:32,530 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1875, 1.9281, 2.3469, 3.3823, 2.4745, 2.6815, 1.3829, 2.7702], + device='cuda:5'), covar=tensor([0.1425, 0.1186, 0.1209, 0.0610, 0.0738, 0.1335, 0.1596, 0.0556], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0117, 0.0136, 0.0165, 0.0101, 0.0138, 0.0126, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 12:34:34,254 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.164e+02 1.706e+02 2.047e+02 2.503e+02 5.084e+02, threshold=4.093e+02, percent-clipped=2.0 +2023-03-26 12:34:43,965 INFO [finetune.py:976] (5/7) Epoch 10, batch 5300, loss[loss=0.2132, simple_loss=0.2904, pruned_loss=0.06805, over 4846.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2651, pruned_loss=0.06781, over 950275.73 frames. ], batch size: 47, lr: 3.72e-03, grad_scale: 64.0 +2023-03-26 12:34:44,026 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=56849.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:34:53,665 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=56864.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:35:04,619 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56879.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 12:35:17,631 INFO [finetune.py:976] (5/7) Epoch 10, batch 5350, loss[loss=0.208, simple_loss=0.2685, pruned_loss=0.07374, over 4783.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2645, pruned_loss=0.06656, over 950964.09 frames. ], batch size: 51, lr: 3.72e-03, grad_scale: 64.0 +2023-03-26 12:35:23,296 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=56908.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:35:25,163 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3295, 2.2648, 2.3308, 1.7065, 2.3702, 2.4638, 2.4190, 2.0444], + device='cuda:5'), covar=tensor([0.0545, 0.0671, 0.0708, 0.0858, 0.0624, 0.0688, 0.0603, 0.1031], + device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0135, 0.0144, 0.0126, 0.0121, 0.0144, 0.0144, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:35:49,118 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.924e+01 1.584e+02 1.842e+02 2.192e+02 3.665e+02, threshold=3.684e+02, percent-clipped=0.0 +2023-03-26 12:36:02,276 INFO [finetune.py:976] (5/7) Epoch 10, batch 5400, loss[loss=0.1914, simple_loss=0.2592, pruned_loss=0.06179, over 4757.00 frames. 
], tot_loss[loss=0.1982, simple_loss=0.2633, pruned_loss=0.06649, over 952404.33 frames. ], batch size: 54, lr: 3.72e-03, grad_scale: 64.0 +2023-03-26 12:36:15,007 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=56969.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:36:32,495 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=56993.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:36:33,663 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=56995.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:36:35,991 INFO [finetune.py:976] (5/7) Epoch 10, batch 5450, loss[loss=0.2075, simple_loss=0.2586, pruned_loss=0.0782, over 4833.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2602, pruned_loss=0.06593, over 953475.69 frames. ], batch size: 30, lr: 3.72e-03, grad_scale: 64.0 +2023-03-26 12:36:57,712 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.053e+02 1.491e+02 1.807e+02 2.344e+02 4.842e+02, threshold=3.613e+02, percent-clipped=5.0 +2023-03-26 12:37:07,822 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2692, 1.7999, 2.2731, 2.1118, 1.8289, 1.8848, 2.0796, 2.0246], + device='cuda:5'), covar=tensor([0.4384, 0.5132, 0.3693, 0.4738, 0.5849, 0.4632, 0.5462, 0.3829], + device='cuda:5'), in_proj_covar=tensor([0.0237, 0.0240, 0.0254, 0.0258, 0.0253, 0.0230, 0.0275, 0.0231], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:37:09,488 INFO [finetune.py:976] (5/7) Epoch 10, batch 5500, loss[loss=0.1824, simple_loss=0.2489, pruned_loss=0.05796, over 4834.00 frames. ], tot_loss[loss=0.1944, simple_loss=0.2579, pruned_loss=0.06547, over 951755.30 frames. ], batch size: 33, lr: 3.72e-03, grad_scale: 64.0 +2023-03-26 12:37:12,661 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57054.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:37:13,838 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57056.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:37:22,671 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-03-26 12:37:26,618 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6320, 1.3054, 0.8068, 1.6033, 2.0815, 1.4349, 1.4307, 1.6280], + device='cuda:5'), covar=tensor([0.1613, 0.2145, 0.2106, 0.1241, 0.1996, 0.1965, 0.1565, 0.2066], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0096, 0.0112, 0.0092, 0.0120, 0.0095, 0.0099, 0.0091], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 12:37:43,350 INFO [finetune.py:976] (5/7) Epoch 10, batch 5550, loss[loss=0.1955, simple_loss=0.2755, pruned_loss=0.0577, over 4857.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.2586, pruned_loss=0.06563, over 952968.70 frames. ], batch size: 44, lr: 3.72e-03, grad_scale: 64.0 +2023-03-26 12:38:06,740 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.869e+01 1.587e+02 1.788e+02 2.090e+02 3.209e+02, threshold=3.576e+02, percent-clipped=0.0 +2023-03-26 12:38:25,603 INFO [finetune.py:976] (5/7) Epoch 10, batch 5600, loss[loss=0.2047, simple_loss=0.2669, pruned_loss=0.07123, over 4918.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2633, pruned_loss=0.06659, over 953015.66 frames. 
], batch size: 37, lr: 3.72e-03, grad_scale: 64.0 +2023-03-26 12:38:33,403 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-03-26 12:38:35,046 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57159.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:38:45,542 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.4895, 1.4732, 1.4770, 0.9025, 1.5618, 1.7905, 1.7143, 1.3362], + device='cuda:5'), covar=tensor([0.1268, 0.0766, 0.0431, 0.0629, 0.0470, 0.0495, 0.0395, 0.0983], + device='cuda:5'), in_proj_covar=tensor([0.0128, 0.0154, 0.0121, 0.0133, 0.0131, 0.0124, 0.0143, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.4973e-05, 1.1266e-04, 8.7304e-05, 9.6104e-05, 9.3572e-05, 9.0322e-05, + 1.0497e-04, 1.0664e-04], device='cuda:5') +2023-03-26 12:38:46,665 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57179.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 12:38:50,744 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57186.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:38:54,773 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-03-26 12:38:57,565 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57197.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:38:58,646 INFO [finetune.py:976] (5/7) Epoch 10, batch 5650, loss[loss=0.2025, simple_loss=0.2718, pruned_loss=0.06657, over 4915.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2664, pruned_loss=0.06706, over 953607.17 frames. ], batch size: 37, lr: 3.72e-03, grad_scale: 64.0 +2023-03-26 12:39:15,291 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=57227.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 12:39:19,291 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.057e+02 1.551e+02 1.804e+02 2.162e+02 3.713e+02, threshold=3.608e+02, percent-clipped=1.0 +2023-03-26 12:39:21,161 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0884, 2.0094, 2.5635, 2.4389, 2.2554, 4.6968, 1.9891, 2.3048], + device='cuda:5'), covar=tensor([0.0867, 0.1518, 0.0979, 0.0836, 0.1351, 0.0162, 0.1286, 0.1497], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0075, 0.0077, 0.0091, 0.0082, 0.0084, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 12:39:25,356 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57244.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:39:27,132 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57247.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:39:28,225 INFO [finetune.py:976] (5/7) Epoch 10, batch 5700, loss[loss=0.138, simple_loss=0.2015, pruned_loss=0.03725, over 4170.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2617, pruned_loss=0.06669, over 931888.94 frames. ], batch size: 18, lr: 3.72e-03, grad_scale: 32.0 +2023-03-26 12:39:34,006 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57258.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 12:39:36,194 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. 
limit=2.0 +2023-03-26 12:39:37,766 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57264.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:39:38,783 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-03-26 12:40:00,695 INFO [finetune.py:976] (5/7) Epoch 11, batch 0, loss[loss=0.2807, simple_loss=0.3176, pruned_loss=0.1219, over 4179.00 frames. ], tot_loss[loss=0.2807, simple_loss=0.3176, pruned_loss=0.1219, over 4179.00 frames. ], batch size: 66, lr: 3.72e-03, grad_scale: 16.0 +2023-03-26 12:40:00,695 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 12:40:16,055 INFO [finetune.py:1010] (5/7) Epoch 11, validation: loss=0.1597, simple_loss=0.2306, pruned_loss=0.04438, over 2265189.00 frames. +2023-03-26 12:40:16,056 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 12:40:37,203 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57305.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:40:45,888 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0679, 1.0134, 0.9999, 0.3096, 0.8713, 1.2121, 1.2212, 1.0063], + device='cuda:5'), covar=tensor([0.0831, 0.0475, 0.0494, 0.0579, 0.0505, 0.0502, 0.0372, 0.0616], + device='cuda:5'), in_proj_covar=tensor([0.0127, 0.0152, 0.0120, 0.0132, 0.0130, 0.0123, 0.0142, 0.0145], + device='cuda:5'), out_proj_covar=tensor([9.4325e-05, 1.1183e-04, 8.6372e-05, 9.5568e-05, 9.2786e-05, 8.9576e-05, + 1.0424e-04, 1.0590e-04], device='cuda:5') +2023-03-26 12:40:59,550 INFO [finetune.py:976] (5/7) Epoch 11, batch 50, loss[loss=0.1905, simple_loss=0.2559, pruned_loss=0.06255, over 4753.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.272, pruned_loss=0.07183, over 217450.51 frames. ], batch size: 28, lr: 3.72e-03, grad_scale: 16.0 +2023-03-26 12:41:10,029 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.036e+02 1.580e+02 1.868e+02 2.535e+02 4.204e+02, threshold=3.735e+02, percent-clipped=3.0 +2023-03-26 12:41:18,572 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57349.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:41:19,797 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57351.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:41:38,108 INFO [finetune.py:976] (5/7) Epoch 11, batch 100, loss[loss=0.1509, simple_loss=0.2208, pruned_loss=0.04046, over 4764.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2637, pruned_loss=0.0698, over 379885.98 frames. ], batch size: 28, lr: 3.72e-03, grad_scale: 16.0 +2023-03-26 12:41:46,599 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8782, 1.8917, 2.0043, 1.4035, 2.0673, 2.1350, 2.0466, 1.6374], + device='cuda:5'), covar=tensor([0.0600, 0.0671, 0.0668, 0.0841, 0.0647, 0.0644, 0.0566, 0.1115], + device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0134, 0.0144, 0.0125, 0.0120, 0.0144, 0.0145, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:41:51,434 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57398.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:42:11,571 INFO [finetune.py:976] (5/7) Epoch 11, batch 150, loss[loss=0.2027, simple_loss=0.2617, pruned_loss=0.07188, over 4903.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2581, pruned_loss=0.06704, over 504951.76 frames. 
], batch size: 35, lr: 3.72e-03, grad_scale: 16.0 +2023-03-26 12:42:16,482 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0718, 1.0152, 0.9456, 0.4959, 0.8421, 1.1927, 1.1779, 0.9965], + device='cuda:5'), covar=tensor([0.0857, 0.0525, 0.0536, 0.0484, 0.0548, 0.0477, 0.0381, 0.0598], + device='cuda:5'), in_proj_covar=tensor([0.0127, 0.0152, 0.0119, 0.0131, 0.0129, 0.0122, 0.0142, 0.0144], + device='cuda:5'), out_proj_covar=tensor([9.3679e-05, 1.1148e-04, 8.6018e-05, 9.5016e-05, 9.2424e-05, 8.9264e-05, + 1.0373e-04, 1.0549e-04], device='cuda:5') +2023-03-26 12:42:16,966 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.085e+02 1.728e+02 2.070e+02 2.489e+02 4.280e+02, threshold=4.140e+02, percent-clipped=3.0 +2023-03-26 12:42:23,810 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7889, 1.8626, 1.6026, 1.7646, 2.1782, 2.0534, 1.8042, 1.6016], + device='cuda:5'), covar=tensor([0.0311, 0.0270, 0.0546, 0.0287, 0.0172, 0.0383, 0.0309, 0.0328], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0108, 0.0137, 0.0112, 0.0099, 0.0101, 0.0091, 0.0106], + device='cuda:5'), out_proj_covar=tensor([7.0002e-05, 8.3924e-05, 1.0879e-04, 8.7847e-05, 7.7611e-05, 7.4995e-05, + 6.9083e-05, 8.1619e-05], device='cuda:5') +2023-03-26 12:42:31,677 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57459.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:42:31,701 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57459.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:42:33,497 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57462.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:42:35,983 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1996, 3.6335, 3.8395, 4.0026, 3.9665, 3.7097, 4.2809, 1.4628], + device='cuda:5'), covar=tensor([0.0842, 0.0858, 0.0819, 0.1135, 0.1293, 0.1574, 0.0674, 0.5357], + device='cuda:5'), in_proj_covar=tensor([0.0348, 0.0245, 0.0276, 0.0290, 0.0331, 0.0285, 0.0301, 0.0296], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:42:36,012 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6132, 1.5157, 1.5349, 1.5969, 1.0587, 3.3853, 1.3994, 1.9945], + device='cuda:5'), covar=tensor([0.3314, 0.2417, 0.2062, 0.2272, 0.1899, 0.0200, 0.2650, 0.1189], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0115, 0.0119, 0.0122, 0.0115, 0.0098, 0.0099, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 12:42:44,007 INFO [finetune.py:976] (5/7) Epoch 11, batch 200, loss[loss=0.2249, simple_loss=0.2749, pruned_loss=0.0875, over 4228.00 frames. ], tot_loss[loss=0.1945, simple_loss=0.2564, pruned_loss=0.06626, over 604366.99 frames. 
], batch size: 65, lr: 3.72e-03, grad_scale: 16.0 +2023-03-26 12:42:52,814 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1518, 1.7637, 1.9379, 1.9954, 1.7340, 1.7713, 1.9980, 1.8237], + device='cuda:5'), covar=tensor([0.5377, 0.5611, 0.5092, 0.5366, 0.7100, 0.5633, 0.6700, 0.5045], + device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0239, 0.0254, 0.0257, 0.0252, 0.0229, 0.0274, 0.0230], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:42:54,085 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.61 vs. limit=5.0 +2023-03-26 12:42:56,412 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57495.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:43:03,708 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=57507.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:43:14,465 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57523.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:43:17,310 INFO [finetune.py:976] (5/7) Epoch 11, batch 250, loss[loss=0.2213, simple_loss=0.287, pruned_loss=0.07776, over 4851.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2596, pruned_loss=0.06646, over 682837.63 frames. ], batch size: 47, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:43:22,638 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.287e+02 1.584e+02 1.966e+02 2.356e+02 4.681e+02, threshold=3.932e+02, percent-clipped=1.0 +2023-03-26 12:43:26,176 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5215, 3.3716, 3.2307, 1.6079, 3.4786, 2.5142, 0.7918, 2.1800], + device='cuda:5'), covar=tensor([0.2516, 0.2046, 0.1615, 0.3168, 0.1233, 0.1130, 0.4235, 0.1571], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0173, 0.0158, 0.0128, 0.0155, 0.0121, 0.0144, 0.0121], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 12:43:27,973 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57542.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:43:43,776 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57553.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 12:43:45,616 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57556.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:43:46,877 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3180, 1.8630, 2.2736, 2.2641, 2.0054, 1.9508, 2.1578, 2.1276], + device='cuda:5'), covar=tensor([0.4389, 0.5124, 0.4227, 0.4652, 0.6045, 0.4458, 0.5747, 0.3881], + device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0239, 0.0254, 0.0257, 0.0252, 0.0229, 0.0274, 0.0230], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:43:53,989 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.8650, 3.3086, 3.5393, 3.7138, 3.5997, 3.3568, 3.9172, 1.2202], + device='cuda:5'), covar=tensor([0.1009, 0.1066, 0.0940, 0.1280, 0.1537, 0.1764, 0.0895, 0.5763], + device='cuda:5'), in_proj_covar=tensor([0.0351, 0.0247, 0.0277, 0.0292, 0.0333, 0.0287, 0.0302, 0.0297], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:43:55,219 INFO [zipformer.py:1188] 
(5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57564.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:44:08,374 INFO [finetune.py:976] (5/7) Epoch 11, batch 300, loss[loss=0.1609, simple_loss=0.2223, pruned_loss=0.04978, over 4439.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.262, pruned_loss=0.06683, over 742207.55 frames. ], batch size: 19, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:44:09,337 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.10 vs. limit=5.0 +2023-03-26 12:44:18,957 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8314, 3.6694, 3.4332, 1.8580, 3.8014, 2.7499, 0.7845, 2.3989], + device='cuda:5'), covar=tensor([0.2291, 0.1774, 0.1612, 0.3194, 0.1072, 0.1067, 0.4558, 0.1533], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0173, 0.0159, 0.0128, 0.0156, 0.0121, 0.0144, 0.0121], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 12:44:24,431 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57600.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:44:25,776 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-03-26 12:44:31,734 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=57612.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:44:40,868 INFO [finetune.py:976] (5/7) Epoch 11, batch 350, loss[loss=0.1981, simple_loss=0.2711, pruned_loss=0.06249, over 4904.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.264, pruned_loss=0.06654, over 789789.23 frames. ], batch size: 35, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:44:46,739 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.573e+02 1.819e+02 2.403e+02 4.156e+02, threshold=3.639e+02, percent-clipped=1.0 +2023-03-26 12:44:56,789 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57649.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:44:58,442 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57651.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:45:14,036 INFO [finetune.py:976] (5/7) Epoch 11, batch 400, loss[loss=0.1604, simple_loss=0.2252, pruned_loss=0.04777, over 4723.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2635, pruned_loss=0.06555, over 824687.16 frames. 
], batch size: 23, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:45:26,788 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3442, 1.3999, 1.6425, 1.1852, 1.3149, 1.4643, 1.3477, 1.6139], + device='cuda:5'), covar=tensor([0.1182, 0.1949, 0.1208, 0.1343, 0.0978, 0.1213, 0.2790, 0.0846], + device='cuda:5'), in_proj_covar=tensor([0.0199, 0.0206, 0.0194, 0.0192, 0.0178, 0.0216, 0.0219, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:45:30,902 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=57697.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:45:32,136 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=57699.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:45:46,099 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2102, 2.0365, 1.7380, 2.1955, 2.2337, 1.9198, 2.5875, 2.1843], + device='cuda:5'), covar=tensor([0.1514, 0.2422, 0.3469, 0.2799, 0.2796, 0.1851, 0.3032, 0.2022], + device='cuda:5'), in_proj_covar=tensor([0.0175, 0.0188, 0.0234, 0.0255, 0.0240, 0.0197, 0.0213, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:45:49,609 INFO [finetune.py:976] (5/7) Epoch 11, batch 450, loss[loss=0.1546, simple_loss=0.2271, pruned_loss=0.04102, over 4823.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.2609, pruned_loss=0.06444, over 853255.60 frames. ], batch size: 40, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:45:51,639 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-26 12:45:53,865 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57733.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:45:55,473 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.158e+02 1.602e+02 1.902e+02 2.220e+02 3.989e+02, threshold=3.804e+02, percent-clipped=2.0 +2023-03-26 12:46:15,572 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57754.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:46:32,832 INFO [finetune.py:976] (5/7) Epoch 11, batch 500, loss[loss=0.181, simple_loss=0.2482, pruned_loss=0.05684, over 4781.00 frames. ], tot_loss[loss=0.1937, simple_loss=0.259, pruned_loss=0.06417, over 873187.60 frames. ], batch size: 29, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:46:33,032 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. 
limit=2.0 +2023-03-26 12:46:45,713 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57794.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:46:54,683 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4947, 1.4540, 1.6246, 1.6351, 1.5301, 3.1200, 1.2655, 1.5893], + device='cuda:5'), covar=tensor([0.0895, 0.1570, 0.1097, 0.0877, 0.1399, 0.0256, 0.1387, 0.1562], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0081, 0.0075, 0.0078, 0.0091, 0.0082, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 12:47:01,359 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57818.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:47:06,699 INFO [finetune.py:976] (5/7) Epoch 11, batch 550, loss[loss=0.1998, simple_loss=0.2611, pruned_loss=0.06926, over 4815.00 frames. ], tot_loss[loss=0.1934, simple_loss=0.2578, pruned_loss=0.06454, over 892125.39 frames. ], batch size: 41, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:47:11,536 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.185e+02 1.635e+02 1.936e+02 2.160e+02 3.511e+02, threshold=3.871e+02, percent-clipped=0.0 +2023-03-26 12:47:16,784 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57842.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:47:23,738 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57851.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:47:24,992 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57853.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 12:47:28,015 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3736, 2.8865, 2.7931, 1.2871, 2.9638, 2.0877, 0.8386, 1.8229], + device='cuda:5'), covar=tensor([0.2441, 0.2452, 0.1792, 0.3413, 0.1549, 0.1214, 0.4104, 0.1650], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0173, 0.0159, 0.0127, 0.0156, 0.0121, 0.0145, 0.0121], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 12:47:40,101 INFO [finetune.py:976] (5/7) Epoch 11, batch 600, loss[loss=0.181, simple_loss=0.2541, pruned_loss=0.05398, over 4810.00 frames. ], tot_loss[loss=0.1933, simple_loss=0.258, pruned_loss=0.0643, over 906260.97 frames. ], batch size: 51, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:47:48,476 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=57890.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:47:56,175 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57900.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:47:56,760 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=57901.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:48:13,625 INFO [finetune.py:976] (5/7) Epoch 11, batch 650, loss[loss=0.2183, simple_loss=0.2905, pruned_loss=0.07305, over 4902.00 frames. ], tot_loss[loss=0.1946, simple_loss=0.2596, pruned_loss=0.0648, over 915995.79 frames. 
], batch size: 36, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:48:18,497 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.568e+02 1.897e+02 2.360e+02 4.682e+02, threshold=3.793e+02, percent-clipped=3.0 +2023-03-26 12:48:27,454 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=57948.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:48:30,326 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6286, 1.5283, 1.4180, 1.7098, 2.0045, 1.7679, 1.2367, 1.3403], + device='cuda:5'), covar=tensor([0.2183, 0.2088, 0.1914, 0.1647, 0.1658, 0.1191, 0.2635, 0.1937], + device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0208, 0.0208, 0.0189, 0.0242, 0.0181, 0.0213, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:48:48,747 INFO [finetune.py:976] (5/7) Epoch 11, batch 700, loss[loss=0.173, simple_loss=0.2383, pruned_loss=0.05389, over 4747.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2612, pruned_loss=0.06471, over 926980.21 frames. ], batch size: 59, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:49:01,312 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6505, 1.5545, 1.4037, 1.4831, 1.7725, 1.4362, 1.8385, 1.6376], + device='cuda:5'), covar=tensor([0.1345, 0.2109, 0.2828, 0.2311, 0.2399, 0.1609, 0.2716, 0.1849], + device='cuda:5'), in_proj_covar=tensor([0.0175, 0.0188, 0.0233, 0.0254, 0.0240, 0.0196, 0.0212, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:49:44,563 INFO [finetune.py:976] (5/7) Epoch 11, batch 750, loss[loss=0.1978, simple_loss=0.273, pruned_loss=0.06127, over 4874.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2634, pruned_loss=0.06582, over 932786.08 frames. ], batch size: 31, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:49:49,412 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.116e+01 1.579e+02 1.894e+02 2.321e+02 4.436e+02, threshold=3.789e+02, percent-clipped=3.0 +2023-03-26 12:50:02,181 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=58054.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:50:18,112 INFO [finetune.py:976] (5/7) Epoch 11, batch 800, loss[loss=0.1878, simple_loss=0.2566, pruned_loss=0.05954, over 4920.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2629, pruned_loss=0.06505, over 939186.68 frames. ], batch size: 38, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:50:25,468 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58089.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:50:33,870 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=58102.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:50:45,549 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=58118.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:50:51,412 INFO [finetune.py:976] (5/7) Epoch 11, batch 850, loss[loss=0.2368, simple_loss=0.2925, pruned_loss=0.09057, over 4808.00 frames. ], tot_loss[loss=0.1934, simple_loss=0.2594, pruned_loss=0.06373, over 943290.32 frames. 
], batch size: 39, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:50:56,227 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.394e+01 1.505e+02 1.749e+02 2.082e+02 4.545e+02, threshold=3.498e+02, percent-clipped=2.0 +2023-03-26 12:50:56,924 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3884, 2.1453, 1.8003, 0.9318, 2.0033, 1.9060, 1.7869, 2.0535], + device='cuda:5'), covar=tensor([0.0872, 0.0775, 0.1468, 0.1829, 0.1280, 0.1787, 0.1802, 0.0889], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0201, 0.0202, 0.0188, 0.0216, 0.0209, 0.0224, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:50:59,938 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58141.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:51:01,774 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58144.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:51:05,920 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=58151.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:51:09,349 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6535, 1.4216, 1.0159, 0.2163, 1.1743, 1.5310, 1.4959, 1.3922], + device='cuda:5'), covar=tensor([0.0796, 0.0912, 0.1343, 0.1939, 0.1519, 0.2455, 0.2241, 0.0879], + device='cuda:5'), in_proj_covar=tensor([0.0167, 0.0201, 0.0202, 0.0188, 0.0216, 0.0208, 0.0223, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:51:23,204 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=58166.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:51:33,584 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7107, 1.5404, 2.1063, 3.3802, 2.1933, 2.3929, 0.9381, 2.6750], + device='cuda:5'), covar=tensor([0.1773, 0.1433, 0.1326, 0.0586, 0.0827, 0.1488, 0.1923, 0.0609], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0116, 0.0133, 0.0163, 0.0101, 0.0137, 0.0125, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 12:51:35,955 INFO [finetune.py:976] (5/7) Epoch 11, batch 900, loss[loss=0.1503, simple_loss=0.2314, pruned_loss=0.03463, over 4761.00 frames. ], tot_loss[loss=0.1911, simple_loss=0.2567, pruned_loss=0.06278, over 945341.53 frames. 
], batch size: 28, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:51:57,409 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=58199.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:51:59,476 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58202.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:52:01,305 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58205.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:52:12,171 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6295, 1.6280, 1.4906, 1.5762, 1.0180, 3.3004, 1.4935, 1.9903], + device='cuda:5'), covar=tensor([0.3275, 0.2237, 0.2032, 0.2269, 0.1811, 0.0212, 0.2749, 0.1233], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0121, 0.0123, 0.0115, 0.0098, 0.0099, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 12:52:17,500 INFO [finetune.py:976] (5/7) Epoch 11, batch 950, loss[loss=0.204, simple_loss=0.249, pruned_loss=0.0795, over 4420.00 frames. ], tot_loss[loss=0.1913, simple_loss=0.2564, pruned_loss=0.06309, over 947029.41 frames. ], batch size: 19, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:52:22,879 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.011e+02 1.516e+02 1.975e+02 2.310e+02 4.008e+02, threshold=3.950e+02, percent-clipped=1.0 +2023-03-26 12:52:33,973 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5934, 1.3745, 2.1820, 3.2095, 2.0948, 2.3435, 0.9686, 2.5301], + device='cuda:5'), covar=tensor([0.1741, 0.1592, 0.1243, 0.0585, 0.0861, 0.1532, 0.1868, 0.0582], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0116, 0.0133, 0.0163, 0.0101, 0.0138, 0.0125, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 12:52:51,454 INFO [finetune.py:976] (5/7) Epoch 11, batch 1000, loss[loss=0.1764, simple_loss=0.2454, pruned_loss=0.0537, over 4681.00 frames. ], tot_loss[loss=0.1936, simple_loss=0.2591, pruned_loss=0.06404, over 950180.86 frames. ], batch size: 23, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:53:46,405 INFO [finetune.py:976] (5/7) Epoch 11, batch 1050, loss[loss=0.2804, simple_loss=0.3221, pruned_loss=0.1193, over 4292.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2627, pruned_loss=0.06504, over 952289.24 frames. ], batch size: 65, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:53:51,320 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.197e+02 1.617e+02 2.003e+02 2.375e+02 3.670e+02, threshold=4.006e+02, percent-clipped=0.0 +2023-03-26 12:54:02,559 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.89 vs. limit=5.0 +2023-03-26 12:54:11,828 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1194, 1.9112, 1.9303, 0.9215, 2.1505, 2.3461, 1.9816, 1.8047], + device='cuda:5'), covar=tensor([0.0870, 0.0625, 0.0426, 0.0738, 0.0464, 0.0508, 0.0414, 0.0671], + device='cuda:5'), in_proj_covar=tensor([0.0126, 0.0152, 0.0119, 0.0131, 0.0129, 0.0122, 0.0141, 0.0144], + device='cuda:5'), out_proj_covar=tensor([9.3441e-05, 1.1144e-04, 8.5891e-05, 9.4605e-05, 9.1906e-05, 8.9067e-05, + 1.0343e-04, 1.0546e-04], device='cuda:5') +2023-03-26 12:54:42,550 INFO [finetune.py:976] (5/7) Epoch 11, batch 1100, loss[loss=0.2074, simple_loss=0.2788, pruned_loss=0.06799, over 4898.00 frames. 
], tot_loss[loss=0.1968, simple_loss=0.2629, pruned_loss=0.06529, over 953565.77 frames. ], batch size: 43, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:54:55,682 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=58389.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:55:15,880 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-26 12:55:23,529 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9579, 1.8185, 1.6718, 1.9976, 2.4440, 2.0224, 1.9064, 1.6856], + device='cuda:5'), covar=tensor([0.1686, 0.1813, 0.1563, 0.1318, 0.1737, 0.1013, 0.2114, 0.1590], + device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0207, 0.0207, 0.0189, 0.0241, 0.0181, 0.0213, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:55:35,371 INFO [finetune.py:976] (5/7) Epoch 11, batch 1150, loss[loss=0.1684, simple_loss=0.2364, pruned_loss=0.05019, over 4867.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2629, pruned_loss=0.06533, over 951234.21 frames. ], batch size: 34, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:55:40,654 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.092e+02 1.672e+02 1.870e+02 2.321e+02 4.403e+02, threshold=3.740e+02, percent-clipped=1.0 +2023-03-26 12:55:41,941 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=58437.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:55:42,012 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3699, 1.4427, 1.5401, 0.8928, 1.4433, 1.6546, 1.6827, 1.2956], + device='cuda:5'), covar=tensor([0.0821, 0.0578, 0.0474, 0.0474, 0.0438, 0.0513, 0.0333, 0.0647], + device='cuda:5'), in_proj_covar=tensor([0.0126, 0.0152, 0.0119, 0.0131, 0.0129, 0.0122, 0.0141, 0.0144], + device='cuda:5'), out_proj_covar=tensor([9.3444e-05, 1.1168e-04, 8.5887e-05, 9.4596e-05, 9.1801e-05, 8.9122e-05, + 1.0344e-04, 1.0542e-04], device='cuda:5') +2023-03-26 12:55:54,580 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4178, 1.3880, 1.3412, 1.3662, 1.0244, 2.2298, 0.8944, 1.3489], + device='cuda:5'), covar=tensor([0.3866, 0.2770, 0.2286, 0.2844, 0.1692, 0.0483, 0.2599, 0.1242], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0115, 0.0119, 0.0122, 0.0115, 0.0097, 0.0098, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 12:56:08,438 INFO [finetune.py:976] (5/7) Epoch 11, batch 1200, loss[loss=0.1509, simple_loss=0.2224, pruned_loss=0.03972, over 4819.00 frames. ], tot_loss[loss=0.196, simple_loss=0.262, pruned_loss=0.06501, over 952427.88 frames. 
], batch size: 38, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:56:08,569 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0740, 1.9840, 1.7546, 2.1731, 2.7295, 2.2206, 1.9946, 1.6293], + device='cuda:5'), covar=tensor([0.1989, 0.2044, 0.1722, 0.1502, 0.1604, 0.1017, 0.2126, 0.1808], + device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0208, 0.0207, 0.0189, 0.0242, 0.0181, 0.0213, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:56:15,630 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1799, 2.0616, 1.7322, 2.0943, 2.1019, 1.7832, 2.5081, 2.1742], + device='cuda:5'), covar=tensor([0.1208, 0.2159, 0.2996, 0.2470, 0.2550, 0.1559, 0.3051, 0.1712], + device='cuda:5'), in_proj_covar=tensor([0.0174, 0.0187, 0.0231, 0.0253, 0.0238, 0.0195, 0.0211, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:56:21,456 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58497.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:56:23,265 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58500.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:56:37,016 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7555, 1.4390, 2.0890, 1.4852, 1.7081, 1.8980, 1.4736, 2.0055], + device='cuda:5'), covar=tensor([0.1078, 0.2058, 0.0988, 0.1442, 0.0976, 0.1161, 0.2797, 0.0807], + device='cuda:5'), in_proj_covar=tensor([0.0199, 0.0207, 0.0194, 0.0192, 0.0179, 0.0216, 0.0219, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:56:40,497 INFO [finetune.py:976] (5/7) Epoch 11, batch 1250, loss[loss=0.1826, simple_loss=0.2444, pruned_loss=0.06043, over 4796.00 frames. ], tot_loss[loss=0.1945, simple_loss=0.2596, pruned_loss=0.0647, over 953264.97 frames. ], batch size: 25, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:56:46,792 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.332e+01 1.581e+02 1.822e+02 2.261e+02 4.369e+02, threshold=3.644e+02, percent-clipped=3.0 +2023-03-26 12:57:08,449 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7090, 0.6931, 1.6719, 1.5327, 1.4981, 1.4014, 1.4582, 1.5921], + device='cuda:5'), covar=tensor([0.3837, 0.4533, 0.3803, 0.3892, 0.4723, 0.3914, 0.4765, 0.3590], + device='cuda:5'), in_proj_covar=tensor([0.0236, 0.0238, 0.0252, 0.0256, 0.0252, 0.0228, 0.0273, 0.0229], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 12:57:15,452 INFO [finetune.py:976] (5/7) Epoch 11, batch 1300, loss[loss=0.1529, simple_loss=0.2199, pruned_loss=0.04296, over 4760.00 frames. ], tot_loss[loss=0.1918, simple_loss=0.2566, pruned_loss=0.06346, over 952754.93 frames. 
], batch size: 28, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:57:30,723 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8415, 1.6628, 2.2234, 3.4497, 2.3611, 2.3197, 1.0158, 2.6215], + device='cuda:5'), covar=tensor([0.1711, 0.1429, 0.1382, 0.0640, 0.0822, 0.1441, 0.1996, 0.0667], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0132, 0.0163, 0.0100, 0.0137, 0.0124, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 12:57:48,892 INFO [finetune.py:976] (5/7) Epoch 11, batch 1350, loss[loss=0.2197, simple_loss=0.2864, pruned_loss=0.07648, over 4810.00 frames. ], tot_loss[loss=0.1929, simple_loss=0.2578, pruned_loss=0.06402, over 953878.12 frames. ], batch size: 45, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:57:54,730 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.484e+01 1.581e+02 1.914e+02 2.266e+02 4.857e+02, threshold=3.829e+02, percent-clipped=2.0 +2023-03-26 12:58:20,729 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-03-26 12:58:23,952 INFO [finetune.py:976] (5/7) Epoch 11, batch 1400, loss[loss=0.1718, simple_loss=0.2267, pruned_loss=0.05849, over 4698.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2617, pruned_loss=0.06553, over 953251.46 frames. ], batch size: 23, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:58:27,006 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58681.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:58:56,043 INFO [finetune.py:976] (5/7) Epoch 11, batch 1450, loss[loss=0.2018, simple_loss=0.2681, pruned_loss=0.06772, over 4885.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2626, pruned_loss=0.06517, over 954476.60 frames. ], batch size: 32, lr: 3.71e-03, grad_scale: 16.0 +2023-03-26 12:59:00,192 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7455, 1.6782, 1.5792, 1.6899, 1.2863, 4.0170, 1.6407, 2.0108], + device='cuda:5'), covar=tensor([0.3232, 0.2303, 0.2015, 0.2251, 0.1715, 0.0136, 0.2711, 0.1275], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0120, 0.0123, 0.0115, 0.0098, 0.0099, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 12:59:01,961 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.107e+02 1.669e+02 2.009e+02 2.318e+02 4.324e+02, threshold=4.017e+02, percent-clipped=1.0 +2023-03-26 12:59:07,264 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58742.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:59:26,243 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5556, 1.5375, 1.8378, 1.8135, 1.5591, 3.4527, 1.3473, 1.6142], + device='cuda:5'), covar=tensor([0.0935, 0.1757, 0.1137, 0.0988, 0.1632, 0.0225, 0.1497, 0.1744], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0081, 0.0074, 0.0077, 0.0091, 0.0081, 0.0084, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 12:59:35,093 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58775.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 12:59:36,229 INFO [finetune.py:976] (5/7) Epoch 11, batch 1500, loss[loss=0.1689, simple_loss=0.228, pruned_loss=0.05497, over 4708.00 frames. 
], tot_loss[loss=0.1976, simple_loss=0.2636, pruned_loss=0.06579, over 950555.98 frames. ], batch size: 23, lr: 3.70e-03, grad_scale: 16.0 +2023-03-26 12:59:58,355 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=58797.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:00:04,298 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=58800.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:00:13,609 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4342, 2.2923, 1.7070, 2.3586, 2.3551, 1.9750, 2.8214, 2.3681], + device='cuda:5'), covar=tensor([0.1327, 0.2803, 0.3410, 0.3260, 0.2684, 0.1743, 0.3267, 0.2043], + device='cuda:5'), in_proj_covar=tensor([0.0175, 0.0188, 0.0232, 0.0255, 0.0240, 0.0196, 0.0212, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:00:17,199 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. limit=2.0 +2023-03-26 13:00:34,186 INFO [finetune.py:976] (5/7) Epoch 11, batch 1550, loss[loss=0.2429, simple_loss=0.3099, pruned_loss=0.08792, over 4894.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2639, pruned_loss=0.06547, over 950811.75 frames. ], batch size: 43, lr: 3.70e-03, grad_scale: 16.0 +2023-03-26 13:00:39,951 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.185e+02 1.569e+02 1.959e+02 2.197e+02 4.059e+02, threshold=3.918e+02, percent-clipped=1.0 +2023-03-26 13:00:41,208 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58836.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:00:47,623 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=58845.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:00:49,961 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=58848.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:01:07,921 INFO [finetune.py:976] (5/7) Epoch 11, batch 1600, loss[loss=0.1846, simple_loss=0.2473, pruned_loss=0.06097, over 4820.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2632, pruned_loss=0.06642, over 951891.84 frames. ], batch size: 33, lr: 3.70e-03, grad_scale: 16.0 +2023-03-26 13:01:28,380 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58906.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:01:50,385 INFO [finetune.py:976] (5/7) Epoch 11, batch 1650, loss[loss=0.1877, simple_loss=0.2556, pruned_loss=0.05987, over 4828.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2607, pruned_loss=0.06551, over 953214.17 frames. ], batch size: 41, lr: 3.70e-03, grad_scale: 16.0 +2023-03-26 13:01:55,162 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. 
limit=2.0 +2023-03-26 13:01:55,255 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.149e+02 1.664e+02 1.923e+02 2.390e+02 4.121e+02, threshold=3.846e+02, percent-clipped=1.0 +2023-03-26 13:02:02,855 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4872, 2.8060, 2.4584, 1.8521, 2.4970, 2.8156, 2.8786, 2.4768], + device='cuda:5'), covar=tensor([0.0657, 0.0502, 0.0754, 0.0994, 0.0868, 0.0720, 0.0567, 0.0887], + device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0134, 0.0143, 0.0126, 0.0122, 0.0145, 0.0145, 0.0164], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:02:18,273 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58967.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:02:19,483 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4406, 1.4752, 1.6937, 1.7516, 1.5443, 3.1175, 1.2547, 1.6051], + device='cuda:5'), covar=tensor([0.0948, 0.1616, 0.1275, 0.0966, 0.1442, 0.0268, 0.1433, 0.1541], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0080, 0.0074, 0.0077, 0.0090, 0.0080, 0.0084, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 13:02:24,167 INFO [finetune.py:976] (5/7) Epoch 11, batch 1700, loss[loss=0.2176, simple_loss=0.2734, pruned_loss=0.08088, over 4820.00 frames. ], tot_loss[loss=0.1932, simple_loss=0.2578, pruned_loss=0.06428, over 953670.32 frames. ], batch size: 38, lr: 3.70e-03, grad_scale: 16.0 +2023-03-26 13:02:57,903 INFO [finetune.py:976] (5/7) Epoch 11, batch 1750, loss[loss=0.2, simple_loss=0.2657, pruned_loss=0.06717, over 4767.00 frames. ], tot_loss[loss=0.1936, simple_loss=0.2587, pruned_loss=0.06423, over 954557.93 frames. ], batch size: 27, lr: 3.70e-03, grad_scale: 16.0 +2023-03-26 13:03:02,754 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.140e+02 1.620e+02 1.895e+02 2.249e+02 5.052e+02, threshold=3.790e+02, percent-clipped=2.0 +2023-03-26 13:03:04,075 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59037.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:03:33,672 INFO [finetune.py:976] (5/7) Epoch 11, batch 1800, loss[loss=0.1837, simple_loss=0.2575, pruned_loss=0.05499, over 4865.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2618, pruned_loss=0.06506, over 953990.40 frames. 
], batch size: 31, lr: 3.70e-03, grad_scale: 16.0 +2023-03-26 13:03:34,665 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4666, 1.2905, 1.7412, 2.9031, 1.8534, 2.0689, 0.8358, 2.3469], + device='cuda:5'), covar=tensor([0.1866, 0.1622, 0.1350, 0.0668, 0.0971, 0.1389, 0.1926, 0.0668], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0116, 0.0132, 0.0163, 0.0101, 0.0137, 0.0125, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 13:04:02,987 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2029, 3.6374, 3.8435, 4.0953, 3.9915, 3.7332, 4.2890, 1.3678], + device='cuda:5'), covar=tensor([0.0782, 0.0787, 0.0850, 0.0920, 0.1168, 0.1383, 0.0666, 0.5452], + device='cuda:5'), in_proj_covar=tensor([0.0349, 0.0244, 0.0275, 0.0292, 0.0332, 0.0284, 0.0301, 0.0295], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:04:19,597 INFO [finetune.py:976] (5/7) Epoch 11, batch 1850, loss[loss=0.1624, simple_loss=0.2379, pruned_loss=0.04348, over 4781.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2631, pruned_loss=0.06555, over 954193.00 frames. ], batch size: 25, lr: 3.70e-03, grad_scale: 16.0 +2023-03-26 13:04:22,098 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59131.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:04:24,431 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.037e+02 1.668e+02 2.065e+02 2.636e+02 4.490e+02, threshold=4.130e+02, percent-clipped=5.0 +2023-03-26 13:04:43,998 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59164.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:04:57,368 INFO [finetune.py:976] (5/7) Epoch 11, batch 1900, loss[loss=0.2095, simple_loss=0.2822, pruned_loss=0.06839, over 4889.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2649, pruned_loss=0.06639, over 953744.03 frames. ], batch size: 32, lr: 3.70e-03, grad_scale: 16.0 +2023-03-26 13:05:46,835 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59225.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:05:47,935 INFO [finetune.py:976] (5/7) Epoch 11, batch 1950, loss[loss=0.2404, simple_loss=0.2889, pruned_loss=0.096, over 4807.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2642, pruned_loss=0.06652, over 954610.30 frames. ], batch size: 40, lr: 3.70e-03, grad_scale: 16.0 +2023-03-26 13:05:59,308 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.090e+02 1.570e+02 1.817e+02 2.294e+02 4.310e+02, threshold=3.633e+02, percent-clipped=1.0 +2023-03-26 13:06:08,275 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-03-26 13:06:23,123 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-03-26 13:06:30,357 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59262.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:06:51,936 INFO [finetune.py:976] (5/7) Epoch 11, batch 2000, loss[loss=0.2009, simple_loss=0.2703, pruned_loss=0.0658, over 4905.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2615, pruned_loss=0.06584, over 956832.34 frames. 
], batch size: 36, lr: 3.70e-03, grad_scale: 32.0 +2023-03-26 13:07:03,825 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0128, 1.9384, 1.6045, 1.8116, 2.0173, 1.6923, 2.2419, 2.0270], + device='cuda:5'), covar=tensor([0.1332, 0.2068, 0.3048, 0.2442, 0.2514, 0.1616, 0.3055, 0.1706], + device='cuda:5'), in_proj_covar=tensor([0.0174, 0.0186, 0.0230, 0.0252, 0.0237, 0.0195, 0.0210, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:07:15,797 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1693, 2.1645, 2.1982, 1.5885, 2.1629, 2.2826, 2.3207, 1.8482], + device='cuda:5'), covar=tensor([0.0555, 0.0579, 0.0660, 0.0945, 0.0611, 0.0698, 0.0592, 0.1032], + device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0133, 0.0142, 0.0125, 0.0121, 0.0143, 0.0144, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:07:37,498 INFO [finetune.py:976] (5/7) Epoch 11, batch 2050, loss[loss=0.1462, simple_loss=0.2229, pruned_loss=0.03477, over 4785.00 frames. ], tot_loss[loss=0.1932, simple_loss=0.2579, pruned_loss=0.06424, over 958453.90 frames. ], batch size: 28, lr: 3.70e-03, grad_scale: 32.0 +2023-03-26 13:07:42,279 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.122e+01 1.513e+02 1.843e+02 2.174e+02 3.611e+02, threshold=3.686e+02, percent-clipped=0.0 +2023-03-26 13:07:50,360 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59337.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:08:00,509 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.68 vs. limit=5.0 +2023-03-26 13:08:17,288 INFO [finetune.py:976] (5/7) Epoch 11, batch 2100, loss[loss=0.1991, simple_loss=0.2614, pruned_loss=0.0684, over 4755.00 frames. ], tot_loss[loss=0.1923, simple_loss=0.2569, pruned_loss=0.06388, over 958608.73 frames. ], batch size: 28, lr: 3.70e-03, grad_scale: 32.0 +2023-03-26 13:08:27,934 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=59385.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:09:08,921 INFO [finetune.py:976] (5/7) Epoch 11, batch 2150, loss[loss=0.2614, simple_loss=0.3293, pruned_loss=0.09677, over 4743.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2597, pruned_loss=0.06535, over 956604.31 frames. ], batch size: 59, lr: 3.70e-03, grad_scale: 32.0 +2023-03-26 13:09:13,329 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59431.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:09:15,688 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.234e+02 1.596e+02 1.893e+02 2.254e+02 5.168e+02, threshold=3.786e+02, percent-clipped=3.0 +2023-03-26 13:09:18,968 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.63 vs. 
limit=2.0 +2023-03-26 13:09:51,476 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0758, 1.8151, 1.5898, 1.5054, 1.8165, 1.8387, 1.8071, 2.4930], + device='cuda:5'), covar=tensor([0.4542, 0.3836, 0.3672, 0.4278, 0.4153, 0.2690, 0.3814, 0.1876], + device='cuda:5'), in_proj_covar=tensor([0.0288, 0.0262, 0.0225, 0.0281, 0.0244, 0.0211, 0.0248, 0.0217], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:09:54,869 INFO [finetune.py:976] (5/7) Epoch 11, batch 2200, loss[loss=0.2316, simple_loss=0.3024, pruned_loss=0.08036, over 4911.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2629, pruned_loss=0.06605, over 956645.02 frames. ], batch size: 37, lr: 3.70e-03, grad_scale: 32.0 +2023-03-26 13:09:56,667 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=59479.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:10:13,064 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6673, 1.5243, 1.9585, 1.9783, 1.6964, 3.6585, 1.4070, 1.6886], + device='cuda:5'), covar=tensor([0.0967, 0.1780, 0.1032, 0.0960, 0.1603, 0.0297, 0.1594, 0.1729], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0081, 0.0074, 0.0077, 0.0091, 0.0081, 0.0084, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 13:10:13,088 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59502.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:10:15,550 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5232, 2.7344, 2.5282, 1.7659, 2.5915, 2.8018, 2.7760, 2.3769], + device='cuda:5'), covar=tensor([0.0519, 0.0472, 0.0567, 0.0867, 0.0791, 0.0583, 0.0504, 0.0816], + device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0133, 0.0142, 0.0126, 0.0120, 0.0143, 0.0144, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:10:25,077 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59520.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:10:30,589 INFO [finetune.py:976] (5/7) Epoch 11, batch 2250, loss[loss=0.2016, simple_loss=0.2766, pruned_loss=0.06327, over 4752.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2638, pruned_loss=0.06616, over 955760.53 frames. 
], batch size: 54, lr: 3.70e-03, grad_scale: 32.0 +2023-03-26 13:10:31,271 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5835, 3.3797, 3.2704, 1.5748, 3.5221, 2.5403, 0.8365, 2.2184], + device='cuda:5'), covar=tensor([0.2392, 0.2058, 0.1566, 0.3472, 0.1083, 0.1183, 0.4266, 0.1577], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0173, 0.0160, 0.0129, 0.0156, 0.0121, 0.0145, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 13:10:37,560 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.128e+02 1.729e+02 2.023e+02 2.518e+02 3.990e+02, threshold=4.047e+02, percent-clipped=2.0 +2023-03-26 13:11:02,695 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59562.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:11:02,744 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.5753, 1.7739, 1.9005, 1.0075, 1.9373, 2.0834, 1.9908, 1.5375], + device='cuda:5'), covar=tensor([0.0926, 0.0574, 0.0418, 0.0588, 0.0402, 0.0621, 0.0315, 0.0705], + device='cuda:5'), in_proj_covar=tensor([0.0127, 0.0153, 0.0121, 0.0132, 0.0130, 0.0124, 0.0142, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.4078e-05, 1.1222e-04, 8.7132e-05, 9.5434e-05, 9.2410e-05, 9.0035e-05, + 1.0425e-04, 1.0649e-04], device='cuda:5') +2023-03-26 13:11:03,353 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59563.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:11:13,471 INFO [finetune.py:976] (5/7) Epoch 11, batch 2300, loss[loss=0.1535, simple_loss=0.2308, pruned_loss=0.0381, over 4896.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2637, pruned_loss=0.06553, over 954036.26 frames. ], batch size: 46, lr: 3.70e-03, grad_scale: 32.0 +2023-03-26 13:11:20,703 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5601, 1.4201, 1.1612, 1.3074, 1.8525, 1.7549, 1.6413, 1.3170], + device='cuda:5'), covar=tensor([0.0345, 0.0347, 0.0851, 0.0380, 0.0220, 0.0384, 0.0302, 0.0439], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0109, 0.0140, 0.0114, 0.0102, 0.0103, 0.0093, 0.0108], + device='cuda:5'), out_proj_covar=tensor([7.0718e-05, 8.5111e-05, 1.1125e-04, 8.9205e-05, 7.9544e-05, 7.6647e-05, + 7.0369e-05, 8.3044e-05], device='cuda:5') +2023-03-26 13:11:22,580 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.13 vs. limit=2.0 +2023-03-26 13:11:35,297 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=59610.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:11:47,098 INFO [finetune.py:976] (5/7) Epoch 11, batch 2350, loss[loss=0.1757, simple_loss=0.2482, pruned_loss=0.05157, over 4831.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2608, pruned_loss=0.06463, over 956070.60 frames. ], batch size: 33, lr: 3.70e-03, grad_scale: 32.0 +2023-03-26 13:11:52,462 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.309e+01 1.451e+02 1.728e+02 2.097e+02 4.600e+02, threshold=3.455e+02, percent-clipped=1.0 +2023-03-26 13:12:19,965 INFO [finetune.py:976] (5/7) Epoch 11, batch 2400, loss[loss=0.2013, simple_loss=0.2557, pruned_loss=0.07347, over 4827.00 frames. ], tot_loss[loss=0.1916, simple_loss=0.2566, pruned_loss=0.06329, over 954884.29 frames. ], batch size: 33, lr: 3.70e-03, grad_scale: 32.0 +2023-03-26 13:12:53,270 INFO [finetune.py:976] (5/7) Epoch 11, batch 2450, loss[loss=0.2573, simple_loss=0.3031, pruned_loss=0.1057, over 4816.00 frames. 
], tot_loss[loss=0.1905, simple_loss=0.2549, pruned_loss=0.06309, over 955673.06 frames. ], batch size: 40, lr: 3.70e-03, grad_scale: 32.0 +2023-03-26 13:13:01,211 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.102e+02 1.641e+02 1.877e+02 2.149e+02 5.374e+02, threshold=3.753e+02, percent-clipped=2.0 +2023-03-26 13:13:37,051 INFO [finetune.py:976] (5/7) Epoch 11, batch 2500, loss[loss=0.1776, simple_loss=0.2408, pruned_loss=0.0572, over 4759.00 frames. ], tot_loss[loss=0.1921, simple_loss=0.2565, pruned_loss=0.06389, over 955665.38 frames. ], batch size: 28, lr: 3.70e-03, grad_scale: 32.0 +2023-03-26 13:14:29,698 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59820.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:14:33,895 INFO [finetune.py:976] (5/7) Epoch 11, batch 2550, loss[loss=0.1758, simple_loss=0.2386, pruned_loss=0.05654, over 4725.00 frames. ], tot_loss[loss=0.1946, simple_loss=0.2599, pruned_loss=0.0646, over 955862.70 frames. ], batch size: 23, lr: 3.70e-03, grad_scale: 32.0 +2023-03-26 13:14:40,182 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.135e+02 1.636e+02 1.885e+02 2.323e+02 4.849e+02, threshold=3.771e+02, percent-clipped=2.0 +2023-03-26 13:14:49,246 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59848.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:14:57,197 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59858.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:15:03,806 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=59868.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:15:08,724 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5946, 1.5246, 1.4860, 1.5581, 1.0617, 3.3473, 1.3370, 1.7097], + device='cuda:5'), covar=tensor([0.3272, 0.2439, 0.2160, 0.2375, 0.1881, 0.0222, 0.2881, 0.1336], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0115, 0.0119, 0.0123, 0.0115, 0.0098, 0.0099, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 13:15:09,219 INFO [finetune.py:976] (5/7) Epoch 11, batch 2600, loss[loss=0.2036, simple_loss=0.2633, pruned_loss=0.07195, over 4861.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2629, pruned_loss=0.06609, over 956934.55 frames. ], batch size: 44, lr: 3.70e-03, grad_scale: 32.0 +2023-03-26 13:15:15,206 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7924, 1.7584, 1.5995, 1.9665, 2.1621, 1.9856, 1.4037, 1.5074], + device='cuda:5'), covar=tensor([0.2265, 0.2000, 0.1960, 0.1617, 0.1741, 0.1188, 0.2547, 0.2022], + device='cuda:5'), in_proj_covar=tensor([0.0237, 0.0206, 0.0208, 0.0189, 0.0241, 0.0182, 0.0212, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:15:18,120 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59889.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:15:31,240 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59909.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:15:42,445 INFO [finetune.py:976] (5/7) Epoch 11, batch 2650, loss[loss=0.1678, simple_loss=0.25, pruned_loss=0.04283, over 4811.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2631, pruned_loss=0.06544, over 956199.17 frames. 
], batch size: 39, lr: 3.70e-03, grad_scale: 32.0 +2023-03-26 13:15:47,334 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.549e+02 1.976e+02 2.444e+02 3.877e+02, threshold=3.952e+02, percent-clipped=1.0 +2023-03-26 13:15:48,052 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1269, 1.6684, 2.4551, 3.8567, 2.7204, 2.6707, 0.8965, 3.0099], + device='cuda:5'), covar=tensor([0.1632, 0.1616, 0.1341, 0.0608, 0.0719, 0.1776, 0.2013, 0.0506], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0117, 0.0134, 0.0165, 0.0101, 0.0138, 0.0126, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 13:16:03,048 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59950.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:16:29,563 INFO [finetune.py:976] (5/7) Epoch 11, batch 2700, loss[loss=0.1626, simple_loss=0.2253, pruned_loss=0.04994, over 4755.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2616, pruned_loss=0.06456, over 955084.99 frames. ], batch size: 23, lr: 3.70e-03, grad_scale: 32.0 +2023-03-26 13:16:38,535 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8692, 1.6033, 2.2109, 1.4967, 1.9830, 2.1673, 1.5580, 2.2866], + device='cuda:5'), covar=tensor([0.1272, 0.2066, 0.1340, 0.1936, 0.0851, 0.1443, 0.2803, 0.0877], + device='cuda:5'), in_proj_covar=tensor([0.0198, 0.0206, 0.0194, 0.0190, 0.0178, 0.0215, 0.0218, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:16:42,168 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-03-26 13:17:04,323 INFO [finetune.py:976] (5/7) Epoch 11, batch 2750, loss[loss=0.164, simple_loss=0.2329, pruned_loss=0.04754, over 4894.00 frames. ], tot_loss[loss=0.1929, simple_loss=0.2591, pruned_loss=0.06335, over 954832.88 frames. ], batch size: 35, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:17:09,211 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.070e+02 1.603e+02 1.823e+02 2.284e+02 4.397e+02, threshold=3.646e+02, percent-clipped=1.0 +2023-03-26 13:17:12,382 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60040.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:17:22,089 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60052.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:17:30,380 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60065.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:17:37,367 INFO [finetune.py:976] (5/7) Epoch 11, batch 2800, loss[loss=0.1889, simple_loss=0.2527, pruned_loss=0.06254, over 4897.00 frames. ], tot_loss[loss=0.1892, simple_loss=0.255, pruned_loss=0.0617, over 955609.60 frames. 
], batch size: 35, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:17:38,100 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60078.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:17:54,544 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60101.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 13:18:02,772 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60113.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 13:18:10,601 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60126.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:18:11,110 INFO [finetune.py:976] (5/7) Epoch 11, batch 2850, loss[loss=0.3312, simple_loss=0.3536, pruned_loss=0.1544, over 4031.00 frames. ], tot_loss[loss=0.19, simple_loss=0.255, pruned_loss=0.06246, over 954906.71 frames. ], batch size: 65, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:18:17,974 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.190e+02 1.579e+02 1.818e+02 2.348e+02 4.165e+02, threshold=3.636e+02, percent-clipped=3.0 +2023-03-26 13:18:20,416 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1265, 1.3528, 1.1430, 1.2763, 1.4717, 2.4955, 1.3004, 1.5028], + device='cuda:5'), covar=tensor([0.1048, 0.1843, 0.1129, 0.1016, 0.1714, 0.0380, 0.1534, 0.1748], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0081, 0.0075, 0.0078, 0.0092, 0.0081, 0.0085, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 13:18:21,055 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60139.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:18:33,370 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6417, 1.4896, 1.3970, 1.5671, 1.8727, 1.7276, 1.5834, 1.3365], + device='cuda:5'), covar=tensor([0.0272, 0.0274, 0.0516, 0.0284, 0.0198, 0.0440, 0.0307, 0.0398], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0109, 0.0140, 0.0114, 0.0102, 0.0104, 0.0093, 0.0108], + device='cuda:5'), out_proj_covar=tensor([7.1103e-05, 8.4938e-05, 1.1142e-04, 8.9295e-05, 7.9718e-05, 7.6972e-05, + 7.0418e-05, 8.3216e-05], device='cuda:5') +2023-03-26 13:18:39,238 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6430, 1.2378, 0.9122, 1.5381, 2.0758, 1.0913, 1.5078, 1.5469], + device='cuda:5'), covar=tensor([0.1566, 0.2197, 0.1997, 0.1288, 0.1996, 0.2032, 0.1549, 0.2079], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0098, 0.0115, 0.0094, 0.0122, 0.0096, 0.0101, 0.0092], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 13:18:39,249 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60158.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:18:51,669 INFO [finetune.py:976] (5/7) Epoch 11, batch 2900, loss[loss=0.3082, simple_loss=0.3626, pruned_loss=0.127, over 4890.00 frames. ], tot_loss[loss=0.1941, simple_loss=0.2593, pruned_loss=0.06441, over 955574.20 frames. 
], batch size: 43, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:19:12,024 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60198.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:19:21,318 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60204.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:19:27,580 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=60206.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:19:51,300 INFO [finetune.py:976] (5/7) Epoch 11, batch 2950, loss[loss=0.1819, simple_loss=0.241, pruned_loss=0.06136, over 4781.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2638, pruned_loss=0.06555, over 957730.27 frames. ], batch size: 25, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:19:57,802 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8325, 4.1599, 3.8402, 1.9990, 4.2203, 2.9915, 0.8123, 2.8406], + device='cuda:5'), covar=tensor([0.2162, 0.1528, 0.1730, 0.3361, 0.0988, 0.1116, 0.4765, 0.1467], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0173, 0.0159, 0.0128, 0.0155, 0.0121, 0.0146, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 13:20:00,139 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.062e+02 1.723e+02 2.035e+02 2.444e+02 4.360e+02, threshold=4.070e+02, percent-clipped=6.0 +2023-03-26 13:20:06,687 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60245.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:20:16,635 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60259.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:20:28,427 INFO [finetune.py:976] (5/7) Epoch 11, batch 3000, loss[loss=0.221, simple_loss=0.2935, pruned_loss=0.07421, over 4817.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2657, pruned_loss=0.06675, over 958538.67 frames. ], batch size: 39, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:20:28,427 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 13:20:30,198 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.2114, 1.4012, 1.4983, 0.6833, 1.4269, 1.6445, 1.7148, 1.3521], + device='cuda:5'), covar=tensor([0.0876, 0.0634, 0.0502, 0.0523, 0.0445, 0.0591, 0.0333, 0.0777], + device='cuda:5'), in_proj_covar=tensor([0.0127, 0.0153, 0.0121, 0.0132, 0.0130, 0.0124, 0.0143, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.4108e-05, 1.1193e-04, 8.6982e-05, 9.5482e-05, 9.2543e-05, 9.0303e-05, + 1.0486e-04, 1.0662e-04], device='cuda:5') +2023-03-26 13:20:35,183 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.1990, 1.4631, 1.5582, 0.6939, 1.4351, 1.6606, 1.7386, 1.3987], + device='cuda:5'), covar=tensor([0.0838, 0.0641, 0.0450, 0.0491, 0.0474, 0.0643, 0.0364, 0.0669], + device='cuda:5'), in_proj_covar=tensor([0.0127, 0.0153, 0.0121, 0.0132, 0.0130, 0.0124, 0.0143, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.4108e-05, 1.1193e-04, 8.6982e-05, 9.5482e-05, 9.2543e-05, 9.0303e-05, + 1.0486e-04, 1.0662e-04], device='cuda:5') +2023-03-26 13:20:38,898 INFO [finetune.py:1010] (5/7) Epoch 11, validation: loss=0.1572, simple_loss=0.2284, pruned_loss=0.04301, over 2265189.00 frames. 
+2023-03-26 13:20:38,899 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 13:21:06,331 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4652, 1.3412, 1.4006, 1.3481, 0.9562, 2.3045, 0.7734, 1.2958], + device='cuda:5'), covar=tensor([0.2938, 0.2236, 0.1990, 0.2214, 0.1689, 0.0314, 0.2628, 0.1222], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0115, 0.0120, 0.0123, 0.0115, 0.0098, 0.0099, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 13:21:13,697 INFO [finetune.py:976] (5/7) Epoch 11, batch 3050, loss[loss=0.1833, simple_loss=0.2488, pruned_loss=0.0589, over 4888.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2662, pruned_loss=0.06669, over 958460.12 frames. ], batch size: 35, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:21:19,482 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.076e+02 1.587e+02 1.939e+02 2.482e+02 4.597e+02, threshold=3.877e+02, percent-clipped=2.0 +2023-03-26 13:21:33,202 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.2108, 2.8871, 3.0577, 3.0484, 2.8421, 2.7323, 3.2432, 1.0674], + device='cuda:5'), covar=tensor([0.1654, 0.1763, 0.1858, 0.2191, 0.2586, 0.2733, 0.1609, 0.7351], + device='cuda:5'), in_proj_covar=tensor([0.0348, 0.0243, 0.0275, 0.0292, 0.0330, 0.0284, 0.0302, 0.0294], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:21:56,081 INFO [finetune.py:976] (5/7) Epoch 11, batch 3100, loss[loss=0.1905, simple_loss=0.257, pruned_loss=0.06206, over 4819.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2629, pruned_loss=0.06526, over 957521.38 frames. ], batch size: 33, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:21:56,846 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8793, 1.7128, 1.5985, 1.4967, 1.9279, 1.6718, 1.8760, 1.8555], + device='cuda:5'), covar=tensor([0.1336, 0.2194, 0.3179, 0.2483, 0.2482, 0.1703, 0.2966, 0.1856], + device='cuda:5'), in_proj_covar=tensor([0.0175, 0.0186, 0.0231, 0.0253, 0.0237, 0.0195, 0.0210, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:22:08,707 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60396.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 13:22:16,701 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60408.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 13:22:25,059 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60421.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:22:29,565 INFO [finetune.py:976] (5/7) Epoch 11, batch 3150, loss[loss=0.2446, simple_loss=0.299, pruned_loss=0.09506, over 4866.00 frames. ], tot_loss[loss=0.1946, simple_loss=0.2602, pruned_loss=0.06451, over 958123.15 frames. 
], batch size: 44, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:22:34,368 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60434.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:22:34,874 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.045e+02 1.624e+02 1.838e+02 2.200e+02 4.980e+02, threshold=3.676e+02, percent-clipped=1.0 +2023-03-26 13:23:01,695 INFO [finetune.py:976] (5/7) Epoch 11, batch 3200, loss[loss=0.1777, simple_loss=0.2385, pruned_loss=0.05847, over 4920.00 frames. ], tot_loss[loss=0.1902, simple_loss=0.2556, pruned_loss=0.06245, over 958769.38 frames. ], batch size: 28, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:23:20,584 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60504.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:23:37,315 INFO [finetune.py:976] (5/7) Epoch 11, batch 3250, loss[loss=0.1704, simple_loss=0.2415, pruned_loss=0.04969, over 4802.00 frames. ], tot_loss[loss=0.1904, simple_loss=0.2559, pruned_loss=0.06242, over 957655.68 frames. ], batch size: 25, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:23:48,950 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.174e+02 1.626e+02 1.982e+02 2.397e+02 3.737e+02, threshold=3.964e+02, percent-clipped=1.0 +2023-03-26 13:23:59,841 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60545.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:24:04,035 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=60552.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:24:05,273 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60554.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:24:11,662 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-03-26 13:24:27,372 INFO [finetune.py:976] (5/7) Epoch 11, batch 3300, loss[loss=0.2118, simple_loss=0.2761, pruned_loss=0.07374, over 4870.00 frames. ], tot_loss[loss=0.1941, simple_loss=0.2601, pruned_loss=0.06401, over 957661.14 frames. ], batch size: 34, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:24:45,614 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=60593.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:24:58,724 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-26 13:25:16,492 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3014, 1.3814, 1.4037, 1.6433, 1.5176, 3.0307, 1.2706, 1.4517], + device='cuda:5'), covar=tensor([0.1307, 0.2399, 0.1460, 0.1237, 0.2023, 0.0352, 0.2218, 0.2445], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0075, 0.0078, 0.0092, 0.0082, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 13:25:28,937 INFO [finetune.py:976] (5/7) Epoch 11, batch 3350, loss[loss=0.1714, simple_loss=0.2427, pruned_loss=0.05005, over 4923.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2621, pruned_loss=0.06508, over 956107.85 frames. ], batch size: 33, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:25:34,912 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.073e+02 1.701e+02 2.036e+02 2.469e+02 3.577e+02, threshold=4.071e+02, percent-clipped=0.0 +2023-03-26 13:26:01,384 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. 
limit=2.0 +2023-03-26 13:26:02,930 INFO [finetune.py:976] (5/7) Epoch 11, batch 3400, loss[loss=0.1788, simple_loss=0.2381, pruned_loss=0.05973, over 4723.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2639, pruned_loss=0.06612, over 954693.11 frames. ], batch size: 23, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:26:12,426 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1064, 1.9823, 2.0229, 1.4379, 2.0566, 2.2056, 2.0777, 1.6470], + device='cuda:5'), covar=tensor([0.0513, 0.0595, 0.0668, 0.0917, 0.0615, 0.0602, 0.0580, 0.1124], + device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0133, 0.0141, 0.0124, 0.0120, 0.0143, 0.0143, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:26:17,000 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60696.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:26:24,737 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60708.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:26:30,793 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60718.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:26:32,542 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60721.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:26:36,599 INFO [finetune.py:976] (5/7) Epoch 11, batch 3450, loss[loss=0.1659, simple_loss=0.2378, pruned_loss=0.04695, over 4865.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2627, pruned_loss=0.06515, over 957202.83 frames. ], batch size: 34, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:26:41,020 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60734.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:26:41,511 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.902e+01 1.594e+02 1.892e+02 2.253e+02 3.493e+02, threshold=3.783e+02, percent-clipped=0.0 +2023-03-26 13:26:52,732 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=60744.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:27:06,425 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=60756.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:27:25,456 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=60769.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:27:36,415 INFO [finetune.py:976] (5/7) Epoch 11, batch 3500, loss[loss=0.2361, simple_loss=0.295, pruned_loss=0.08858, over 4749.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2612, pruned_loss=0.06464, over 956876.99 frames. 
], batch size: 59, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:27:37,768 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60779.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:27:45,019 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=60782.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:28:14,137 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3669, 2.1248, 1.7244, 2.0480, 2.0118, 1.9751, 2.1049, 2.8787], + device='cuda:5'), covar=tensor([0.4042, 0.4992, 0.3712, 0.4407, 0.4685, 0.2549, 0.4280, 0.1632], + device='cuda:5'), in_proj_covar=tensor([0.0284, 0.0260, 0.0224, 0.0277, 0.0244, 0.0210, 0.0245, 0.0215], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:28:15,226 INFO [finetune.py:976] (5/7) Epoch 11, batch 3550, loss[loss=0.1683, simple_loss=0.2269, pruned_loss=0.05485, over 4828.00 frames. ], tot_loss[loss=0.1929, simple_loss=0.2579, pruned_loss=0.06396, over 955807.06 frames. ], batch size: 39, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:28:20,665 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.181e+02 1.566e+02 1.863e+02 2.348e+02 4.575e+02, threshold=3.726e+02, percent-clipped=4.0 +2023-03-26 13:28:22,665 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.13 vs. limit=2.0 +2023-03-26 13:28:34,187 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60854.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:28:47,391 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0495, 1.8630, 1.6070, 1.8420, 1.7526, 1.7744, 1.7895, 2.5903], + device='cuda:5'), covar=tensor([0.4254, 0.4802, 0.3800, 0.4842, 0.4955, 0.2613, 0.4552, 0.1819], + device='cuda:5'), in_proj_covar=tensor([0.0284, 0.0260, 0.0223, 0.0277, 0.0243, 0.0209, 0.0245, 0.0215], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:28:49,077 INFO [finetune.py:976] (5/7) Epoch 11, batch 3600, loss[loss=0.1961, simple_loss=0.2687, pruned_loss=0.06176, over 4815.00 frames. ], tot_loss[loss=0.1912, simple_loss=0.2556, pruned_loss=0.06337, over 956777.06 frames. ], batch size: 51, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:29:17,738 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=60902.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:29:39,494 INFO [finetune.py:976] (5/7) Epoch 11, batch 3650, loss[loss=0.2225, simple_loss=0.2977, pruned_loss=0.07362, over 4853.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2598, pruned_loss=0.06541, over 956957.31 frames. ], batch size: 49, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:29:44,363 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.012e+02 1.638e+02 1.962e+02 2.312e+02 3.604e+02, threshold=3.924e+02, percent-clipped=0.0 +2023-03-26 13:30:33,787 INFO [finetune.py:976] (5/7) Epoch 11, batch 3700, loss[loss=0.1569, simple_loss=0.2259, pruned_loss=0.04391, over 4790.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2618, pruned_loss=0.06519, over 955364.93 frames. ], batch size: 25, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:31:15,829 INFO [finetune.py:976] (5/7) Epoch 11, batch 3750, loss[loss=0.1899, simple_loss=0.259, pruned_loss=0.06041, over 4747.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2626, pruned_loss=0.06524, over 953908.29 frames. 
], batch size: 27, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:31:16,571 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3140, 1.5194, 1.2279, 1.4926, 1.7176, 1.5427, 1.4280, 1.2605], + device='cuda:5'), covar=tensor([0.0298, 0.0208, 0.0518, 0.0219, 0.0166, 0.0374, 0.0253, 0.0313], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0108, 0.0140, 0.0114, 0.0102, 0.0103, 0.0093, 0.0108], + device='cuda:5'), out_proj_covar=tensor([7.0648e-05, 8.3875e-05, 1.1157e-04, 8.9046e-05, 7.9648e-05, 7.6620e-05, + 7.0224e-05, 8.3029e-05], device='cuda:5') +2023-03-26 13:31:20,653 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.055e+02 1.587e+02 1.819e+02 2.276e+02 4.586e+02, threshold=3.638e+02, percent-clipped=1.0 +2023-03-26 13:31:38,940 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=61061.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:31:47,663 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=61074.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:31:49,402 INFO [finetune.py:976] (5/7) Epoch 11, batch 3800, loss[loss=0.1901, simple_loss=0.2618, pruned_loss=0.05918, over 4721.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2628, pruned_loss=0.06464, over 954905.70 frames. ], batch size: 54, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:31:51,989 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8767, 1.8638, 1.6763, 2.1112, 2.5471, 2.1315, 1.7830, 1.4792], + device='cuda:5'), covar=tensor([0.2220, 0.2013, 0.1947, 0.1600, 0.1651, 0.1077, 0.2252, 0.2045], + device='cuda:5'), in_proj_covar=tensor([0.0238, 0.0207, 0.0209, 0.0190, 0.0241, 0.0182, 0.0212, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:32:29,714 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=61122.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:32:32,631 INFO [finetune.py:976] (5/7) Epoch 11, batch 3850, loss[loss=0.227, simple_loss=0.2877, pruned_loss=0.08312, over 4838.00 frames. ], tot_loss[loss=0.193, simple_loss=0.2596, pruned_loss=0.06321, over 952965.11 frames. ], batch size: 49, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:32:37,920 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.075e+02 1.518e+02 1.864e+02 2.279e+02 4.215e+02, threshold=3.727e+02, percent-clipped=1.0 +2023-03-26 13:33:05,948 INFO [finetune.py:976] (5/7) Epoch 11, batch 3900, loss[loss=0.1769, simple_loss=0.2405, pruned_loss=0.05668, over 4742.00 frames. ], tot_loss[loss=0.1916, simple_loss=0.2576, pruned_loss=0.06283, over 953162.27 frames. ], batch size: 27, lr: 3.69e-03, grad_scale: 32.0 +2023-03-26 13:33:10,851 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.69 vs. limit=2.0 +2023-03-26 13:33:32,218 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.1203, 0.9709, 1.0079, 0.3220, 0.9050, 1.1154, 1.1727, 0.9330], + device='cuda:5'), covar=tensor([0.0862, 0.0611, 0.0542, 0.0588, 0.0581, 0.0576, 0.0411, 0.0664], + device='cuda:5'), in_proj_covar=tensor([0.0128, 0.0154, 0.0122, 0.0132, 0.0130, 0.0125, 0.0144, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.4274e-05, 1.1257e-04, 8.7674e-05, 9.5622e-05, 9.2950e-05, 9.0943e-05, + 1.0514e-04, 1.0689e-04], device='cuda:5') +2023-03-26 13:33:39,745 INFO [finetune.py:976] (5/7) Epoch 11, batch 3950, loss[loss=0.136, simple_loss=0.2077, pruned_loss=0.03211, over 4771.00 frames. 
], tot_loss[loss=0.19, simple_loss=0.2555, pruned_loss=0.06232, over 955953.17 frames. ], batch size: 26, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:33:39,931 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-03-26 13:33:45,061 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.129e+02 1.570e+02 1.907e+02 2.309e+02 4.377e+02, threshold=3.813e+02, percent-clipped=3.0 +2023-03-26 13:34:12,379 INFO [finetune.py:976] (5/7) Epoch 11, batch 4000, loss[loss=0.1928, simple_loss=0.2531, pruned_loss=0.06628, over 4796.00 frames. ], tot_loss[loss=0.1917, simple_loss=0.2563, pruned_loss=0.06355, over 951612.78 frames. ], batch size: 25, lr: 3.68e-03, grad_scale: 64.0 +2023-03-26 13:34:29,839 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3416, 2.2149, 2.0403, 1.2557, 2.1546, 1.8897, 1.7235, 2.0983], + device='cuda:5'), covar=tensor([0.0868, 0.0669, 0.1335, 0.1697, 0.1239, 0.1841, 0.1720, 0.0847], + device='cuda:5'), in_proj_covar=tensor([0.0166, 0.0198, 0.0200, 0.0185, 0.0214, 0.0207, 0.0221, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:34:55,753 INFO [finetune.py:976] (5/7) Epoch 11, batch 4050, loss[loss=0.1904, simple_loss=0.2669, pruned_loss=0.05693, over 4929.00 frames. ], tot_loss[loss=0.1944, simple_loss=0.2599, pruned_loss=0.06448, over 952928.95 frames. ], batch size: 33, lr: 3.68e-03, grad_scale: 64.0 +2023-03-26 13:35:04,890 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.154e+02 1.652e+02 2.086e+02 2.571e+02 4.987e+02, threshold=4.171e+02, percent-clipped=6.0 +2023-03-26 13:35:12,209 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.33 vs. limit=5.0 +2023-03-26 13:35:37,306 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7687, 3.5207, 3.3383, 1.5723, 3.5889, 2.6852, 0.7323, 2.3456], + device='cuda:5'), covar=tensor([0.2408, 0.1999, 0.1893, 0.3322, 0.1080, 0.1073, 0.4500, 0.1557], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0174, 0.0160, 0.0128, 0.0156, 0.0122, 0.0146, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 13:35:41,833 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=61374.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:35:43,532 INFO [finetune.py:976] (5/7) Epoch 11, batch 4100, loss[loss=0.1377, simple_loss=0.2088, pruned_loss=0.03335, over 4754.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2625, pruned_loss=0.06549, over 953976.26 frames. ], batch size: 26, lr: 3.68e-03, grad_scale: 64.0 +2023-03-26 13:35:47,586 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.62 vs. 
limit=5.0 +2023-03-26 13:36:01,001 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.4317, 3.8746, 4.0338, 4.2671, 4.1364, 3.9410, 4.5702, 1.5265], + device='cuda:5'), covar=tensor([0.0833, 0.0812, 0.0831, 0.0993, 0.1323, 0.1636, 0.0650, 0.5468], + device='cuda:5'), in_proj_covar=tensor([0.0349, 0.0242, 0.0275, 0.0289, 0.0330, 0.0282, 0.0300, 0.0293], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:36:16,507 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=61417.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:36:20,075 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=61422.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:36:26,639 INFO [finetune.py:976] (5/7) Epoch 11, batch 4150, loss[loss=0.154, simple_loss=0.2168, pruned_loss=0.04559, over 4720.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2639, pruned_loss=0.06561, over 955278.02 frames. ], batch size: 23, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:36:32,502 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.161e+02 1.629e+02 1.982e+02 2.519e+02 5.426e+02, threshold=3.964e+02, percent-clipped=4.0 +2023-03-26 13:36:33,123 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3390, 1.2032, 1.1687, 1.2694, 1.5925, 1.4406, 1.3108, 1.0622], + device='cuda:5'), covar=tensor([0.0290, 0.0290, 0.0594, 0.0308, 0.0222, 0.0372, 0.0295, 0.0395], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0109, 0.0141, 0.0114, 0.0102, 0.0104, 0.0093, 0.0108], + device='cuda:5'), out_proj_covar=tensor([7.1091e-05, 8.4532e-05, 1.1205e-04, 8.9592e-05, 7.9862e-05, 7.6740e-05, + 7.0357e-05, 8.2952e-05], device='cuda:5') +2023-03-26 13:36:46,470 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5525, 1.3708, 1.2721, 1.5298, 1.6160, 1.5614, 0.9962, 1.2851], + device='cuda:5'), covar=tensor([0.2334, 0.2253, 0.2138, 0.1752, 0.1678, 0.1292, 0.2672, 0.2015], + device='cuda:5'), in_proj_covar=tensor([0.0237, 0.0207, 0.0208, 0.0189, 0.0241, 0.0182, 0.0211, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:36:48,610 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-03-26 13:36:59,826 INFO [finetune.py:976] (5/7) Epoch 11, batch 4200, loss[loss=0.2198, simple_loss=0.2857, pruned_loss=0.07698, over 4926.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2632, pruned_loss=0.06431, over 954729.48 frames. ], batch size: 33, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:37:15,144 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8019, 1.7626, 1.5993, 1.9682, 2.1264, 2.0370, 1.6463, 1.4598], + device='cuda:5'), covar=tensor([0.2123, 0.1918, 0.1817, 0.1562, 0.1844, 0.1116, 0.2363, 0.1964], + device='cuda:5'), in_proj_covar=tensor([0.0237, 0.0207, 0.0208, 0.0189, 0.0240, 0.0181, 0.0211, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:37:35,244 INFO [finetune.py:976] (5/7) Epoch 11, batch 4250, loss[loss=0.1688, simple_loss=0.2284, pruned_loss=0.05462, over 4933.00 frames. ], tot_loss[loss=0.193, simple_loss=0.2599, pruned_loss=0.0631, over 955202.92 frames. 
], batch size: 33, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:37:45,940 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.771e+01 1.547e+02 1.858e+02 2.245e+02 5.805e+02, threshold=3.715e+02, percent-clipped=2.0 +2023-03-26 13:38:06,502 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3700, 1.8125, 1.6065, 1.5542, 1.9780, 1.8675, 1.7628, 1.6855], + device='cuda:5'), covar=tensor([0.0547, 0.0254, 0.0475, 0.0345, 0.0318, 0.0430, 0.0326, 0.0382], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0108, 0.0140, 0.0114, 0.0102, 0.0103, 0.0092, 0.0107], + device='cuda:5'), out_proj_covar=tensor([7.0826e-05, 8.4064e-05, 1.1143e-04, 8.9238e-05, 7.9646e-05, 7.6235e-05, + 6.9773e-05, 8.2291e-05], device='cuda:5') +2023-03-26 13:38:11,457 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0363, 1.4052, 2.0170, 1.9280, 1.7483, 1.7201, 1.8789, 1.8263], + device='cuda:5'), covar=tensor([0.4133, 0.4840, 0.3944, 0.4172, 0.5701, 0.4521, 0.5437, 0.3876], + device='cuda:5'), in_proj_covar=tensor([0.0237, 0.0238, 0.0253, 0.0258, 0.0255, 0.0231, 0.0274, 0.0232], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:38:15,493 INFO [finetune.py:976] (5/7) Epoch 11, batch 4300, loss[loss=0.183, simple_loss=0.2444, pruned_loss=0.06075, over 4901.00 frames. ], tot_loss[loss=0.1913, simple_loss=0.2575, pruned_loss=0.0626, over 954200.00 frames. ], batch size: 35, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:38:33,386 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.2497, 4.8878, 4.6177, 2.8874, 4.9330, 3.9294, 1.2257, 3.6594], + device='cuda:5'), covar=tensor([0.2016, 0.1602, 0.1460, 0.2926, 0.0772, 0.0834, 0.4319, 0.1340], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0173, 0.0159, 0.0128, 0.0155, 0.0121, 0.0146, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 13:38:48,430 INFO [finetune.py:976] (5/7) Epoch 11, batch 4350, loss[loss=0.2089, simple_loss=0.2783, pruned_loss=0.06968, over 4810.00 frames. ], tot_loss[loss=0.1905, simple_loss=0.2561, pruned_loss=0.06243, over 953171.38 frames. ], batch size: 41, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:38:54,824 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.000e+02 1.580e+02 1.801e+02 2.212e+02 3.446e+02, threshold=3.603e+02, percent-clipped=0.0 +2023-03-26 13:39:21,857 INFO [finetune.py:976] (5/7) Epoch 11, batch 4400, loss[loss=0.269, simple_loss=0.3199, pruned_loss=0.1091, over 4726.00 frames. ], tot_loss[loss=0.192, simple_loss=0.2575, pruned_loss=0.06327, over 953712.08 frames. 
], batch size: 59, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:39:53,791 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=61717.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:39:54,466 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2852, 2.2259, 1.7821, 2.1554, 2.2662, 1.8794, 2.5333, 2.2875], + device='cuda:5'), covar=tensor([0.1399, 0.2436, 0.3376, 0.3076, 0.2735, 0.1941, 0.3499, 0.2088], + device='cuda:5'), in_proj_covar=tensor([0.0176, 0.0187, 0.0234, 0.0254, 0.0240, 0.0198, 0.0213, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:40:04,332 INFO [finetune.py:976] (5/7) Epoch 11, batch 4450, loss[loss=0.2227, simple_loss=0.2936, pruned_loss=0.07591, over 4819.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2626, pruned_loss=0.06518, over 952917.99 frames. ], batch size: 40, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:40:07,485 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=61732.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:40:14,300 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.223e+02 1.628e+02 1.977e+02 2.534e+02 3.640e+02, threshold=3.954e+02, percent-clipped=2.0 +2023-03-26 13:40:17,358 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2067, 2.1213, 1.7193, 1.9557, 2.2611, 1.8814, 2.4108, 2.2274], + device='cuda:5'), covar=tensor([0.1472, 0.2238, 0.3506, 0.2877, 0.2620, 0.1829, 0.3227, 0.1956], + device='cuda:5'), in_proj_covar=tensor([0.0175, 0.0187, 0.0233, 0.0254, 0.0239, 0.0197, 0.0212, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:40:23,521 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=61743.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:40:25,392 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-03-26 13:40:49,786 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=61765.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:40:57,021 INFO [finetune.py:976] (5/7) Epoch 11, batch 4500, loss[loss=0.214, simple_loss=0.2896, pruned_loss=0.06917, over 4923.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2629, pruned_loss=0.06519, over 954759.98 frames. 
], batch size: 42, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:41:07,877 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=61793.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:41:16,101 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=61804.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:41:18,393 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5854, 1.3985, 2.0334, 3.1827, 2.1663, 2.1684, 0.8920, 2.5866], + device='cuda:5'), covar=tensor([0.1835, 0.1523, 0.1262, 0.0691, 0.0865, 0.1379, 0.1910, 0.0596], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0115, 0.0133, 0.0163, 0.0101, 0.0138, 0.0125, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 13:41:27,757 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6869, 1.5076, 1.0825, 0.2949, 1.2278, 1.4755, 1.4098, 1.3812], + device='cuda:5'), covar=tensor([0.0959, 0.0808, 0.1282, 0.1982, 0.1525, 0.2507, 0.2357, 0.0936], + device='cuda:5'), in_proj_covar=tensor([0.0167, 0.0198, 0.0202, 0.0185, 0.0215, 0.0208, 0.0222, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:41:33,088 INFO [finetune.py:976] (5/7) Epoch 11, batch 4550, loss[loss=0.1972, simple_loss=0.2707, pruned_loss=0.06183, over 4777.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2636, pruned_loss=0.06526, over 954270.41 frames. ], batch size: 51, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:41:43,495 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.535e+01 1.607e+02 1.951e+02 2.245e+02 3.846e+02, threshold=3.902e+02, percent-clipped=0.0 +2023-03-26 13:41:54,837 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.21 vs. limit=5.0 +2023-03-26 13:42:15,221 INFO [finetune.py:976] (5/7) Epoch 11, batch 4600, loss[loss=0.179, simple_loss=0.2504, pruned_loss=0.05384, over 4750.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.263, pruned_loss=0.06538, over 953038.46 frames. ], batch size: 28, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:42:40,453 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=61914.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:42:47,513 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9679, 1.8843, 1.7663, 2.0616, 2.2567, 2.0700, 1.9169, 1.6940], + device='cuda:5'), covar=tensor([0.1660, 0.1791, 0.1567, 0.1306, 0.1761, 0.1028, 0.2046, 0.1549], + device='cuda:5'), in_proj_covar=tensor([0.0237, 0.0207, 0.0208, 0.0189, 0.0241, 0.0181, 0.0211, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:42:48,596 INFO [finetune.py:976] (5/7) Epoch 11, batch 4650, loss[loss=0.1943, simple_loss=0.2581, pruned_loss=0.06524, over 4906.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2611, pruned_loss=0.06499, over 954797.54 frames. 
], batch size: 37, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:42:56,050 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.057e+02 1.606e+02 1.934e+02 2.317e+02 5.626e+02, threshold=3.867e+02, percent-clipped=3.0 +2023-03-26 13:43:31,725 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=61975.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:43:32,826 INFO [finetune.py:976] (5/7) Epoch 11, batch 4700, loss[loss=0.1763, simple_loss=0.2479, pruned_loss=0.05241, over 4819.00 frames. ], tot_loss[loss=0.1936, simple_loss=0.2584, pruned_loss=0.06435, over 955140.50 frames. ], batch size: 38, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:43:35,538 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-03-26 13:43:57,708 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0608, 1.8484, 1.5968, 1.7790, 1.7942, 1.8266, 1.8443, 2.5627], + device='cuda:5'), covar=tensor([0.4186, 0.4872, 0.3721, 0.4719, 0.4275, 0.2500, 0.4274, 0.1840], + device='cuda:5'), in_proj_covar=tensor([0.0285, 0.0260, 0.0223, 0.0277, 0.0243, 0.0210, 0.0246, 0.0216], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:44:17,458 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7093, 1.6036, 1.5489, 1.7315, 1.0710, 3.6166, 1.3259, 1.9309], + device='cuda:5'), covar=tensor([0.3421, 0.2522, 0.2168, 0.2301, 0.1943, 0.0165, 0.2677, 0.1286], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0115, 0.0119, 0.0123, 0.0115, 0.0098, 0.0098, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 13:44:19,773 INFO [finetune.py:976] (5/7) Epoch 11, batch 4750, loss[loss=0.1629, simple_loss=0.2282, pruned_loss=0.0488, over 4874.00 frames. ], tot_loss[loss=0.1906, simple_loss=0.2554, pruned_loss=0.06291, over 955814.51 frames. ], batch size: 31, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:44:25,601 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.066e+02 1.474e+02 1.769e+02 2.148e+02 4.944e+02, threshold=3.539e+02, percent-clipped=1.0 +2023-03-26 13:44:53,404 INFO [finetune.py:976] (5/7) Epoch 11, batch 4800, loss[loss=0.2149, simple_loss=0.2764, pruned_loss=0.07666, over 4864.00 frames. ], tot_loss[loss=0.1934, simple_loss=0.2585, pruned_loss=0.06416, over 954242.30 frames. 
], batch size: 31, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:45:06,473 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62088.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:45:13,199 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62099.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:45:13,875 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4168, 1.5459, 1.2932, 1.5881, 1.8705, 1.6973, 1.6147, 1.3479], + device='cuda:5'), covar=tensor([0.0328, 0.0265, 0.0517, 0.0246, 0.0202, 0.0412, 0.0263, 0.0373], + device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0109, 0.0141, 0.0114, 0.0102, 0.0104, 0.0093, 0.0108], + device='cuda:5'), out_proj_covar=tensor([7.1744e-05, 8.4658e-05, 1.1210e-04, 8.9451e-05, 7.9721e-05, 7.7106e-05, + 6.9875e-05, 8.3012e-05], device='cuda:5') +2023-03-26 13:45:28,989 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3921, 0.9836, 0.8135, 1.2964, 1.8211, 0.7218, 1.1830, 1.3008], + device='cuda:5'), covar=tensor([0.1587, 0.2248, 0.1801, 0.1265, 0.1963, 0.1893, 0.1498, 0.2045], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0096, 0.0113, 0.0093, 0.0120, 0.0094, 0.0099, 0.0091], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 13:45:47,608 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62122.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:45:50,590 INFO [finetune.py:976] (5/7) Epoch 11, batch 4850, loss[loss=0.2366, simple_loss=0.3049, pruned_loss=0.08414, over 4892.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2622, pruned_loss=0.06516, over 954193.04 frames. ], batch size: 35, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:46:01,541 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.225e+02 1.730e+02 2.037e+02 2.587e+02 8.043e+02, threshold=4.075e+02, percent-clipped=4.0 +2023-03-26 13:46:45,230 INFO [finetune.py:976] (5/7) Epoch 11, batch 4900, loss[loss=0.227, simple_loss=0.2943, pruned_loss=0.07983, over 4907.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2633, pruned_loss=0.06589, over 953277.72 frames. ], batch size: 37, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:46:54,060 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62183.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:47:05,010 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1523, 1.5265, 0.7656, 1.9780, 2.5258, 1.6890, 1.9096, 1.9185], + device='cuda:5'), covar=tensor([0.1305, 0.2000, 0.2275, 0.1106, 0.1710, 0.1901, 0.1334, 0.2038], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0096, 0.0114, 0.0093, 0.0120, 0.0095, 0.0099, 0.0091], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 13:47:49,177 INFO [finetune.py:976] (5/7) Epoch 11, batch 4950, loss[loss=0.1918, simple_loss=0.2572, pruned_loss=0.06325, over 4884.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2643, pruned_loss=0.06612, over 952601.76 frames. 
], batch size: 32, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:47:56,654 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.284e+02 1.728e+02 2.029e+02 2.471e+02 5.736e+02, threshold=4.057e+02, percent-clipped=2.0 +2023-03-26 13:48:11,303 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3477, 1.5846, 1.2685, 1.5556, 1.8661, 1.7249, 1.5645, 1.3478], + device='cuda:5'), covar=tensor([0.0367, 0.0271, 0.0551, 0.0298, 0.0189, 0.0431, 0.0346, 0.0398], + device='cuda:5'), in_proj_covar=tensor([0.0092, 0.0109, 0.0141, 0.0115, 0.0102, 0.0104, 0.0093, 0.0108], + device='cuda:5'), out_proj_covar=tensor([7.1989e-05, 8.4993e-05, 1.1230e-04, 8.9677e-05, 7.9736e-05, 7.7349e-05, + 6.9917e-05, 8.3264e-05], device='cuda:5') +2023-03-26 13:48:18,909 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62270.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:48:21,267 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62273.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:48:24,035 INFO [finetune.py:976] (5/7) Epoch 11, batch 5000, loss[loss=0.1642, simple_loss=0.2248, pruned_loss=0.05175, over 4232.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2625, pruned_loss=0.0654, over 952522.54 frames. ], batch size: 65, lr: 3.68e-03, grad_scale: 32.0 +2023-03-26 13:48:34,816 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8979, 1.9088, 1.6941, 2.0754, 2.4102, 2.1537, 1.6296, 1.5691], + device='cuda:5'), covar=tensor([0.2315, 0.1999, 0.1979, 0.1652, 0.1818, 0.1146, 0.2424, 0.1999], + device='cuda:5'), in_proj_covar=tensor([0.0238, 0.0207, 0.0208, 0.0189, 0.0242, 0.0182, 0.0213, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:48:44,480 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2421, 2.0748, 1.7646, 2.1788, 2.0092, 1.9728, 2.0106, 2.7086], + device='cuda:5'), covar=tensor([0.4736, 0.5511, 0.4012, 0.4524, 0.4595, 0.2879, 0.4519, 0.2099], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0261, 0.0224, 0.0278, 0.0244, 0.0210, 0.0248, 0.0217], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:48:46,895 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6170, 1.5152, 1.4273, 1.5790, 1.0226, 3.2882, 1.2969, 1.7570], + device='cuda:5'), covar=tensor([0.4037, 0.3085, 0.2513, 0.2972, 0.2071, 0.0300, 0.2637, 0.1284], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0115, 0.0119, 0.0123, 0.0114, 0.0098, 0.0098, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 13:48:57,128 INFO [finetune.py:976] (5/7) Epoch 11, batch 5050, loss[loss=0.1734, simple_loss=0.2377, pruned_loss=0.05458, over 4769.00 frames. ], tot_loss[loss=0.1941, simple_loss=0.2593, pruned_loss=0.06451, over 954712.15 frames. 
], batch size: 28, lr: 3.68e-03, grad_scale: 16.0 +2023-03-26 13:49:02,472 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62334.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:49:04,172 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.067e+02 1.504e+02 1.759e+02 2.068e+02 4.473e+02, threshold=3.518e+02, percent-clipped=1.0 +2023-03-26 13:49:32,190 INFO [finetune.py:976] (5/7) Epoch 11, batch 5100, loss[loss=0.1751, simple_loss=0.2337, pruned_loss=0.05824, over 4083.00 frames. ], tot_loss[loss=0.1904, simple_loss=0.2555, pruned_loss=0.06264, over 953329.49 frames. ], batch size: 18, lr: 3.68e-03, grad_scale: 16.0 +2023-03-26 13:49:39,423 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8836, 1.7136, 1.6676, 1.8076, 1.5749, 3.7421, 1.6201, 2.1589], + device='cuda:5'), covar=tensor([0.2948, 0.2271, 0.1998, 0.2190, 0.1479, 0.0163, 0.2418, 0.1125], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0115, 0.0118, 0.0122, 0.0114, 0.0098, 0.0098, 0.0096], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 13:49:40,511 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62388.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:49:42,326 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62391.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:49:47,629 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62399.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:50:00,043 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-03-26 13:50:05,687 INFO [finetune.py:976] (5/7) Epoch 11, batch 5150, loss[loss=0.2292, simple_loss=0.2861, pruned_loss=0.08608, over 4895.00 frames. ], tot_loss[loss=0.1916, simple_loss=0.2562, pruned_loss=0.0635, over 952771.33 frames. 
], batch size: 35, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 13:50:12,137 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=62436.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:50:12,674 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.110e+02 1.578e+02 2.001e+02 2.432e+02 3.455e+02, threshold=4.003e+02, percent-clipped=0.0 +2023-03-26 13:50:26,783 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=62447.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:50:26,843 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3126, 2.3337, 2.1961, 1.6135, 2.3725, 2.4391, 2.3366, 2.0295], + device='cuda:5'), covar=tensor([0.0597, 0.0602, 0.0798, 0.0933, 0.0603, 0.0721, 0.0684, 0.1044], + device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0135, 0.0142, 0.0125, 0.0121, 0.0145, 0.0145, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:50:30,450 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62452.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:50:45,217 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([5.3439, 4.6491, 4.9143, 5.1152, 5.0142, 4.7582, 5.4338, 1.7818], + device='cuda:5'), covar=tensor([0.0728, 0.0851, 0.0794, 0.0984, 0.1093, 0.1623, 0.0527, 0.5362], + device='cuda:5'), in_proj_covar=tensor([0.0351, 0.0245, 0.0278, 0.0292, 0.0333, 0.0286, 0.0304, 0.0297], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:50:55,269 INFO [finetune.py:976] (5/7) Epoch 11, batch 5200, loss[loss=0.1513, simple_loss=0.2138, pruned_loss=0.04437, over 4354.00 frames. ], tot_loss[loss=0.1938, simple_loss=0.2587, pruned_loss=0.06448, over 953613.00 frames. ], batch size: 19, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 13:50:56,961 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62478.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:51:03,733 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-03-26 13:51:36,855 INFO [finetune.py:976] (5/7) Epoch 11, batch 5250, loss[loss=0.1926, simple_loss=0.2641, pruned_loss=0.06057, over 4829.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2615, pruned_loss=0.06561, over 953529.88 frames. ], batch size: 47, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 13:51:54,382 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.247e+02 1.618e+02 1.949e+02 2.406e+02 7.235e+02, threshold=3.897e+02, percent-clipped=3.0 +2023-03-26 13:52:03,520 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62545.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:52:19,512 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62570.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:52:23,690 INFO [finetune.py:976] (5/7) Epoch 11, batch 5300, loss[loss=0.2085, simple_loss=0.2687, pruned_loss=0.07415, over 4814.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2639, pruned_loss=0.06662, over 953040.89 frames. 
], batch size: 39, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 13:52:29,599 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62585.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:52:33,077 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.52 vs. limit=5.0 +2023-03-26 13:52:44,283 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62606.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:52:46,151 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9770, 1.8946, 1.5446, 1.8301, 2.0102, 1.6722, 2.1586, 1.8820], + device='cuda:5'), covar=tensor([0.1606, 0.2227, 0.3390, 0.2668, 0.2757, 0.1960, 0.3418, 0.2103], + device='cuda:5'), in_proj_covar=tensor([0.0176, 0.0188, 0.0234, 0.0255, 0.0242, 0.0198, 0.0213, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:52:52,192 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=62618.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:52:57,602 INFO [finetune.py:976] (5/7) Epoch 11, batch 5350, loss[loss=0.1779, simple_loss=0.2612, pruned_loss=0.04733, over 4815.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.263, pruned_loss=0.06524, over 952937.19 frames. ], batch size: 33, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 13:52:58,903 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62629.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:53:04,200 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.067e+02 1.504e+02 1.845e+02 2.238e+02 3.589e+02, threshold=3.690e+02, percent-clipped=0.0 +2023-03-26 13:53:10,790 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62646.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:53:24,279 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62666.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:53:30,762 INFO [finetune.py:976] (5/7) Epoch 11, batch 5400, loss[loss=0.1601, simple_loss=0.2324, pruned_loss=0.04393, over 4892.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2611, pruned_loss=0.06481, over 953582.92 frames. ], batch size: 43, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 13:54:04,663 INFO [finetune.py:976] (5/7) Epoch 11, batch 5450, loss[loss=0.1498, simple_loss=0.2224, pruned_loss=0.03859, over 4383.00 frames. ], tot_loss[loss=0.1927, simple_loss=0.2577, pruned_loss=0.06387, over 951170.98 frames. ], batch size: 19, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 13:54:04,773 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62727.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:54:08,519 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.47 vs. 
limit=5.0 +2023-03-26 13:54:10,765 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.513e+01 1.463e+02 1.876e+02 2.335e+02 4.427e+02, threshold=3.751e+02, percent-clipped=2.0 +2023-03-26 13:54:17,810 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62747.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:54:24,397 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5284, 2.6110, 2.7787, 1.5114, 3.0198, 3.0847, 2.7694, 2.3392], + device='cuda:5'), covar=tensor([0.0922, 0.0670, 0.0319, 0.0687, 0.0441, 0.0738, 0.0377, 0.0659], + device='cuda:5'), in_proj_covar=tensor([0.0128, 0.0153, 0.0122, 0.0133, 0.0131, 0.0126, 0.0144, 0.0147], + device='cuda:5'), out_proj_covar=tensor([9.4479e-05, 1.1218e-04, 8.8256e-05, 9.5957e-05, 9.2964e-05, 9.1758e-05, + 1.0488e-04, 1.0744e-04], device='cuda:5') +2023-03-26 13:54:36,191 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62773.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:54:38,555 INFO [finetune.py:976] (5/7) Epoch 11, batch 5500, loss[loss=0.1613, simple_loss=0.2296, pruned_loss=0.04653, over 4895.00 frames. ], tot_loss[loss=0.19, simple_loss=0.2546, pruned_loss=0.06269, over 951058.27 frames. ], batch size: 32, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 13:54:39,233 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62778.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:54:57,736 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7546, 1.8741, 1.6706, 1.5319, 2.2892, 2.1872, 2.0491, 1.8542], + device='cuda:5'), covar=tensor([0.0399, 0.0382, 0.0552, 0.0396, 0.0269, 0.0478, 0.0322, 0.0401], + device='cuda:5'), in_proj_covar=tensor([0.0093, 0.0110, 0.0142, 0.0115, 0.0103, 0.0105, 0.0093, 0.0109], + device='cuda:5'), out_proj_covar=tensor([7.2248e-05, 8.5243e-05, 1.1269e-04, 8.9722e-05, 8.0288e-05, 7.7804e-05, + 7.0479e-05, 8.4038e-05], device='cuda:5') +2023-03-26 13:55:01,845 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.3508, 3.7813, 3.9793, 4.1981, 4.0862, 3.9322, 4.4055, 1.3735], + device='cuda:5'), covar=tensor([0.0742, 0.0870, 0.0752, 0.0982, 0.1282, 0.1407, 0.0701, 0.5658], + device='cuda:5'), in_proj_covar=tensor([0.0348, 0.0243, 0.0276, 0.0289, 0.0331, 0.0283, 0.0302, 0.0294], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:55:12,351 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=62826.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:55:12,914 INFO [finetune.py:976] (5/7) Epoch 11, batch 5550, loss[loss=0.2246, simple_loss=0.2919, pruned_loss=0.07864, over 4927.00 frames. ], tot_loss[loss=0.1917, simple_loss=0.2563, pruned_loss=0.06352, over 951604.28 frames. 
], batch size: 38, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 13:55:17,980 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62834.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:55:19,877 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.162e+02 1.580e+02 1.841e+02 2.336e+02 5.980e+02, threshold=3.683e+02, percent-clipped=6.0 +2023-03-26 13:55:41,660 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0152, 1.9190, 1.6054, 1.8459, 1.9515, 1.6781, 2.1552, 1.9524], + device='cuda:5'), covar=tensor([0.1406, 0.2421, 0.3398, 0.2674, 0.3000, 0.1856, 0.3438, 0.2095], + device='cuda:5'), in_proj_covar=tensor([0.0176, 0.0188, 0.0233, 0.0255, 0.0242, 0.0198, 0.0212, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:55:52,252 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5369, 1.0417, 0.8648, 1.4505, 1.8594, 1.1887, 1.2170, 1.4395], + device='cuda:5'), covar=tensor([0.1943, 0.2862, 0.2479, 0.1600, 0.2389, 0.2535, 0.2096, 0.2653], + device='cuda:5'), in_proj_covar=tensor([0.0088, 0.0094, 0.0111, 0.0092, 0.0118, 0.0093, 0.0098, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 13:56:03,690 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0596, 1.7463, 2.2275, 3.4297, 2.5172, 2.5881, 1.0888, 2.7377], + device='cuda:5'), covar=tensor([0.1466, 0.1362, 0.1269, 0.0505, 0.0689, 0.2215, 0.1682, 0.0535], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0115, 0.0134, 0.0163, 0.0100, 0.0138, 0.0125, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 13:56:07,690 INFO [finetune.py:976] (5/7) Epoch 11, batch 5600, loss[loss=0.2222, simple_loss=0.2856, pruned_loss=0.07938, over 4113.00 frames. ], tot_loss[loss=0.1936, simple_loss=0.2593, pruned_loss=0.06397, over 952483.11 frames. ], batch size: 65, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 13:56:17,002 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.3040, 2.9236, 3.0670, 3.2398, 3.0847, 2.9101, 3.3601, 0.9326], + device='cuda:5'), covar=tensor([0.1105, 0.1014, 0.1017, 0.1254, 0.1744, 0.1793, 0.1079, 0.5223], + device='cuda:5'), in_proj_covar=tensor([0.0348, 0.0243, 0.0275, 0.0289, 0.0330, 0.0283, 0.0302, 0.0294], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:56:22,195 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62901.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:56:36,248 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8914, 1.6799, 1.4939, 1.5021, 1.6466, 1.6622, 1.6178, 2.3724], + device='cuda:5'), covar=tensor([0.4361, 0.4934, 0.3589, 0.4241, 0.4306, 0.2437, 0.4382, 0.1693], + device='cuda:5'), in_proj_covar=tensor([0.0285, 0.0260, 0.0223, 0.0275, 0.0242, 0.0208, 0.0245, 0.0216], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:56:37,252 INFO [finetune.py:976] (5/7) Epoch 11, batch 5650, loss[loss=0.2073, simple_loss=0.2828, pruned_loss=0.06593, over 4806.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.2616, pruned_loss=0.06412, over 952978.78 frames. 
], batch size: 41, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 13:56:38,504 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62929.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:56:48,738 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.326e+01 1.606e+02 1.910e+02 2.279e+02 4.497e+02, threshold=3.820e+02, percent-clipped=2.0 +2023-03-26 13:56:51,141 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62941.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:57:23,400 INFO [finetune.py:976] (5/7) Epoch 11, batch 5700, loss[loss=0.1677, simple_loss=0.2346, pruned_loss=0.0504, over 3912.00 frames. ], tot_loss[loss=0.1925, simple_loss=0.258, pruned_loss=0.06354, over 931493.01 frames. ], batch size: 17, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 13:57:23,433 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=62977.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:57:23,693 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-03-26 13:57:54,981 INFO [finetune.py:976] (5/7) Epoch 12, batch 0, loss[loss=0.1794, simple_loss=0.2503, pruned_loss=0.0542, over 4752.00 frames. ], tot_loss[loss=0.1794, simple_loss=0.2503, pruned_loss=0.0542, over 4752.00 frames. ], batch size: 28, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 13:57:54,982 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 13:58:04,649 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1484, 1.8913, 1.7414, 1.7259, 1.8580, 1.8213, 1.8194, 2.5547], + device='cuda:5'), covar=tensor([0.4428, 0.5194, 0.3758, 0.4478, 0.4341, 0.2853, 0.4401, 0.1817], + device='cuda:5'), in_proj_covar=tensor([0.0284, 0.0260, 0.0222, 0.0275, 0.0242, 0.0209, 0.0245, 0.0216], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:58:11,586 INFO [finetune.py:1010] (5/7) Epoch 12, validation: loss=0.16, simple_loss=0.2305, pruned_loss=0.04472, over 2265189.00 frames. 
+2023-03-26 13:58:11,586 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 13:58:19,013 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9281, 1.9340, 1.9703, 1.2589, 1.9680, 2.0274, 1.9812, 1.6527], + device='cuda:5'), covar=tensor([0.0610, 0.0632, 0.0781, 0.0999, 0.0720, 0.0737, 0.0648, 0.1133], + device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0134, 0.0141, 0.0125, 0.0121, 0.0143, 0.0143, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:58:22,061 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63022.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:58:37,036 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.014e+02 1.590e+02 1.966e+02 2.351e+02 4.424e+02, threshold=3.931e+02, percent-clipped=2.0 +2023-03-26 13:58:37,198 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9358, 1.7493, 1.5003, 1.6325, 1.6699, 1.6641, 1.6707, 2.4170], + device='cuda:5'), covar=tensor([0.4370, 0.4527, 0.3603, 0.4172, 0.4290, 0.2559, 0.4087, 0.1776], + device='cuda:5'), in_proj_covar=tensor([0.0284, 0.0259, 0.0222, 0.0275, 0.0242, 0.0208, 0.0245, 0.0215], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:58:48,221 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-03-26 13:58:49,957 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63047.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:59:00,695 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-26 13:59:00,856 INFO [finetune.py:976] (5/7) Epoch 12, batch 50, loss[loss=0.2493, simple_loss=0.3046, pruned_loss=0.09702, over 4753.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2613, pruned_loss=0.06489, over 214417.64 frames. ], batch size: 54, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 13:59:10,751 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9797, 3.0865, 2.9109, 2.1400, 3.0182, 3.2439, 3.1608, 2.6723], + device='cuda:5'), covar=tensor([0.0524, 0.0521, 0.0657, 0.0845, 0.0490, 0.0630, 0.0587, 0.0901], + device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0134, 0.0141, 0.0125, 0.0121, 0.0144, 0.0143, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 13:59:42,650 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=63095.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 13:59:51,332 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.79 vs. limit=5.0 +2023-03-26 13:59:54,691 INFO [finetune.py:976] (5/7) Epoch 12, batch 100, loss[loss=0.1816, simple_loss=0.2451, pruned_loss=0.05901, over 4899.00 frames. ], tot_loss[loss=0.1893, simple_loss=0.2543, pruned_loss=0.0621, over 380247.98 frames. 
], batch size: 43, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 14:00:15,391 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63129.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:00:21,137 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.120e+02 1.723e+02 1.978e+02 2.544e+02 5.107e+02, threshold=3.957e+02, percent-clipped=1.0 +2023-03-26 14:00:50,143 INFO [finetune.py:976] (5/7) Epoch 12, batch 150, loss[loss=0.1773, simple_loss=0.238, pruned_loss=0.05825, over 4902.00 frames. ], tot_loss[loss=0.1858, simple_loss=0.2503, pruned_loss=0.06071, over 507770.78 frames. ], batch size: 35, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 14:01:47,575 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63201.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:01:56,081 INFO [finetune.py:976] (5/7) Epoch 12, batch 200, loss[loss=0.1604, simple_loss=0.2347, pruned_loss=0.04298, over 4911.00 frames. ], tot_loss[loss=0.1861, simple_loss=0.2504, pruned_loss=0.0609, over 606711.76 frames. ], batch size: 43, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 14:02:17,458 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63221.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:02:32,830 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.005e+02 1.551e+02 1.870e+02 2.223e+02 3.918e+02, threshold=3.740e+02, percent-clipped=0.0 +2023-03-26 14:02:41,481 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63241.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:02:46,789 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=63249.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:02:50,817 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0345, 2.1257, 2.7161, 2.4745, 2.3252, 4.7062, 2.0877, 2.2220], + device='cuda:5'), covar=tensor([0.0878, 0.1544, 0.0881, 0.0823, 0.1273, 0.0199, 0.1213, 0.1455], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0075, 0.0078, 0.0092, 0.0081, 0.0085, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 14:02:51,344 INFO [finetune.py:976] (5/7) Epoch 12, batch 250, loss[loss=0.1243, simple_loss=0.1878, pruned_loss=0.03038, over 4439.00 frames. ], tot_loss[loss=0.1886, simple_loss=0.2538, pruned_loss=0.06172, over 683892.84 frames. 
], batch size: 19, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 14:02:55,500 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8434, 4.0542, 3.8386, 2.0105, 4.2348, 3.0857, 0.8999, 2.8440], + device='cuda:5'), covar=tensor([0.2305, 0.1978, 0.1670, 0.3381, 0.0917, 0.0970, 0.4704, 0.1456], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0175, 0.0160, 0.0129, 0.0156, 0.0122, 0.0147, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 14:03:02,676 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8944, 1.5281, 2.2950, 3.5561, 2.4624, 2.5400, 1.2532, 2.8366], + device='cuda:5'), covar=tensor([0.1664, 0.1494, 0.1270, 0.0545, 0.0783, 0.1989, 0.1473, 0.0488], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0115, 0.0133, 0.0163, 0.0100, 0.0137, 0.0124, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 14:03:08,676 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63282.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:03:13,375 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=63289.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:03:23,974 INFO [finetune.py:976] (5/7) Epoch 12, batch 300, loss[loss=0.2388, simple_loss=0.2891, pruned_loss=0.0942, over 4906.00 frames. ], tot_loss[loss=0.1928, simple_loss=0.2586, pruned_loss=0.06349, over 744020.07 frames. ], batch size: 38, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 14:03:40,235 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63322.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:03:51,184 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.087e+02 1.663e+02 2.076e+02 2.406e+02 5.777e+02, threshold=4.151e+02, percent-clipped=4.0 +2023-03-26 14:03:55,035 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-26 14:04:08,570 INFO [finetune.py:976] (5/7) Epoch 12, batch 350, loss[loss=0.1812, simple_loss=0.2615, pruned_loss=0.05045, over 4837.00 frames. ], tot_loss[loss=0.1943, simple_loss=0.2607, pruned_loss=0.0639, over 789797.43 frames. ], batch size: 47, lr: 3.67e-03, grad_scale: 16.0 +2023-03-26 14:04:27,604 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=63370.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:04:59,621 INFO [finetune.py:976] (5/7) Epoch 12, batch 400, loss[loss=0.1631, simple_loss=0.2238, pruned_loss=0.05118, over 4082.00 frames. ], tot_loss[loss=0.1936, simple_loss=0.2604, pruned_loss=0.06337, over 825020.32 frames. 
], batch size: 17, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:05:02,021 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5566, 1.4281, 2.1221, 1.9345, 1.7930, 4.2499, 1.4291, 1.7226], + device='cuda:5'), covar=tensor([0.1012, 0.1887, 0.1268, 0.1019, 0.1530, 0.0229, 0.1542, 0.1780], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0075, 0.0078, 0.0092, 0.0082, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 14:05:08,496 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9486, 1.4053, 1.9103, 1.8503, 1.6296, 1.6412, 1.8384, 1.7369], + device='cuda:5'), covar=tensor([0.3960, 0.4603, 0.3926, 0.4104, 0.5355, 0.3966, 0.5334, 0.3710], + device='cuda:5'), in_proj_covar=tensor([0.0239, 0.0239, 0.0256, 0.0261, 0.0258, 0.0232, 0.0276, 0.0234], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:05:10,717 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63420.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:05:12,395 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63422.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:05:16,646 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63429.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:05:21,323 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.052e+02 1.591e+02 1.854e+02 2.332e+02 4.296e+02, threshold=3.709e+02, percent-clipped=1.0 +2023-03-26 14:05:38,142 INFO [finetune.py:976] (5/7) Epoch 12, batch 450, loss[loss=0.1907, simple_loss=0.2569, pruned_loss=0.06227, over 4829.00 frames. ], tot_loss[loss=0.193, simple_loss=0.2597, pruned_loss=0.06313, over 855240.42 frames. ], batch size: 33, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:05:57,272 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=63477.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:05:59,777 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63481.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:06:00,974 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63483.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:06:08,820 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2626, 1.3364, 1.4622, 1.6041, 1.5805, 2.9566, 1.2227, 1.5460], + device='cuda:5'), covar=tensor([0.1144, 0.2024, 0.1155, 0.1085, 0.1645, 0.0329, 0.1769, 0.1997], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0075, 0.0078, 0.0092, 0.0081, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 14:06:15,170 INFO [finetune.py:976] (5/7) Epoch 12, batch 500, loss[loss=0.1668, simple_loss=0.2378, pruned_loss=0.04787, over 4767.00 frames. ], tot_loss[loss=0.1919, simple_loss=0.2581, pruned_loss=0.06289, over 879610.38 frames. 
], batch size: 28, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:06:37,054 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.336e+01 1.553e+02 1.855e+02 2.331e+02 4.193e+02, threshold=3.711e+02, percent-clipped=1.0 +2023-03-26 14:06:37,735 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1191, 3.5793, 3.8284, 3.7131, 3.6474, 3.5511, 4.1969, 1.3668], + device='cuda:5'), covar=tensor([0.1314, 0.1885, 0.1675, 0.2046, 0.2405, 0.2400, 0.1420, 0.7769], + device='cuda:5'), in_proj_covar=tensor([0.0347, 0.0244, 0.0276, 0.0290, 0.0330, 0.0283, 0.0301, 0.0294], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:06:48,874 INFO [finetune.py:976] (5/7) Epoch 12, batch 550, loss[loss=0.2369, simple_loss=0.2901, pruned_loss=0.09187, over 4731.00 frames. ], tot_loss[loss=0.1897, simple_loss=0.2552, pruned_loss=0.06213, over 893281.77 frames. ], batch size: 59, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:06:58,443 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63569.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:07:03,808 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63577.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:07:10,289 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63586.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:07:17,007 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-03-26 14:07:22,327 INFO [finetune.py:976] (5/7) Epoch 12, batch 600, loss[loss=0.1976, simple_loss=0.264, pruned_loss=0.06564, over 4858.00 frames. ], tot_loss[loss=0.1908, simple_loss=0.2561, pruned_loss=0.0627, over 908235.64 frames. ], batch size: 44, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:07:40,678 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63630.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:07:44,850 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.064e+02 1.685e+02 2.017e+02 2.531e+02 3.696e+02, threshold=4.034e+02, percent-clipped=0.0 +2023-03-26 14:07:51,117 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63647.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:07:56,391 INFO [finetune.py:976] (5/7) Epoch 12, batch 650, loss[loss=0.1719, simple_loss=0.2527, pruned_loss=0.04553, over 4857.00 frames. ], tot_loss[loss=0.1917, simple_loss=0.2578, pruned_loss=0.06279, over 914845.45 frames. ], batch size: 44, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:08:15,013 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-03-26 14:08:29,866 INFO [finetune.py:976] (5/7) Epoch 12, batch 700, loss[loss=0.1987, simple_loss=0.2677, pruned_loss=0.06485, over 4830.00 frames. ], tot_loss[loss=0.1922, simple_loss=0.2586, pruned_loss=0.06289, over 923964.18 frames. 
], batch size: 47, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:08:59,836 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.062e+02 1.754e+02 2.049e+02 2.499e+02 4.974e+02, threshold=4.098e+02, percent-clipped=3.0 +2023-03-26 14:09:05,238 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.0942, 3.4949, 3.7154, 3.9364, 3.8549, 3.6816, 4.1633, 1.2444], + device='cuda:5'), covar=tensor([0.0757, 0.0916, 0.0827, 0.0878, 0.1193, 0.1401, 0.0711, 0.5178], + device='cuda:5'), in_proj_covar=tensor([0.0348, 0.0244, 0.0276, 0.0290, 0.0330, 0.0283, 0.0300, 0.0294], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:09:11,206 INFO [finetune.py:976] (5/7) Epoch 12, batch 750, loss[loss=0.2, simple_loss=0.2661, pruned_loss=0.06697, over 4810.00 frames. ], tot_loss[loss=0.1931, simple_loss=0.26, pruned_loss=0.06311, over 932143.98 frames. ], batch size: 40, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:09:25,545 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63776.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:09:26,761 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63778.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:09:54,816 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.93 vs. limit=5.0 +2023-03-26 14:09:56,456 INFO [finetune.py:976] (5/7) Epoch 12, batch 800, loss[loss=0.1901, simple_loss=0.2561, pruned_loss=0.06206, over 4887.00 frames. ], tot_loss[loss=0.1932, simple_loss=0.2604, pruned_loss=0.06305, over 937420.85 frames. ], batch size: 32, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:10:04,888 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63810.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:10:25,523 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4433, 1.3678, 1.3304, 1.3446, 0.7595, 2.2658, 0.7200, 1.2551], + device='cuda:5'), covar=tensor([0.3390, 0.2546, 0.2231, 0.2538, 0.2127, 0.0381, 0.2868, 0.1440], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0115, 0.0119, 0.0123, 0.0115, 0.0098, 0.0098, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 14:10:26,007 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.587e+02 1.868e+02 2.134e+02 3.136e+02, threshold=3.736e+02, percent-clipped=1.0 +2023-03-26 14:10:32,479 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63845.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:10:34,913 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5970, 1.4665, 2.1084, 3.2142, 2.2737, 2.3899, 1.1146, 2.5727], + device='cuda:5'), covar=tensor([0.1802, 0.1423, 0.1231, 0.0529, 0.0776, 0.1174, 0.1766, 0.0540], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0116, 0.0134, 0.0164, 0.0101, 0.0138, 0.0126, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 14:10:38,486 INFO [finetune.py:976] (5/7) Epoch 12, batch 850, loss[loss=0.1523, simple_loss=0.231, pruned_loss=0.03682, over 4754.00 frames. ], tot_loss[loss=0.1916, simple_loss=0.2582, pruned_loss=0.06255, over 941265.15 frames. 
], batch size: 28, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:10:41,022 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1964, 2.0187, 1.5927, 2.0547, 2.1857, 1.8907, 2.4333, 2.1397], + device='cuda:5'), covar=tensor([0.1293, 0.2564, 0.3407, 0.2956, 0.2647, 0.1730, 0.3831, 0.2041], + device='cuda:5'), in_proj_covar=tensor([0.0177, 0.0188, 0.0234, 0.0255, 0.0241, 0.0198, 0.0212, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:10:51,326 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63871.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:10:59,633 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63877.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:11:03,732 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5075, 1.4089, 1.8251, 1.9256, 1.6565, 3.5058, 1.2470, 1.6535], + device='cuda:5'), covar=tensor([0.0974, 0.1822, 0.1031, 0.0867, 0.1520, 0.0299, 0.1539, 0.1636], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0075, 0.0078, 0.0092, 0.0082, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 14:11:22,732 INFO [finetune.py:976] (5/7) Epoch 12, batch 900, loss[loss=0.1694, simple_loss=0.2338, pruned_loss=0.05251, over 4822.00 frames. ], tot_loss[loss=0.1899, simple_loss=0.2562, pruned_loss=0.06181, over 943871.51 frames. ], batch size: 39, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:11:23,446 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63906.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:11:29,451 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63916.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:11:35,893 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=63925.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:11:35,902 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63925.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:11:44,049 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.021e+02 1.611e+02 1.873e+02 2.372e+02 4.297e+02, threshold=3.747e+02, percent-clipped=2.0 +2023-03-26 14:11:47,184 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63942.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:11:56,454 INFO [finetune.py:976] (5/7) Epoch 12, batch 950, loss[loss=0.1974, simple_loss=0.2555, pruned_loss=0.06969, over 4833.00 frames. ], tot_loss[loss=0.188, simple_loss=0.2539, pruned_loss=0.06101, over 947659.10 frames. ], batch size: 30, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:12:03,843 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1693, 3.5932, 3.8059, 3.9624, 3.8948, 3.6920, 4.2406, 1.4169], + device='cuda:5'), covar=tensor([0.0769, 0.0793, 0.0795, 0.0989, 0.1270, 0.1464, 0.0681, 0.5515], + device='cuda:5'), in_proj_covar=tensor([0.0346, 0.0242, 0.0274, 0.0288, 0.0329, 0.0280, 0.0299, 0.0292], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:12:09,917 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. 
limit=2.0 +2023-03-26 14:12:10,886 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63977.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:12:31,104 INFO [finetune.py:976] (5/7) Epoch 12, batch 1000, loss[loss=0.2078, simple_loss=0.2673, pruned_loss=0.0742, over 4749.00 frames. ], tot_loss[loss=0.1906, simple_loss=0.2567, pruned_loss=0.06231, over 948896.04 frames. ], batch size: 54, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:12:43,098 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64023.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:12:51,952 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.200e+02 1.649e+02 1.875e+02 2.259e+02 3.443e+02, threshold=3.751e+02, percent-clipped=0.0 +2023-03-26 14:12:52,686 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0816, 1.9049, 2.1084, 1.3333, 1.9811, 2.1172, 2.0528, 1.6644], + device='cuda:5'), covar=tensor([0.0517, 0.0700, 0.0632, 0.0927, 0.0688, 0.0659, 0.0644, 0.1168], + device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0133, 0.0140, 0.0124, 0.0121, 0.0142, 0.0143, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:13:04,255 INFO [finetune.py:976] (5/7) Epoch 12, batch 1050, loss[loss=0.1997, simple_loss=0.2783, pruned_loss=0.06057, over 4913.00 frames. ], tot_loss[loss=0.1937, simple_loss=0.2608, pruned_loss=0.06332, over 951664.22 frames. ], batch size: 36, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:13:04,353 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1868, 1.3577, 1.2453, 1.3717, 1.5450, 2.4433, 1.3242, 1.5532], + device='cuda:5'), covar=tensor([0.1099, 0.1828, 0.1144, 0.1036, 0.1695, 0.0463, 0.1583, 0.1770], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0076, 0.0079, 0.0093, 0.0082, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 14:13:18,000 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64076.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:13:18,800 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-03-26 14:13:19,216 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64078.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:13:22,341 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3827, 2.2402, 1.7555, 0.8152, 1.9593, 1.8961, 1.8031, 2.0422], + device='cuda:5'), covar=tensor([0.0901, 0.0769, 0.1509, 0.1998, 0.1359, 0.2446, 0.2093, 0.0876], + device='cuda:5'), in_proj_covar=tensor([0.0166, 0.0198, 0.0201, 0.0186, 0.0214, 0.0207, 0.0222, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:13:22,971 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64084.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:13:37,901 INFO [finetune.py:976] (5/7) Epoch 12, batch 1100, loss[loss=0.2365, simple_loss=0.299, pruned_loss=0.08699, over 4928.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2623, pruned_loss=0.06408, over 952624.46 frames. 
], batch size: 41, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:13:42,888 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8000, 1.6168, 1.5962, 1.6215, 1.0906, 4.2856, 1.6209, 2.0107], + device='cuda:5'), covar=tensor([0.3328, 0.2440, 0.2088, 0.2309, 0.1848, 0.0120, 0.2401, 0.1282], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0115, 0.0119, 0.0123, 0.0115, 0.0098, 0.0098, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 14:13:53,906 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=64124.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:13:55,109 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=64126.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:14:00,929 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-03-26 14:14:05,898 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.091e+02 1.584e+02 1.925e+02 2.329e+02 4.054e+02, threshold=3.850e+02, percent-clipped=2.0 +2023-03-26 14:14:17,904 INFO [finetune.py:976] (5/7) Epoch 12, batch 1150, loss[loss=0.2331, simple_loss=0.2805, pruned_loss=0.09285, over 4877.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2624, pruned_loss=0.06363, over 955174.94 frames. ], batch size: 32, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:14:25,686 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64166.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:14:30,415 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7231, 1.6868, 1.6225, 1.6643, 1.2905, 3.9338, 1.5508, 1.9998], + device='cuda:5'), covar=tensor([0.3594, 0.2552, 0.2121, 0.2448, 0.1861, 0.0145, 0.2418, 0.1287], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0115, 0.0120, 0.0123, 0.0115, 0.0098, 0.0098, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 14:14:40,409 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1534, 3.5446, 3.8126, 3.9481, 3.8814, 3.6504, 4.2217, 1.3305], + device='cuda:5'), covar=tensor([0.0772, 0.0840, 0.0833, 0.0972, 0.1142, 0.1482, 0.0641, 0.5442], + device='cuda:5'), in_proj_covar=tensor([0.0347, 0.0242, 0.0275, 0.0290, 0.0330, 0.0282, 0.0300, 0.0294], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:14:48,874 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64201.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:14:56,321 INFO [finetune.py:976] (5/7) Epoch 12, batch 1200, loss[loss=0.2026, simple_loss=0.2703, pruned_loss=0.06741, over 4933.00 frames. ], tot_loss[loss=0.1942, simple_loss=0.2614, pruned_loss=0.06344, over 955771.05 frames. 
], batch size: 38, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:15:14,939 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64225.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:15:24,860 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64232.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:15:31,344 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.004e+02 1.575e+02 1.833e+02 2.193e+02 5.344e+02, threshold=3.667e+02, percent-clipped=2.0 +2023-03-26 14:15:34,441 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64242.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:15:43,194 INFO [finetune.py:976] (5/7) Epoch 12, batch 1250, loss[loss=0.1694, simple_loss=0.2309, pruned_loss=0.05397, over 4825.00 frames. ], tot_loss[loss=0.192, simple_loss=0.2587, pruned_loss=0.0626, over 956067.55 frames. ], batch size: 30, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:15:55,101 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64272.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:15:55,714 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=64273.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:16:09,179 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=64290.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:16:11,100 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64293.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:16:27,224 INFO [finetune.py:976] (5/7) Epoch 12, batch 1300, loss[loss=0.1847, simple_loss=0.242, pruned_loss=0.06374, over 4810.00 frames. ], tot_loss[loss=0.1887, simple_loss=0.2552, pruned_loss=0.06115, over 956864.82 frames. ], batch size: 25, lr: 3.66e-03, grad_scale: 16.0 +2023-03-26 14:16:42,380 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-03-26 14:16:48,480 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.098e+02 1.610e+02 1.842e+02 2.244e+02 4.381e+02, threshold=3.684e+02, percent-clipped=1.0 +2023-03-26 14:16:53,422 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64345.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:16:55,226 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9712, 1.6566, 2.2983, 3.7296, 2.6745, 2.6769, 0.9514, 3.0139], + device='cuda:5'), covar=tensor([0.1748, 0.1541, 0.1458, 0.0526, 0.0763, 0.1644, 0.1962, 0.0509], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0116, 0.0135, 0.0165, 0.0101, 0.0138, 0.0127, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 14:16:59,918 INFO [finetune.py:976] (5/7) Epoch 12, batch 1350, loss[loss=0.2162, simple_loss=0.2697, pruned_loss=0.08138, over 4764.00 frames. ], tot_loss[loss=0.1903, simple_loss=0.256, pruned_loss=0.06233, over 957231.15 frames. ], batch size: 28, lr: 3.66e-03, grad_scale: 32.0 +2023-03-26 14:17:16,046 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64379.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:17:33,431 INFO [finetune.py:976] (5/7) Epoch 12, batch 1400, loss[loss=0.1742, simple_loss=0.2384, pruned_loss=0.055, over 4778.00 frames. ], tot_loss[loss=0.192, simple_loss=0.2586, pruned_loss=0.06274, over 956413.59 frames. 
], batch size: 26, lr: 3.66e-03, grad_scale: 32.0 +2023-03-26 14:17:34,189 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64406.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:17:46,543 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2623, 1.2574, 1.5044, 1.0687, 1.1863, 1.4410, 1.2406, 1.5952], + device='cuda:5'), covar=tensor([0.1279, 0.2228, 0.1417, 0.1465, 0.0970, 0.1220, 0.2912, 0.0813], + device='cuda:5'), in_proj_covar=tensor([0.0196, 0.0205, 0.0195, 0.0191, 0.0179, 0.0214, 0.0218, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:17:54,255 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.617e+02 1.936e+02 2.295e+02 3.610e+02, threshold=3.872e+02, percent-clipped=0.0 +2023-03-26 14:18:06,658 INFO [finetune.py:976] (5/7) Epoch 12, batch 1450, loss[loss=0.168, simple_loss=0.2483, pruned_loss=0.04391, over 4899.00 frames. ], tot_loss[loss=0.1926, simple_loss=0.2597, pruned_loss=0.06278, over 956709.47 frames. ], batch size: 35, lr: 3.66e-03, grad_scale: 32.0 +2023-03-26 14:18:13,337 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64465.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:18:13,905 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64466.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:18:37,438 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64501.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:18:39,730 INFO [finetune.py:976] (5/7) Epoch 12, batch 1500, loss[loss=0.2538, simple_loss=0.3163, pruned_loss=0.0957, over 4824.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2624, pruned_loss=0.06399, over 955649.99 frames. ], batch size: 49, lr: 3.66e-03, grad_scale: 32.0 +2023-03-26 14:18:46,090 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=64514.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:18:54,932 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64526.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:18:58,576 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5219, 1.0615, 0.8436, 1.3665, 1.9397, 0.7644, 1.2087, 1.3885], + device='cuda:5'), covar=tensor([0.1597, 0.2268, 0.1920, 0.1307, 0.1964, 0.2041, 0.1593, 0.2027], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0095, 0.0113, 0.0092, 0.0119, 0.0094, 0.0099, 0.0090], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 14:19:01,494 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.120e+02 1.738e+02 2.083e+02 2.672e+02 4.064e+02, threshold=4.165e+02, percent-clipped=1.0 +2023-03-26 14:19:15,861 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=64549.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:19:19,449 INFO [finetune.py:976] (5/7) Epoch 12, batch 1550, loss[loss=0.1572, simple_loss=0.2396, pruned_loss=0.03747, over 4849.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.2621, pruned_loss=0.06388, over 955790.43 frames. 
], batch size: 44, lr: 3.66e-03, grad_scale: 32.0 +2023-03-26 14:19:30,783 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6580, 3.8613, 3.5586, 1.8398, 3.9870, 3.0171, 0.6826, 2.7091], + device='cuda:5'), covar=tensor([0.2360, 0.1839, 0.1639, 0.3207, 0.0930, 0.0929, 0.4502, 0.1375], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0173, 0.0159, 0.0128, 0.0155, 0.0122, 0.0145, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 14:19:33,901 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64572.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:19:45,160 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64588.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:19:45,815 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64589.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:19:56,456 INFO [finetune.py:976] (5/7) Epoch 12, batch 1600, loss[loss=0.2278, simple_loss=0.2914, pruned_loss=0.08208, over 4857.00 frames. ], tot_loss[loss=0.1947, simple_loss=0.2612, pruned_loss=0.06407, over 955453.61 frames. ], batch size: 44, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:20:03,295 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2028, 2.1070, 2.0814, 2.1428, 1.8401, 3.9640, 1.9356, 2.5759], + device='cuda:5'), covar=tensor([0.2793, 0.2006, 0.1752, 0.2005, 0.1412, 0.0213, 0.2073, 0.0952], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0115, 0.0120, 0.0123, 0.0115, 0.0098, 0.0098, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 14:20:12,894 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=64620.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:20:30,241 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.026e+02 1.633e+02 1.922e+02 2.431e+02 4.177e+02, threshold=3.845e+02, percent-clipped=1.0 +2023-03-26 14:20:34,374 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-26 14:20:41,559 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64648.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:20:43,313 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64650.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:20:49,613 INFO [finetune.py:976] (5/7) Epoch 12, batch 1650, loss[loss=0.1644, simple_loss=0.2307, pruned_loss=0.04909, over 4337.00 frames. ], tot_loss[loss=0.1932, simple_loss=0.2591, pruned_loss=0.06359, over 955717.25 frames. 
], batch size: 19, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:20:50,997 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9252, 1.3981, 2.0132, 1.7314, 1.6006, 1.5980, 1.6912, 1.7987], + device='cuda:5'), covar=tensor([0.3898, 0.4373, 0.3510, 0.4119, 0.4938, 0.3930, 0.4651, 0.3141], + device='cuda:5'), in_proj_covar=tensor([0.0237, 0.0238, 0.0256, 0.0259, 0.0257, 0.0231, 0.0274, 0.0232], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:21:05,206 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64679.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:21:19,538 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64701.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:21:21,766 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64703.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:21:22,890 INFO [finetune.py:976] (5/7) Epoch 12, batch 1700, loss[loss=0.1313, simple_loss=0.1955, pruned_loss=0.03352, over 4772.00 frames. ], tot_loss[loss=0.1914, simple_loss=0.2568, pruned_loss=0.06305, over 958286.91 frames. ], batch size: 26, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:21:27,422 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64709.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:21:46,818 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=64727.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:21:53,447 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.135e+02 1.591e+02 1.930e+02 2.225e+02 5.420e+02, threshold=3.861e+02, percent-clipped=2.0 +2023-03-26 14:21:58,943 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64745.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:22:03,116 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64752.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:22:05,307 INFO [finetune.py:976] (5/7) Epoch 12, batch 1750, loss[loss=0.1876, simple_loss=0.2434, pruned_loss=0.06587, over 4759.00 frames. ], tot_loss[loss=0.1916, simple_loss=0.2573, pruned_loss=0.06294, over 956332.17 frames. 
], batch size: 26, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:22:10,818 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64764.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:22:11,453 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9347, 1.7560, 1.5084, 1.7378, 1.6832, 1.5936, 1.6787, 2.4416], + device='cuda:5'), covar=tensor([0.4519, 0.4858, 0.3729, 0.4391, 0.4413, 0.2792, 0.4379, 0.1717], + device='cuda:5'), in_proj_covar=tensor([0.0284, 0.0258, 0.0223, 0.0275, 0.0242, 0.0210, 0.0246, 0.0216], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:22:17,941 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6125, 2.4122, 2.0344, 1.0316, 2.2726, 1.9392, 1.8392, 2.2448], + device='cuda:5'), covar=tensor([0.0786, 0.0864, 0.1582, 0.2171, 0.1598, 0.2319, 0.2125, 0.0928], + device='cuda:5'), in_proj_covar=tensor([0.0165, 0.0199, 0.0201, 0.0185, 0.0215, 0.0207, 0.0223, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:22:28,397 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5901, 2.3013, 1.2083, 2.3707, 2.9298, 2.2875, 2.5114, 2.4702], + device='cuda:5'), covar=tensor([0.1317, 0.1676, 0.1899, 0.1068, 0.1592, 0.1519, 0.1163, 0.1673], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0095, 0.0113, 0.0092, 0.0119, 0.0094, 0.0099, 0.0090], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 14:22:38,082 INFO [finetune.py:976] (5/7) Epoch 12, batch 1800, loss[loss=0.1567, simple_loss=0.2279, pruned_loss=0.04279, over 4766.00 frames. ], tot_loss[loss=0.1939, simple_loss=0.2604, pruned_loss=0.06373, over 956092.93 frames. ], batch size: 27, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:22:38,811 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64806.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:22:43,509 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64813.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:22:48,350 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64821.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:22:58,985 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.145e+02 1.622e+02 1.968e+02 2.271e+02 4.247e+02, threshold=3.936e+02, percent-clipped=1.0 +2023-03-26 14:23:11,389 INFO [finetune.py:976] (5/7) Epoch 12, batch 1850, loss[loss=0.2226, simple_loss=0.2847, pruned_loss=0.08019, over 4147.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2624, pruned_loss=0.06446, over 954820.32 frames. ], batch size: 65, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:23:33,494 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64888.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:23:45,144 INFO [finetune.py:976] (5/7) Epoch 12, batch 1900, loss[loss=0.2139, simple_loss=0.2739, pruned_loss=0.07691, over 4730.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2636, pruned_loss=0.0646, over 953960.26 frames. 
], batch size: 54, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:24:06,071 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=64936.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:24:06,595 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.024e+02 1.653e+02 1.911e+02 2.364e+02 4.358e+02, threshold=3.822e+02, percent-clipped=3.0 +2023-03-26 14:24:12,013 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64945.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:24:18,923 INFO [finetune.py:976] (5/7) Epoch 12, batch 1950, loss[loss=0.1533, simple_loss=0.2142, pruned_loss=0.04617, over 4713.00 frames. ], tot_loss[loss=0.1942, simple_loss=0.2612, pruned_loss=0.06353, over 954297.74 frames. ], batch size: 23, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:24:40,823 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64976.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:24:51,014 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-03-26 14:24:58,110 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65001.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:24:59,918 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65004.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:25:00,454 INFO [finetune.py:976] (5/7) Epoch 12, batch 2000, loss[loss=0.1594, simple_loss=0.239, pruned_loss=0.03987, over 4813.00 frames. ], tot_loss[loss=0.1908, simple_loss=0.2579, pruned_loss=0.06184, over 955425.39 frames. ], batch size: 41, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:25:21,641 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.067e+02 1.609e+02 1.880e+02 2.233e+02 7.388e+02, threshold=3.760e+02, percent-clipped=1.0 +2023-03-26 14:25:21,787 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65037.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 14:25:34,388 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=65049.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:25:42,377 INFO [finetune.py:976] (5/7) Epoch 12, batch 2050, loss[loss=0.1883, simple_loss=0.2544, pruned_loss=0.06109, over 4802.00 frames. ], tot_loss[loss=0.1895, simple_loss=0.2555, pruned_loss=0.06172, over 954159.26 frames. 
], batch size: 29, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:25:42,506 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65055.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:25:45,411 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65059.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:26:21,863 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65101.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:26:21,911 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2082, 2.2125, 2.1417, 1.5963, 2.1265, 2.3196, 2.2378, 1.7975], + device='cuda:5'), covar=tensor([0.0617, 0.0576, 0.0746, 0.0923, 0.0634, 0.0729, 0.0614, 0.1078], + device='cuda:5'), in_proj_covar=tensor([0.0136, 0.0134, 0.0142, 0.0125, 0.0122, 0.0144, 0.0144, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:26:24,691 INFO [finetune.py:976] (5/7) Epoch 12, batch 2100, loss[loss=0.1742, simple_loss=0.2483, pruned_loss=0.05006, over 4915.00 frames. ], tot_loss[loss=0.1899, simple_loss=0.2557, pruned_loss=0.06202, over 954933.87 frames. ], batch size: 37, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:26:27,110 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65108.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:26:32,508 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65116.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:26:35,473 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65121.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:26:52,597 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.054e+02 1.674e+02 1.985e+02 2.398e+02 5.597e+02, threshold=3.971e+02, percent-clipped=1.0 +2023-03-26 14:27:08,170 INFO [finetune.py:976] (5/7) Epoch 12, batch 2150, loss[loss=0.207, simple_loss=0.2751, pruned_loss=0.0695, over 4722.00 frames. ], tot_loss[loss=0.191, simple_loss=0.2581, pruned_loss=0.06195, over 955018.78 frames. ], batch size: 23, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:27:17,728 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=65169.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:27:41,484 INFO [finetune.py:976] (5/7) Epoch 12, batch 2200, loss[loss=0.184, simple_loss=0.257, pruned_loss=0.0555, over 4766.00 frames. ], tot_loss[loss=0.1937, simple_loss=0.2609, pruned_loss=0.06332, over 954526.42 frames. ], batch size: 28, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:27:58,015 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-03-26 14:28:03,265 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.110e+02 1.670e+02 2.055e+02 2.491e+02 4.530e+02, threshold=4.111e+02, percent-clipped=2.0 +2023-03-26 14:28:08,802 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65245.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:28:15,257 INFO [finetune.py:976] (5/7) Epoch 12, batch 2250, loss[loss=0.1492, simple_loss=0.2134, pruned_loss=0.0425, over 4729.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2627, pruned_loss=0.06433, over 953616.36 frames. 
], batch size: 23, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:28:36,004 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65285.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:28:41,278 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=65293.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:28:48,516 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65304.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:28:49,048 INFO [finetune.py:976] (5/7) Epoch 12, batch 2300, loss[loss=0.175, simple_loss=0.2492, pruned_loss=0.05045, over 4866.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.2623, pruned_loss=0.06369, over 953747.76 frames. ], batch size: 34, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:28:50,390 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2009, 2.0571, 1.6820, 2.2347, 2.1809, 1.8784, 2.5525, 2.1439], + device='cuda:5'), covar=tensor([0.1357, 0.2550, 0.3447, 0.2898, 0.2817, 0.1883, 0.3896, 0.2041], + device='cuda:5'), in_proj_covar=tensor([0.0178, 0.0188, 0.0234, 0.0255, 0.0243, 0.0199, 0.0215, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:29:02,353 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2404, 1.9826, 1.4047, 0.5705, 1.7343, 1.9612, 1.8095, 1.7673], + device='cuda:5'), covar=tensor([0.1013, 0.0836, 0.1646, 0.2124, 0.1464, 0.2304, 0.2073, 0.0949], + device='cuda:5'), in_proj_covar=tensor([0.0166, 0.0199, 0.0200, 0.0185, 0.0215, 0.0207, 0.0223, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:29:07,487 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65332.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 14:29:10,446 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.717e+01 1.673e+02 1.949e+02 2.267e+02 6.743e+02, threshold=3.897e+02, percent-clipped=1.0 +2023-03-26 14:29:13,633 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2065, 1.7549, 2.2241, 2.0985, 1.8675, 1.8918, 2.0414, 1.9131], + device='cuda:5'), covar=tensor([0.4134, 0.4572, 0.3502, 0.4304, 0.5191, 0.4076, 0.5693, 0.3711], + device='cuda:5'), in_proj_covar=tensor([0.0240, 0.0239, 0.0257, 0.0261, 0.0259, 0.0233, 0.0276, 0.0234], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:29:16,548 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65346.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:29:20,082 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=65352.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:29:22,339 INFO [finetune.py:976] (5/7) Epoch 12, batch 2350, loss[loss=0.184, simple_loss=0.2536, pruned_loss=0.05717, over 4935.00 frames. ], tot_loss[loss=0.1935, simple_loss=0.2599, pruned_loss=0.06354, over 954097.46 frames. ], batch size: 38, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:29:24,830 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65359.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:29:46,175 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. 
limit=2.0 +2023-03-26 14:30:02,648 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65401.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:30:04,996 INFO [finetune.py:976] (5/7) Epoch 12, batch 2400, loss[loss=0.1799, simple_loss=0.2478, pruned_loss=0.05603, over 4825.00 frames. ], tot_loss[loss=0.1912, simple_loss=0.2573, pruned_loss=0.06252, over 955907.82 frames. ], batch size: 41, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:30:06,757 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=65407.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:30:07,397 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65408.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:30:09,186 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65411.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:30:10,459 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8540, 1.7778, 1.8648, 1.2176, 1.8046, 1.8907, 1.7857, 1.6079], + device='cuda:5'), covar=tensor([0.0463, 0.0615, 0.0648, 0.0843, 0.1077, 0.0530, 0.0541, 0.0972], + device='cuda:5'), in_proj_covar=tensor([0.0136, 0.0134, 0.0142, 0.0125, 0.0122, 0.0142, 0.0144, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:30:11,485 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.74 vs. limit=5.0 +2023-03-26 14:30:26,324 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.174e+02 1.534e+02 1.885e+02 2.327e+02 5.518e+02, threshold=3.771e+02, percent-clipped=1.0 +2023-03-26 14:30:34,673 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=65449.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:30:38,806 INFO [finetune.py:976] (5/7) Epoch 12, batch 2450, loss[loss=0.1712, simple_loss=0.2436, pruned_loss=0.04933, over 4905.00 frames. ], tot_loss[loss=0.19, simple_loss=0.2555, pruned_loss=0.06219, over 955864.57 frames. ], batch size: 43, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:30:39,469 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=65456.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:31:18,366 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7780, 1.8673, 1.6873, 1.4036, 2.1411, 2.2227, 1.9051, 1.8399], + device='cuda:5'), covar=tensor([0.0405, 0.0360, 0.0510, 0.0416, 0.0312, 0.0519, 0.0345, 0.0447], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0107, 0.0139, 0.0113, 0.0101, 0.0103, 0.0092, 0.0107], + device='cuda:5'), out_proj_covar=tensor([7.0724e-05, 8.3628e-05, 1.1012e-04, 8.8227e-05, 7.9253e-05, 7.6338e-05, + 6.9733e-05, 8.2625e-05], device='cuda:5') +2023-03-26 14:31:31,329 INFO [finetune.py:976] (5/7) Epoch 12, batch 2500, loss[loss=0.1937, simple_loss=0.2661, pruned_loss=0.06069, over 4871.00 frames. ], tot_loss[loss=0.1913, simple_loss=0.2569, pruned_loss=0.06284, over 954419.17 frames. 
], batch size: 31, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:31:39,796 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3750, 1.1276, 1.2200, 1.2381, 1.5457, 1.4585, 1.3371, 1.1688], + device='cuda:5'), covar=tensor([0.0284, 0.0304, 0.0576, 0.0299, 0.0251, 0.0475, 0.0323, 0.0376], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0108, 0.0139, 0.0113, 0.0102, 0.0103, 0.0093, 0.0108], + device='cuda:5'), out_proj_covar=tensor([7.0867e-05, 8.3834e-05, 1.1031e-04, 8.8409e-05, 7.9430e-05, 7.6628e-05, + 6.9938e-05, 8.2947e-05], device='cuda:5') +2023-03-26 14:31:49,232 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65532.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 14:31:52,646 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.473e+01 1.650e+02 2.020e+02 2.341e+02 4.049e+02, threshold=4.040e+02, percent-clipped=1.0 +2023-03-26 14:31:59,786 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5630, 1.4886, 2.0711, 1.8723, 1.8155, 4.1348, 1.4860, 1.7807], + device='cuda:5'), covar=tensor([0.0978, 0.1798, 0.1240, 0.1012, 0.1597, 0.0197, 0.1437, 0.1692], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0075, 0.0078, 0.0093, 0.0082, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 14:32:06,902 INFO [finetune.py:976] (5/7) Epoch 12, batch 2550, loss[loss=0.2124, simple_loss=0.2771, pruned_loss=0.07383, over 4891.00 frames. ], tot_loss[loss=0.1946, simple_loss=0.2613, pruned_loss=0.06395, over 955559.42 frames. ], batch size: 32, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:32:07,593 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. 
limit=2.0 +2023-03-26 14:32:18,863 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.8108, 4.2018, 4.3869, 4.5744, 4.5467, 4.3158, 4.9013, 1.5049], + device='cuda:5'), covar=tensor([0.0724, 0.0804, 0.0697, 0.0880, 0.1201, 0.1424, 0.0576, 0.5559], + device='cuda:5'), in_proj_covar=tensor([0.0346, 0.0242, 0.0276, 0.0291, 0.0330, 0.0281, 0.0300, 0.0296], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:32:26,120 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.1933, 1.3416, 1.3916, 0.7816, 1.2567, 1.5384, 1.6068, 1.2785], + device='cuda:5'), covar=tensor([0.1031, 0.0610, 0.0528, 0.0545, 0.0524, 0.0673, 0.0361, 0.0750], + device='cuda:5'), in_proj_covar=tensor([0.0126, 0.0153, 0.0122, 0.0132, 0.0131, 0.0126, 0.0143, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.3404e-05, 1.1211e-04, 8.7861e-05, 9.5088e-05, 9.2842e-05, 9.1585e-05, + 1.0428e-04, 1.0676e-04], device='cuda:5') +2023-03-26 14:32:27,312 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1880, 2.1246, 2.1779, 1.4999, 2.0119, 2.3072, 2.1233, 1.7363], + device='cuda:5'), covar=tensor([0.0563, 0.0661, 0.0705, 0.0997, 0.0681, 0.0707, 0.0721, 0.1187], + device='cuda:5'), in_proj_covar=tensor([0.0136, 0.0134, 0.0142, 0.0125, 0.0123, 0.0143, 0.0144, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:32:40,933 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65593.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 14:32:43,278 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5467, 1.4550, 2.2275, 3.1859, 2.2292, 2.2925, 1.1124, 2.5204], + device='cuda:5'), covar=tensor([0.1717, 0.1516, 0.1222, 0.0622, 0.0757, 0.1674, 0.1748, 0.0566], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0116, 0.0134, 0.0165, 0.0101, 0.0138, 0.0126, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 14:32:48,714 INFO [finetune.py:976] (5/7) Epoch 12, batch 2600, loss[loss=0.2683, simple_loss=0.3272, pruned_loss=0.1047, over 4812.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2621, pruned_loss=0.0641, over 953456.22 frames. 
], batch size: 40, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:32:55,956 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1543, 2.0984, 1.6832, 1.9919, 2.1095, 1.8042, 2.4399, 2.1449], + device='cuda:5'), covar=tensor([0.1383, 0.2137, 0.3198, 0.2835, 0.2833, 0.1847, 0.3110, 0.1835], + device='cuda:5'), in_proj_covar=tensor([0.0179, 0.0189, 0.0235, 0.0257, 0.0244, 0.0200, 0.0216, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:32:57,697 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6746, 1.5349, 2.2088, 3.3747, 2.3763, 2.3326, 0.8932, 2.7048], + device='cuda:5'), covar=tensor([0.1623, 0.1388, 0.1163, 0.0615, 0.0739, 0.1429, 0.1816, 0.0520], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0115, 0.0134, 0.0165, 0.0100, 0.0138, 0.0126, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 14:33:06,619 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65632.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:33:10,007 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.524e+01 1.645e+02 2.049e+02 2.494e+02 4.393e+02, threshold=4.097e+02, percent-clipped=1.0 +2023-03-26 14:33:12,521 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65641.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:33:22,370 INFO [finetune.py:976] (5/7) Epoch 12, batch 2650, loss[loss=0.2193, simple_loss=0.2933, pruned_loss=0.07262, over 4742.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2629, pruned_loss=0.06407, over 954147.07 frames. ], batch size: 54, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:33:38,618 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=65680.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:33:55,609 INFO [finetune.py:976] (5/7) Epoch 12, batch 2700, loss[loss=0.2088, simple_loss=0.2702, pruned_loss=0.07364, over 4773.00 frames. ], tot_loss[loss=0.1932, simple_loss=0.2611, pruned_loss=0.06271, over 955875.41 frames. ], batch size: 28, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:33:59,778 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65711.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:34:17,010 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.115e+02 1.547e+02 1.884e+02 2.200e+02 3.210e+02, threshold=3.769e+02, percent-clipped=0.0 +2023-03-26 14:34:19,574 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-03-26 14:34:30,258 INFO [finetune.py:976] (5/7) Epoch 12, batch 2750, loss[loss=0.1651, simple_loss=0.2281, pruned_loss=0.05104, over 4904.00 frames. ], tot_loss[loss=0.1918, simple_loss=0.2588, pruned_loss=0.06236, over 954996.31 frames. ], batch size: 36, lr: 3.65e-03, grad_scale: 32.0 +2023-03-26 14:34:38,468 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=65759.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:35:30,778 INFO [finetune.py:976] (5/7) Epoch 12, batch 2800, loss[loss=0.2224, simple_loss=0.2757, pruned_loss=0.08457, over 4737.00 frames. ], tot_loss[loss=0.1882, simple_loss=0.2548, pruned_loss=0.06083, over 955222.14 frames. 
], batch size: 59, lr: 3.64e-03, grad_scale: 32.0 +2023-03-26 14:35:52,228 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.914e+01 1.578e+02 1.887e+02 2.176e+02 5.167e+02, threshold=3.774e+02, percent-clipped=1.0 +2023-03-26 14:35:52,974 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65838.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 14:35:54,728 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9097, 1.4029, 0.9458, 1.8356, 2.3035, 1.3347, 1.6482, 1.7073], + device='cuda:5'), covar=tensor([0.1528, 0.2024, 0.1993, 0.1137, 0.1796, 0.1910, 0.1426, 0.2103], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0096, 0.0113, 0.0092, 0.0120, 0.0094, 0.0100, 0.0091], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 14:36:04,209 INFO [finetune.py:976] (5/7) Epoch 12, batch 2850, loss[loss=0.2066, simple_loss=0.2831, pruned_loss=0.06508, over 4838.00 frames. ], tot_loss[loss=0.1869, simple_loss=0.2534, pruned_loss=0.06019, over 953835.02 frames. ], batch size: 47, lr: 3.64e-03, grad_scale: 32.0 +2023-03-26 14:36:36,965 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65888.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 14:36:45,247 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65899.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 14:36:48,787 INFO [finetune.py:976] (5/7) Epoch 12, batch 2900, loss[loss=0.1742, simple_loss=0.2389, pruned_loss=0.05482, over 4363.00 frames. ], tot_loss[loss=0.188, simple_loss=0.2549, pruned_loss=0.06052, over 954282.10 frames. ], batch size: 19, lr: 3.64e-03, grad_scale: 32.0 +2023-03-26 14:37:10,261 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.038e+02 1.527e+02 1.842e+02 2.377e+02 4.547e+02, threshold=3.684e+02, percent-clipped=3.0 +2023-03-26 14:37:12,790 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65941.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:37:13,912 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6851, 1.5423, 1.5000, 1.5532, 1.1322, 3.3364, 1.1734, 1.7487], + device='cuda:5'), covar=tensor([0.3225, 0.2379, 0.2166, 0.2384, 0.1800, 0.0230, 0.2536, 0.1285], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0119, 0.0123, 0.0115, 0.0098, 0.0098, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 14:37:27,695 INFO [finetune.py:976] (5/7) Epoch 12, batch 2950, loss[loss=0.2158, simple_loss=0.2815, pruned_loss=0.07508, over 4927.00 frames. ], tot_loss[loss=0.1916, simple_loss=0.259, pruned_loss=0.06211, over 955652.21 frames. ], batch size: 38, lr: 3.64e-03, grad_scale: 32.0 +2023-03-26 14:37:38,567 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. 
limit=2.0 +2023-03-26 14:38:02,433 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=65989.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:38:11,458 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7571, 1.6487, 2.0791, 1.3370, 1.8776, 2.0547, 1.5764, 2.2175], + device='cuda:5'), covar=tensor([0.1367, 0.2057, 0.1302, 0.1897, 0.0942, 0.1439, 0.2637, 0.0772], + device='cuda:5'), in_proj_covar=tensor([0.0197, 0.0206, 0.0195, 0.0191, 0.0179, 0.0216, 0.0218, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:38:26,400 INFO [finetune.py:976] (5/7) Epoch 12, batch 3000, loss[loss=0.1782, simple_loss=0.2529, pruned_loss=0.05172, over 4838.00 frames. ], tot_loss[loss=0.1944, simple_loss=0.2613, pruned_loss=0.06371, over 955508.49 frames. ], batch size: 30, lr: 3.64e-03, grad_scale: 32.0 +2023-03-26 14:38:26,400 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 14:38:37,073 INFO [finetune.py:1010] (5/7) Epoch 12, validation: loss=0.1571, simple_loss=0.2281, pruned_loss=0.04309, over 2265189.00 frames. +2023-03-26 14:38:37,073 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 14:38:58,512 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.011e+02 1.619e+02 1.943e+02 2.343e+02 4.325e+02, threshold=3.886e+02, percent-clipped=3.0 +2023-03-26 14:39:05,048 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-03-26 14:39:21,450 INFO [finetune.py:976] (5/7) Epoch 12, batch 3050, loss[loss=0.1709, simple_loss=0.2446, pruned_loss=0.04859, over 4785.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2631, pruned_loss=0.06432, over 957642.55 frames. ], batch size: 26, lr: 3.64e-03, grad_scale: 32.0 +2023-03-26 14:40:36,353 INFO [finetune.py:976] (5/7) Epoch 12, batch 3100, loss[loss=0.1977, simple_loss=0.2631, pruned_loss=0.06616, over 4894.00 frames. ], tot_loss[loss=0.1939, simple_loss=0.2608, pruned_loss=0.06347, over 957339.42 frames. ], batch size: 35, lr: 3.64e-03, grad_scale: 32.0 +2023-03-26 14:41:12,551 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.7650, 4.0986, 4.3297, 4.5482, 4.5250, 4.2964, 4.8296, 1.5246], + device='cuda:5'), covar=tensor([0.0610, 0.0708, 0.0666, 0.0792, 0.0929, 0.1201, 0.0482, 0.5416], + device='cuda:5'), in_proj_covar=tensor([0.0349, 0.0243, 0.0277, 0.0292, 0.0330, 0.0283, 0.0301, 0.0298], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:41:19,789 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.081e+02 1.628e+02 1.972e+02 2.398e+02 4.316e+02, threshold=3.945e+02, percent-clipped=3.0 +2023-03-26 14:41:27,558 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66144.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:41:40,147 INFO [finetune.py:976] (5/7) Epoch 12, batch 3150, loss[loss=0.2157, simple_loss=0.2691, pruned_loss=0.08113, over 4825.00 frames. ], tot_loss[loss=0.1926, simple_loss=0.2585, pruned_loss=0.06336, over 952430.56 frames. 
], batch size: 41, lr: 3.64e-03, grad_scale: 32.0 +2023-03-26 14:41:48,946 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66160.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 14:42:24,560 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66188.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 14:42:33,446 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66194.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 14:42:40,455 INFO [finetune.py:976] (5/7) Epoch 12, batch 3200, loss[loss=0.2303, simple_loss=0.294, pruned_loss=0.08326, over 4806.00 frames. ], tot_loss[loss=0.1893, simple_loss=0.2549, pruned_loss=0.06189, over 953435.26 frames. ], batch size: 41, lr: 3.64e-03, grad_scale: 32.0 +2023-03-26 14:42:40,559 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66205.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 14:42:51,195 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66221.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 14:43:01,313 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=66236.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 14:43:01,823 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.059e+02 1.602e+02 1.878e+02 2.419e+02 4.134e+02, threshold=3.755e+02, percent-clipped=1.0 +2023-03-26 14:43:14,215 INFO [finetune.py:976] (5/7) Epoch 12, batch 3250, loss[loss=0.2202, simple_loss=0.2862, pruned_loss=0.07712, over 4865.00 frames. ], tot_loss[loss=0.192, simple_loss=0.2572, pruned_loss=0.06345, over 953038.77 frames. ], batch size: 34, lr: 3.64e-03, grad_scale: 32.0 +2023-03-26 14:43:16,230 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66258.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:43:48,268 INFO [finetune.py:976] (5/7) Epoch 12, batch 3300, loss[loss=0.2366, simple_loss=0.3107, pruned_loss=0.08127, over 4819.00 frames. ], tot_loss[loss=0.1938, simple_loss=0.2598, pruned_loss=0.06395, over 954668.68 frames. ], batch size: 40, lr: 3.64e-03, grad_scale: 32.0 +2023-03-26 14:43:56,991 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66319.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:44:14,651 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.247e+02 1.642e+02 1.904e+02 2.370e+02 4.024e+02, threshold=3.808e+02, percent-clipped=2.0 +2023-03-26 14:44:19,638 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5114, 1.4684, 1.9462, 1.8852, 1.7290, 3.5124, 1.4783, 1.6757], + device='cuda:5'), covar=tensor([0.0945, 0.1841, 0.1175, 0.0960, 0.1551, 0.0255, 0.1437, 0.1781], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0074, 0.0078, 0.0091, 0.0081, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 14:44:29,722 INFO [finetune.py:976] (5/7) Epoch 12, batch 3350, loss[loss=0.225, simple_loss=0.2875, pruned_loss=0.08127, over 4822.00 frames. ], tot_loss[loss=0.1944, simple_loss=0.2612, pruned_loss=0.06375, over 955742.55 frames. ], batch size: 30, lr: 3.64e-03, grad_scale: 64.0 +2023-03-26 14:44:38,918 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. 
limit=2.0 +2023-03-26 14:44:42,464 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6462, 1.4974, 1.4842, 1.5197, 1.0468, 3.5824, 1.3992, 1.8169], + device='cuda:5'), covar=tensor([0.3371, 0.2559, 0.2235, 0.2518, 0.1961, 0.0169, 0.2518, 0.1335], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0120, 0.0124, 0.0115, 0.0098, 0.0098, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 14:44:49,423 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8571, 1.2371, 0.8228, 1.6523, 2.1923, 1.1129, 1.5656, 1.6068], + device='cuda:5'), covar=tensor([0.1473, 0.2205, 0.2035, 0.1165, 0.1865, 0.1965, 0.1477, 0.2086], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0096, 0.0112, 0.0092, 0.0120, 0.0094, 0.0099, 0.0090], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 14:45:02,615 INFO [finetune.py:976] (5/7) Epoch 12, batch 3400, loss[loss=0.1771, simple_loss=0.2411, pruned_loss=0.05661, over 4759.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.2624, pruned_loss=0.06366, over 956821.42 frames. ], batch size: 26, lr: 3.64e-03, grad_scale: 64.0 +2023-03-26 14:45:24,437 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.132e+02 1.693e+02 1.994e+02 2.432e+02 3.824e+02, threshold=3.988e+02, percent-clipped=2.0 +2023-03-26 14:45:36,091 INFO [finetune.py:976] (5/7) Epoch 12, batch 3450, loss[loss=0.2046, simple_loss=0.2615, pruned_loss=0.07382, over 4873.00 frames. ], tot_loss[loss=0.1945, simple_loss=0.2617, pruned_loss=0.06362, over 957320.52 frames. ], batch size: 34, lr: 3.64e-03, grad_scale: 64.0 +2023-03-26 14:45:43,965 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-03-26 14:46:02,771 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66494.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 14:46:06,887 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66500.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 14:46:09,848 INFO [finetune.py:976] (5/7) Epoch 12, batch 3500, loss[loss=0.1815, simple_loss=0.2414, pruned_loss=0.06076, over 4917.00 frames. ], tot_loss[loss=0.1933, simple_loss=0.2598, pruned_loss=0.06344, over 956148.43 frames. ], batch size: 43, lr: 3.64e-03, grad_scale: 64.0 +2023-03-26 14:46:20,799 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66516.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 14:46:23,825 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66521.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 14:46:29,342 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. 
limit=2.0 +2023-03-26 14:46:36,431 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.070e+02 1.655e+02 1.937e+02 2.486e+02 6.010e+02, threshold=3.875e+02, percent-clipped=2.0 +2023-03-26 14:46:39,446 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0223, 1.8756, 1.7014, 1.8814, 1.4369, 4.5090, 1.7382, 2.1461], + device='cuda:5'), covar=tensor([0.3096, 0.2367, 0.2072, 0.2215, 0.1629, 0.0102, 0.2494, 0.1264], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0120, 0.0124, 0.0115, 0.0098, 0.0098, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 14:46:40,006 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=66542.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 14:46:57,619 INFO [finetune.py:976] (5/7) Epoch 12, batch 3550, loss[loss=0.197, simple_loss=0.2675, pruned_loss=0.06327, over 4836.00 frames. ], tot_loss[loss=0.1921, simple_loss=0.2574, pruned_loss=0.06337, over 956283.83 frames. ], batch size: 33, lr: 3.64e-03, grad_scale: 64.0 +2023-03-26 14:47:23,091 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66582.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 14:47:24,901 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2642, 3.7362, 3.9830, 4.0904, 4.0584, 3.8120, 4.3322, 1.5834], + device='cuda:5'), covar=tensor([0.0866, 0.0902, 0.0819, 0.1124, 0.1340, 0.1664, 0.0802, 0.5082], + device='cuda:5'), in_proj_covar=tensor([0.0352, 0.0245, 0.0278, 0.0293, 0.0331, 0.0285, 0.0302, 0.0299], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:47:43,651 INFO [finetune.py:976] (5/7) Epoch 12, batch 3600, loss[loss=0.1866, simple_loss=0.2561, pruned_loss=0.05859, over 4835.00 frames. ], tot_loss[loss=0.1897, simple_loss=0.2545, pruned_loss=0.06243, over 952636.51 frames. ], batch size: 38, lr: 3.64e-03, grad_scale: 64.0 +2023-03-26 14:47:44,102 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-03-26 14:47:53,889 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66614.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:47:58,476 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4655, 2.2001, 1.6025, 0.7403, 1.9360, 1.9866, 1.9307, 1.9053], + device='cuda:5'), covar=tensor([0.0783, 0.0825, 0.1742, 0.2076, 0.1376, 0.2350, 0.1987, 0.0939], + device='cuda:5'), in_proj_covar=tensor([0.0165, 0.0198, 0.0201, 0.0185, 0.0214, 0.0208, 0.0222, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:48:08,740 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.078e+02 1.581e+02 1.999e+02 2.430e+02 3.919e+02, threshold=3.999e+02, percent-clipped=1.0 +2023-03-26 14:48:09,529 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.11 vs. limit=2.0 +2023-03-26 14:48:21,114 INFO [finetune.py:976] (5/7) Epoch 12, batch 3650, loss[loss=0.2373, simple_loss=0.3003, pruned_loss=0.08716, over 4908.00 frames. ], tot_loss[loss=0.1926, simple_loss=0.2578, pruned_loss=0.06369, over 954779.92 frames. 
], batch size: 37, lr: 3.64e-03, grad_scale: 64.0 +2023-03-26 14:48:32,290 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9573, 1.3574, 0.7191, 1.8964, 2.1759, 1.6640, 1.4748, 1.7659], + device='cuda:5'), covar=tensor([0.2187, 0.2966, 0.2868, 0.1720, 0.2571, 0.2980, 0.2107, 0.2947], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0096, 0.0112, 0.0091, 0.0120, 0.0093, 0.0099, 0.0090], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 14:48:40,585 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-03-26 14:48:46,725 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1067, 1.8671, 1.6137, 1.8348, 1.8023, 1.7513, 1.7465, 2.6218], + device='cuda:5'), covar=tensor([0.4604, 0.5010, 0.3945, 0.4253, 0.4396, 0.2826, 0.4555, 0.1907], + device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0261, 0.0225, 0.0278, 0.0245, 0.0212, 0.0248, 0.0219], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:48:54,888 INFO [finetune.py:976] (5/7) Epoch 12, batch 3700, loss[loss=0.1845, simple_loss=0.2579, pruned_loss=0.05559, over 4805.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2609, pruned_loss=0.06438, over 953221.09 frames. ], batch size: 45, lr: 3.64e-03, grad_scale: 64.0 +2023-03-26 14:49:14,930 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6699, 1.5860, 1.5628, 1.6336, 1.2173, 3.7142, 1.4383, 2.0325], + device='cuda:5'), covar=tensor([0.3347, 0.2505, 0.2028, 0.2215, 0.1723, 0.0147, 0.2649, 0.1269], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0120, 0.0124, 0.0116, 0.0098, 0.0098, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 14:49:14,945 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66731.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 14:49:18,458 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.046e+02 1.675e+02 1.999e+02 2.466e+02 3.717e+02, threshold=3.997e+02, percent-clipped=0.0 +2023-03-26 14:49:38,726 INFO [finetune.py:976] (5/7) Epoch 12, batch 3750, loss[loss=0.2197, simple_loss=0.2817, pruned_loss=0.07883, over 4897.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2624, pruned_loss=0.06473, over 952131.14 frames. ], batch size: 36, lr: 3.64e-03, grad_scale: 32.0 +2023-03-26 14:50:03,088 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66792.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 14:50:09,004 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66800.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 14:50:12,432 INFO [finetune.py:976] (5/7) Epoch 12, batch 3800, loss[loss=0.238, simple_loss=0.2933, pruned_loss=0.09136, over 4912.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2638, pruned_loss=0.06532, over 955034.04 frames. 
], batch size: 36, lr: 3.64e-03, grad_scale: 32.0 +2023-03-26 14:50:19,260 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66816.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 14:50:34,227 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.175e+02 1.668e+02 2.101e+02 2.666e+02 4.038e+02, threshold=4.202e+02, percent-clipped=1.0 +2023-03-26 14:50:40,362 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=66848.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:50:45,466 INFO [finetune.py:976] (5/7) Epoch 12, batch 3850, loss[loss=0.1974, simple_loss=0.2578, pruned_loss=0.06853, over 4862.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2618, pruned_loss=0.06423, over 954055.55 frames. ], batch size: 34, lr: 3.64e-03, grad_scale: 32.0 +2023-03-26 14:50:51,377 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=66864.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 14:51:00,116 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66877.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 14:51:18,888 INFO [finetune.py:976] (5/7) Epoch 12, batch 3900, loss[loss=0.1563, simple_loss=0.2223, pruned_loss=0.04509, over 4695.00 frames. ], tot_loss[loss=0.1924, simple_loss=0.2586, pruned_loss=0.0631, over 954950.15 frames. ], batch size: 23, lr: 3.64e-03, grad_scale: 32.0 +2023-03-26 14:51:24,971 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66914.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:51:40,885 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.325e+01 1.578e+02 1.785e+02 2.294e+02 5.103e+02, threshold=3.570e+02, percent-clipped=1.0 +2023-03-26 14:51:51,226 INFO [finetune.py:976] (5/7) Epoch 12, batch 3950, loss[loss=0.2218, simple_loss=0.2698, pruned_loss=0.08687, over 4909.00 frames. ], tot_loss[loss=0.1889, simple_loss=0.2548, pruned_loss=0.06147, over 955192.02 frames. ], batch size: 35, lr: 3.64e-03, grad_scale: 32.0 +2023-03-26 14:51:53,024 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66957.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:51:58,483 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=66962.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:51:58,606 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.67 vs. limit=2.0 +2023-03-26 14:52:46,940 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-03-26 14:52:47,110 INFO [finetune.py:976] (5/7) Epoch 12, batch 4000, loss[loss=0.2105, simple_loss=0.2862, pruned_loss=0.06743, over 4906.00 frames. ], tot_loss[loss=0.1877, simple_loss=0.2538, pruned_loss=0.06081, over 955675.96 frames. ], batch size: 43, lr: 3.63e-03, grad_scale: 32.0 +2023-03-26 14:53:04,498 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67018.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 14:53:15,639 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.69 vs. limit=2.0 +2023-03-26 14:53:18,612 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.442e+01 1.595e+02 2.014e+02 2.521e+02 4.335e+02, threshold=4.027e+02, percent-clipped=3.0 +2023-03-26 14:53:28,874 INFO [finetune.py:976] (5/7) Epoch 12, batch 4050, loss[loss=0.1858, simple_loss=0.2621, pruned_loss=0.05479, over 4881.00 frames. 
], tot_loss[loss=0.1902, simple_loss=0.2569, pruned_loss=0.06181, over 956306.49 frames. ], batch size: 32, lr: 3.63e-03, grad_scale: 16.0 +2023-03-26 14:53:31,040 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-03-26 14:53:45,660 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7241, 1.5743, 1.4639, 1.7855, 2.2283, 1.8557, 1.2112, 1.4127], + device='cuda:5'), covar=tensor([0.2316, 0.2124, 0.2006, 0.1629, 0.1649, 0.1182, 0.2620, 0.2000], + device='cuda:5'), in_proj_covar=tensor([0.0241, 0.0208, 0.0211, 0.0191, 0.0242, 0.0183, 0.0214, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:53:47,900 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67083.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:53:50,807 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67087.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 14:54:02,033 INFO [finetune.py:976] (5/7) Epoch 12, batch 4100, loss[loss=0.1944, simple_loss=0.2599, pruned_loss=0.06443, over 4172.00 frames. ], tot_loss[loss=0.1938, simple_loss=0.2607, pruned_loss=0.06342, over 954685.13 frames. ], batch size: 65, lr: 3.63e-03, grad_scale: 16.0 +2023-03-26 14:54:15,037 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7416, 1.6361, 1.5703, 1.6716, 1.1448, 3.6203, 1.3981, 1.8825], + device='cuda:5'), covar=tensor([0.3528, 0.2567, 0.2198, 0.2530, 0.2018, 0.0244, 0.2675, 0.1376], + device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0116, 0.0121, 0.0124, 0.0116, 0.0099, 0.0099, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 14:54:29,985 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.152e+02 1.725e+02 1.998e+02 2.409e+02 3.172e+02, threshold=3.997e+02, percent-clipped=0.0 +2023-03-26 14:54:34,104 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67144.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:54:44,209 INFO [finetune.py:976] (5/7) Epoch 12, batch 4150, loss[loss=0.1796, simple_loss=0.2402, pruned_loss=0.05951, over 4690.00 frames. ], tot_loss[loss=0.1946, simple_loss=0.2618, pruned_loss=0.06366, over 952365.71 frames. ], batch size: 59, lr: 3.63e-03, grad_scale: 16.0 +2023-03-26 14:54:59,070 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67177.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 14:55:17,547 INFO [finetune.py:976] (5/7) Epoch 12, batch 4200, loss[loss=0.183, simple_loss=0.2415, pruned_loss=0.06227, over 4817.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2627, pruned_loss=0.06393, over 953590.77 frames. 
], batch size: 30, lr: 3.63e-03, grad_scale: 16.0 +2023-03-26 14:55:18,804 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7809, 1.3305, 0.9787, 1.5937, 2.1324, 1.2562, 1.4628, 1.6486], + device='cuda:5'), covar=tensor([0.1416, 0.2106, 0.2070, 0.1244, 0.1904, 0.2011, 0.1561, 0.2000], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0096, 0.0113, 0.0091, 0.0120, 0.0094, 0.0099, 0.0090], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 14:55:30,583 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=67225.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 14:55:36,870 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-03-26 14:55:39,459 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.066e+02 1.549e+02 1.852e+02 2.427e+02 4.145e+02, threshold=3.704e+02, percent-clipped=1.0 +2023-03-26 14:55:41,913 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6224, 2.5054, 2.0616, 0.9761, 2.3082, 1.9646, 1.8429, 2.2517], + device='cuda:5'), covar=tensor([0.1020, 0.0741, 0.1602, 0.2278, 0.1477, 0.2451, 0.2307, 0.1025], + device='cuda:5'), in_proj_covar=tensor([0.0165, 0.0197, 0.0198, 0.0184, 0.0212, 0.0205, 0.0222, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:55:50,531 INFO [finetune.py:976] (5/7) Epoch 12, batch 4250, loss[loss=0.1633, simple_loss=0.2338, pruned_loss=0.04638, over 4891.00 frames. ], tot_loss[loss=0.1926, simple_loss=0.2596, pruned_loss=0.06276, over 954218.97 frames. ], batch size: 36, lr: 3.63e-03, grad_scale: 16.0 +2023-03-26 14:55:55,534 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2016, 3.6667, 3.8155, 4.0349, 3.9473, 3.7837, 4.2932, 1.4658], + device='cuda:5'), covar=tensor([0.0745, 0.0801, 0.0868, 0.0915, 0.1124, 0.1398, 0.0642, 0.5169], + device='cuda:5'), in_proj_covar=tensor([0.0349, 0.0242, 0.0274, 0.0289, 0.0327, 0.0280, 0.0300, 0.0294], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:55:58,671 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9221, 1.4294, 1.9279, 1.8905, 1.6470, 1.6110, 1.7618, 1.7605], + device='cuda:5'), covar=tensor([0.3904, 0.4203, 0.3491, 0.3642, 0.4940, 0.3844, 0.4925, 0.3508], + device='cuda:5'), in_proj_covar=tensor([0.0238, 0.0238, 0.0254, 0.0260, 0.0257, 0.0232, 0.0273, 0.0232], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:56:21,832 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6121, 1.4686, 1.3093, 1.5957, 1.6136, 1.6787, 0.9275, 1.3460], + device='cuda:5'), covar=tensor([0.2317, 0.2196, 0.2119, 0.1776, 0.1667, 0.1229, 0.2852, 0.2075], + device='cuda:5'), in_proj_covar=tensor([0.0239, 0.0206, 0.0209, 0.0189, 0.0239, 0.0181, 0.0212, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:56:32,187 INFO [finetune.py:976] (5/7) Epoch 12, batch 4300, loss[loss=0.2157, simple_loss=0.2705, pruned_loss=0.08046, over 4800.00 frames. ], tot_loss[loss=0.1897, simple_loss=0.2563, pruned_loss=0.06161, over 955090.27 frames. 
], batch size: 51, lr: 3.63e-03, grad_scale: 16.0 +2023-03-26 14:56:32,333 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2286, 2.0814, 1.7857, 2.0927, 2.1383, 1.8715, 2.5329, 2.1921], + device='cuda:5'), covar=tensor([0.1334, 0.2208, 0.2956, 0.2651, 0.2565, 0.1676, 0.3017, 0.1754], + device='cuda:5'), in_proj_covar=tensor([0.0179, 0.0188, 0.0234, 0.0257, 0.0244, 0.0200, 0.0215, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:56:37,172 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67313.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 14:56:54,399 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.954e+01 1.550e+02 1.913e+02 2.348e+02 5.397e+02, threshold=3.825e+02, percent-clipped=3.0 +2023-03-26 14:57:05,082 INFO [finetune.py:976] (5/7) Epoch 12, batch 4350, loss[loss=0.1822, simple_loss=0.2441, pruned_loss=0.06022, over 4811.00 frames. ], tot_loss[loss=0.189, simple_loss=0.2546, pruned_loss=0.06167, over 956520.97 frames. ], batch size: 25, lr: 3.63e-03, grad_scale: 16.0 +2023-03-26 14:57:07,522 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2862, 1.9988, 1.9668, 2.1921, 2.6543, 2.1890, 2.0099, 1.7391], + device='cuda:5'), covar=tensor([0.2147, 0.2006, 0.1799, 0.1563, 0.1814, 0.1117, 0.2236, 0.1752], + device='cuda:5'), in_proj_covar=tensor([0.0239, 0.0206, 0.0210, 0.0190, 0.0240, 0.0182, 0.0213, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 14:57:28,524 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67387.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 14:57:39,914 INFO [finetune.py:976] (5/7) Epoch 12, batch 4400, loss[loss=0.2621, simple_loss=0.3212, pruned_loss=0.1016, over 4805.00 frames. ], tot_loss[loss=0.1911, simple_loss=0.2567, pruned_loss=0.06278, over 955889.49 frames. ], batch size: 41, lr: 3.63e-03, grad_scale: 16.0 +2023-03-26 14:58:14,147 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=67435.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 14:58:16,971 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.120e+02 1.656e+02 1.972e+02 2.339e+02 4.406e+02, threshold=3.944e+02, percent-clipped=2.0 +2023-03-26 14:58:20,627 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67439.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 14:58:30,810 INFO [finetune.py:976] (5/7) Epoch 12, batch 4450, loss[loss=0.2354, simple_loss=0.315, pruned_loss=0.07789, over 4907.00 frames. ], tot_loss[loss=0.1946, simple_loss=0.2605, pruned_loss=0.06433, over 951041.13 frames. ], batch size: 37, lr: 3.63e-03, grad_scale: 16.0 +2023-03-26 14:59:03,973 INFO [finetune.py:976] (5/7) Epoch 12, batch 4500, loss[loss=0.192, simple_loss=0.2625, pruned_loss=0.06077, over 4818.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2623, pruned_loss=0.06505, over 951216.09 frames. 
], batch size: 39, lr: 3.63e-03, grad_scale: 16.0
+2023-03-26 14:59:09,851 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67513.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 14:59:26,033 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.686e+02 1.980e+02 2.352e+02 4.001e+02, threshold=3.961e+02, percent-clipped=1.0
+2023-03-26 14:59:35,455 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8163, 4.0791, 3.8297, 2.1182, 4.2025, 3.1947, 1.2316, 2.8284],
+       device='cuda:5'), covar=tensor([0.2254, 0.1521, 0.1429, 0.3186, 0.1000, 0.0990, 0.4162, 0.1455],
+       device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0175, 0.0161, 0.0130, 0.0157, 0.0122, 0.0147, 0.0123],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+       device='cuda:5')
+2023-03-26 14:59:37,241 INFO [finetune.py:976] (5/7) Epoch 12, batch 4550, loss[loss=0.2718, simple_loss=0.3227, pruned_loss=0.1104, over 4743.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2636, pruned_loss=0.06531, over 951461.56 frames. ], batch size: 54, lr: 3.63e-03, grad_scale: 16.0
+2023-03-26 14:59:55,596 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5960, 1.5306, 1.8258, 1.8739, 1.6767, 3.3188, 1.4800, 1.6237],
+       device='cuda:5'), covar=tensor([0.0983, 0.1918, 0.1087, 0.0989, 0.1717, 0.0305, 0.1667, 0.1979],
+       device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0074, 0.0077, 0.0091, 0.0080, 0.0084, 0.0078],
+       device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+       device='cuda:5')
+2023-03-26 14:59:56,263 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67574.0, num_to_drop=1, layers_to_drop={3}
+2023-03-26 15:00:19,986 INFO [finetune.py:976] (5/7) Epoch 12, batch 4600, loss[loss=0.2294, simple_loss=0.2894, pruned_loss=0.08468, over 4899.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2625, pruned_loss=0.06431, over 952956.67 frames. ], batch size: 37, lr: 3.63e-03, grad_scale: 16.0
+2023-03-26 15:00:24,954 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67613.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:00:30,653 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0461, 2.0438, 2.0666, 1.3984, 2.1303, 2.2049, 2.1065, 1.7911],
+       device='cuda:5'), covar=tensor([0.0580, 0.0570, 0.0673, 0.0852, 0.0623, 0.0632, 0.0577, 0.1060],
+       device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0133, 0.0141, 0.0124, 0.0122, 0.0141, 0.0142, 0.0161],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:00:42,114 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.172e+02 1.477e+02 1.878e+02 2.272e+02 4.960e+02, threshold=3.756e+02, percent-clipped=1.0
+2023-03-26 15:00:53,234 INFO [finetune.py:976] (5/7) Epoch 12, batch 4650, loss[loss=0.1529, simple_loss=0.2251, pruned_loss=0.04032, over 4786.00 frames. ], tot_loss[loss=0.193, simple_loss=0.2593, pruned_loss=0.06335, over 953837.23 frames. ], batch size: 25, lr: 3.63e-03, grad_scale: 16.0
+2023-03-26 15:00:56,981 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=67661.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:01:10,453 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67680.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:01:12,337 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.93 vs. limit=5.0
+2023-03-26 15:01:17,695 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67692.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:01:31,300 INFO [finetune.py:976] (5/7) Epoch 12, batch 4700, loss[loss=0.1559, simple_loss=0.2252, pruned_loss=0.04328, over 4810.00 frames. ], tot_loss[loss=0.19, simple_loss=0.2559, pruned_loss=0.06208, over 956089.32 frames. ], batch size: 25, lr: 3.63e-03, grad_scale: 16.0
+2023-03-26 15:01:32,679 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5497, 2.2097, 2.0498, 2.3962, 2.2229, 2.2049, 2.1833, 2.9358],
+       device='cuda:5'), covar=tensor([0.3981, 0.4860, 0.3705, 0.4230, 0.4259, 0.2704, 0.4367, 0.1856],
+       device='cuda:5'), in_proj_covar=tensor([0.0283, 0.0258, 0.0223, 0.0276, 0.0243, 0.0210, 0.0245, 0.0218],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:01:56,970 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.008e+02 1.553e+02 1.823e+02 2.116e+02 3.808e+02, threshold=3.646e+02, percent-clipped=1.0
+2023-03-26 15:01:57,076 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67739.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:01:58,308 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67741.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:02:05,942 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67753.0, num_to_drop=1, layers_to_drop={3}
+2023-03-26 15:02:07,551 INFO [finetune.py:976] (5/7) Epoch 12, batch 4750, loss[loss=0.2126, simple_loss=0.2912, pruned_loss=0.06703, over 4836.00 frames. ], tot_loss[loss=0.1885, simple_loss=0.254, pruned_loss=0.0615, over 956749.41 frames. ], batch size: 47, lr: 3.63e-03, grad_scale: 16.0
+2023-03-26 15:02:28,911 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=67787.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:02:32,057 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5246, 2.4316, 2.1014, 2.6917, 2.5044, 2.0945, 3.0882, 2.5310],
+       device='cuda:5'), covar=tensor([0.1369, 0.2403, 0.3048, 0.2739, 0.2679, 0.1589, 0.2959, 0.1875],
+       device='cuda:5'), in_proj_covar=tensor([0.0179, 0.0188, 0.0234, 0.0256, 0.0243, 0.0199, 0.0214, 0.0199],
+       device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:02:40,330 INFO [finetune.py:976] (5/7) Epoch 12, batch 4800, loss[loss=0.2433, simple_loss=0.297, pruned_loss=0.09478, over 4738.00 frames. ], tot_loss[loss=0.1921, simple_loss=0.2576, pruned_loss=0.06331, over 956012.68 frames. ], batch size: 59, lr: 3.63e-03, grad_scale: 16.0
+2023-03-26 15:03:07,511 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.086e+02 1.756e+02 1.975e+02 2.556e+02 4.813e+02, threshold=3.950e+02, percent-clipped=3.0
+2023-03-26 15:03:25,929 INFO [finetune.py:976] (5/7) Epoch 12, batch 4850, loss[loss=0.2217, simple_loss=0.2807, pruned_loss=0.08134, over 4083.00 frames. ], tot_loss[loss=0.194, simple_loss=0.2604, pruned_loss=0.0638, over 954620.67 frames. ], batch size: 65, lr: 3.63e-03, grad_scale: 16.0
+2023-03-26 15:03:39,909 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67869.0, num_to_drop=1, layers_to_drop={3}
+2023-03-26 15:04:03,161 INFO [finetune.py:976] (5/7) Epoch 12, batch 4900, loss[loss=0.1905, simple_loss=0.2626, pruned_loss=0.05922, over 4742.00 frames. ], tot_loss[loss=0.194, simple_loss=0.2607, pruned_loss=0.06365, over 954785.36 frames. ], batch size: 59, lr: 3.63e-03, grad_scale: 16.0
+2023-03-26 15:04:20,352 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.3035, 2.9366, 3.0230, 3.2505, 3.0837, 2.8791, 3.3209, 0.9915],
+       device='cuda:5'), covar=tensor([0.1026, 0.0973, 0.1115, 0.0998, 0.1542, 0.1810, 0.1086, 0.5300],
+       device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0242, 0.0275, 0.0290, 0.0328, 0.0281, 0.0299, 0.0292],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:04:26,940 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.121e+02 1.717e+02 1.971e+02 2.418e+02 4.222e+02, threshold=3.942e+02, percent-clipped=1.0
+2023-03-26 15:04:36,657 INFO [finetune.py:976] (5/7) Epoch 12, batch 4950, loss[loss=0.1884, simple_loss=0.2531, pruned_loss=0.06178, over 4819.00 frames. ], tot_loss[loss=0.1941, simple_loss=0.2613, pruned_loss=0.06347, over 954981.33 frames. ], batch size: 33, lr: 3.63e-03, grad_scale: 16.0
+2023-03-26 15:04:53,993 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67981.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:05:16,211 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5450, 1.4366, 1.4346, 1.4476, 1.0173, 2.9272, 1.0287, 1.4503],
+       device='cuda:5'), covar=tensor([0.3263, 0.2514, 0.2107, 0.2306, 0.1801, 0.0247, 0.2721, 0.1403],
+       device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0115, 0.0120, 0.0123, 0.0115, 0.0098, 0.0098, 0.0097],
+       device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+       device='cuda:5')
+2023-03-26 15:05:20,900 INFO [finetune.py:976] (5/7) Epoch 12, batch 5000, loss[loss=0.1672, simple_loss=0.2406, pruned_loss=0.0469, over 4869.00 frames. ], tot_loss[loss=0.1911, simple_loss=0.2582, pruned_loss=0.06197, over 954257.98 frames. ], batch size: 31, lr: 3.63e-03, grad_scale: 16.0
+2023-03-26 15:05:38,353 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0
+2023-03-26 15:05:41,213 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68036.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:05:43,411 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.964e+01 1.543e+02 1.867e+02 2.301e+02 3.447e+02, threshold=3.734e+02, percent-clipped=0.0
+2023-03-26 15:05:46,818 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68042.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:05:50,385 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68048.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 15:05:54,511 INFO [finetune.py:976] (5/7) Epoch 12, batch 5050, loss[loss=0.1709, simple_loss=0.2423, pruned_loss=0.04979, over 4822.00 frames. ], tot_loss[loss=0.1893, simple_loss=0.2558, pruned_loss=0.06136, over 953031.60 frames. ], batch size: 30, lr: 3.63e-03, grad_scale: 16.0
+2023-03-26 15:06:15,504 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68087.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 15:06:18,512 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1855, 1.7789, 2.1749, 2.0979, 1.8665, 1.8590, 2.0665, 1.9703],
+       device='cuda:5'), covar=tensor([0.4503, 0.4757, 0.3732, 0.4441, 0.5585, 0.4310, 0.5899, 0.3635],
+       device='cuda:5'), in_proj_covar=tensor([0.0241, 0.0240, 0.0257, 0.0263, 0.0259, 0.0234, 0.0276, 0.0235],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:06:20,658 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9973, 2.0771, 2.0428, 1.4122, 2.1103, 2.3033, 2.0969, 1.7697],
+       device='cuda:5'), covar=tensor([0.0659, 0.0611, 0.0777, 0.0983, 0.0557, 0.0656, 0.0657, 0.1063],
+       device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0133, 0.0142, 0.0125, 0.0122, 0.0141, 0.0142, 0.0162],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:06:27,691 INFO [finetune.py:976] (5/7) Epoch 12, batch 5100, loss[loss=0.1974, simple_loss=0.2648, pruned_loss=0.06498, over 4726.00 frames. ], tot_loss[loss=0.1864, simple_loss=0.2524, pruned_loss=0.06015, over 954289.77 frames. ], batch size: 23, lr: 3.63e-03, grad_scale: 16.0
+2023-03-26 15:06:59,404 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.056e+02 1.565e+02 1.837e+02 2.198e+02 4.078e+02, threshold=3.675e+02, percent-clipped=2.0
+2023-03-26 15:07:05,473 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68148.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 15:07:10,948 INFO [finetune.py:976] (5/7) Epoch 12, batch 5150, loss[loss=0.195, simple_loss=0.2568, pruned_loss=0.06665, over 4264.00 frames. ], tot_loss[loss=0.1879, simple_loss=0.2537, pruned_loss=0.06109, over 956045.70 frames. ], batch size: 65, lr: 3.63e-03, grad_scale: 16.0
+2023-03-26 15:07:19,523 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68169.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:07:43,717 INFO [finetune.py:976] (5/7) Epoch 12, batch 5200, loss[loss=0.2529, simple_loss=0.3202, pruned_loss=0.0928, over 4818.00 frames. ], tot_loss[loss=0.1904, simple_loss=0.2572, pruned_loss=0.06177, over 954501.48 frames. ], batch size: 40, lr: 3.62e-03, grad_scale: 16.0
+2023-03-26 15:07:51,093 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=68217.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:08:05,785 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.176e+02 1.664e+02 1.889e+02 2.252e+02 3.665e+02, threshold=3.778e+02, percent-clipped=0.0
+2023-03-26 15:08:10,837 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.93 vs. limit=5.0
+2023-03-26 15:08:16,526 INFO [finetune.py:976] (5/7) Epoch 12, batch 5250, loss[loss=0.186, simple_loss=0.252, pruned_loss=0.06002, over 4744.00 frames. ], tot_loss[loss=0.1925, simple_loss=0.2595, pruned_loss=0.06278, over 952907.43 frames. ], batch size: 27, lr: 3.62e-03, grad_scale: 16.0
+2023-03-26 15:08:44,238 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6320, 1.4238, 1.1424, 1.3138, 1.8341, 1.7619, 1.5502, 1.2570],
+       device='cuda:5'), covar=tensor([0.0290, 0.0376, 0.0914, 0.0377, 0.0242, 0.0419, 0.0352, 0.0506],
+       device='cuda:5'), in_proj_covar=tensor([0.0093, 0.0109, 0.0140, 0.0113, 0.0102, 0.0105, 0.0095, 0.0109],
+       device='cuda:5'), out_proj_covar=tensor([7.2532e-05, 8.4435e-05, 1.1114e-04, 8.8419e-05, 7.9875e-05, 7.7822e-05,
+       7.2040e-05, 8.4139e-05], device='cuda:5')
+2023-03-26 15:09:03,120 INFO [finetune.py:976] (5/7) Epoch 12, batch 5300, loss[loss=0.1749, simple_loss=0.2496, pruned_loss=0.05011, over 4816.00 frames. ], tot_loss[loss=0.1933, simple_loss=0.2603, pruned_loss=0.06315, over 951938.60 frames. ], batch size: 38, lr: 3.62e-03, grad_scale: 16.0
+2023-03-26 15:09:24,972 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68336.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:09:25,574 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68337.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:09:26,707 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.294e+02 1.838e+02 2.123e+02 2.651e+02 4.524e+02, threshold=4.245e+02, percent-clipped=5.0
+2023-03-26 15:09:32,265 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68348.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:09:34,090 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6266, 3.6562, 3.5313, 1.8448, 3.8167, 2.9056, 1.0723, 2.5203],
+       device='cuda:5'), covar=tensor([0.2154, 0.1713, 0.1406, 0.2907, 0.0947, 0.0938, 0.3866, 0.1434],
+       device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0173, 0.0159, 0.0128, 0.0155, 0.0120, 0.0145, 0.0121],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+       device='cuda:5')
+2023-03-26 15:09:36,486 INFO [finetune.py:976] (5/7) Epoch 12, batch 5350, loss[loss=0.1941, simple_loss=0.2702, pruned_loss=0.05895, over 4904.00 frames. ], tot_loss[loss=0.1919, simple_loss=0.2597, pruned_loss=0.06209, over 951494.40 frames. ], batch size: 36, lr: 3.62e-03, grad_scale: 16.0
+2023-03-26 15:09:55,983 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=68384.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:10:00,557 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6781, 0.7155, 1.7154, 1.6322, 1.5307, 1.4260, 1.5221, 1.6258],
+       device='cuda:5'), covar=tensor([0.3838, 0.4029, 0.3381, 0.3441, 0.4464, 0.3527, 0.4303, 0.3211],
+       device='cuda:5'), in_proj_covar=tensor([0.0239, 0.0238, 0.0255, 0.0262, 0.0257, 0.0233, 0.0275, 0.0234],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:10:04,679 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=68396.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:10:06,586 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68399.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:10:10,286 INFO [finetune.py:976] (5/7) Epoch 12, batch 5400, loss[loss=0.1805, simple_loss=0.2393, pruned_loss=0.0608, over 4822.00 frames. ], tot_loss[loss=0.1905, simple_loss=0.2579, pruned_loss=0.06158, over 954116.77 frames. ], batch size: 25, lr: 3.62e-03, grad_scale: 16.0
+2023-03-26 15:10:40,850 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.097e+02 1.541e+02 1.801e+02 2.082e+02 4.267e+02, threshold=3.602e+02, percent-clipped=1.0
+2023-03-26 15:10:44,356 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68443.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 15:10:51,598 INFO [finetune.py:976] (5/7) Epoch 12, batch 5450, loss[loss=0.1676, simple_loss=0.2415, pruned_loss=0.04681, over 4821.00 frames. ], tot_loss[loss=0.1897, simple_loss=0.2561, pruned_loss=0.06166, over 955982.20 frames. ], batch size: 38, lr: 3.62e-03, grad_scale: 16.0
+2023-03-26 15:10:54,745 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68460.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:11:24,506 INFO [finetune.py:976] (5/7) Epoch 12, batch 5500, loss[loss=0.1494, simple_loss=0.2181, pruned_loss=0.04039, over 4821.00 frames. ], tot_loss[loss=0.1866, simple_loss=0.2527, pruned_loss=0.06024, over 955152.01 frames. ], batch size: 39, lr: 3.62e-03, grad_scale: 16.0
+2023-03-26 15:11:47,048 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.070e+02 1.509e+02 1.942e+02 2.407e+02 6.603e+02, threshold=3.884e+02, percent-clipped=3.0
+2023-03-26 15:11:59,912 INFO [finetune.py:976] (5/7) Epoch 12, batch 5550, loss[loss=0.2751, simple_loss=0.3255, pruned_loss=0.1123, over 4906.00 frames. ], tot_loss[loss=0.1889, simple_loss=0.255, pruned_loss=0.06141, over 955251.41 frames. ], batch size: 43, lr: 3.62e-03, grad_scale: 16.0
+2023-03-26 15:12:18,618 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0057, 1.9411, 1.7658, 2.0502, 2.5224, 1.9564, 1.9914, 1.5905],
+       device='cuda:5'), covar=tensor([0.2121, 0.1952, 0.1841, 0.1631, 0.1968, 0.1146, 0.2163, 0.1763],
+       device='cuda:5'), in_proj_covar=tensor([0.0238, 0.0206, 0.0209, 0.0189, 0.0239, 0.0181, 0.0212, 0.0197],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:12:20,794 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7936, 2.4870, 2.1115, 1.1083, 2.3140, 2.1412, 2.0232, 2.2940],
+       device='cuda:5'), covar=tensor([0.0622, 0.0772, 0.1452, 0.1932, 0.1293, 0.1889, 0.1683, 0.0914],
+       device='cuda:5'), in_proj_covar=tensor([0.0165, 0.0196, 0.0199, 0.0184, 0.0212, 0.0207, 0.0221, 0.0196],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:12:23,732 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68578.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:12:39,661 INFO [finetune.py:976] (5/7) Epoch 12, batch 5600, loss[loss=0.2359, simple_loss=0.2941, pruned_loss=0.08881, over 4837.00 frames. ], tot_loss[loss=0.1937, simple_loss=0.2607, pruned_loss=0.06337, over 954987.85 frames. ], batch size: 30, lr: 3.62e-03, grad_scale: 16.0
+2023-03-26 15:12:58,320 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68637.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:12:59,421 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.238e+02 1.664e+02 1.965e+02 2.319e+02 3.885e+02, threshold=3.931e+02, percent-clipped=1.0
+2023-03-26 15:12:59,525 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68639.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:13:09,176 INFO [finetune.py:976] (5/7) Epoch 12, batch 5650, loss[loss=0.1395, simple_loss=0.2182, pruned_loss=0.03034, over 4796.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2627, pruned_loss=0.06367, over 954267.27 frames. ], batch size: 25, lr: 3.62e-03, grad_scale: 16.0
+2023-03-26 15:13:27,861 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=68685.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:13:27,902 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68685.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:13:28,569 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2019, 2.0045, 1.8058, 1.9478, 1.8935, 1.9484, 1.9013, 2.5983],
+       device='cuda:5'), covar=tensor([0.3815, 0.4676, 0.3464, 0.3764, 0.3894, 0.2387, 0.4280, 0.1752],
+       device='cuda:5'), in_proj_covar=tensor([0.0285, 0.0259, 0.0224, 0.0277, 0.0244, 0.0211, 0.0247, 0.0220],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:13:41,826 INFO [finetune.py:976] (5/7) Epoch 12, batch 5700, loss[loss=0.1637, simple_loss=0.2128, pruned_loss=0.05732, over 4038.00 frames. ], tot_loss[loss=0.1926, simple_loss=0.2589, pruned_loss=0.06314, over 939223.61 frames. ], batch size: 17, lr: 3.62e-03, grad_scale: 16.0
+2023-03-26 15:14:27,874 INFO [finetune.py:976] (5/7) Epoch 13, batch 0, loss[loss=0.2277, simple_loss=0.2865, pruned_loss=0.08445, over 4813.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.2865, pruned_loss=0.08445, over 4813.00 frames. ], batch size: 39, lr: 3.62e-03, grad_scale: 16.0
+2023-03-26 15:14:27,875 INFO [finetune.py:1001] (5/7) Computing validation loss
+2023-03-26 15:14:30,657 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6683, 3.5320, 3.4760, 1.6157, 3.6417, 2.7991, 0.8773, 2.3111],
+       device='cuda:5'), covar=tensor([0.1859, 0.1626, 0.1261, 0.3013, 0.0950, 0.0930, 0.3437, 0.1606],
+       device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0173, 0.0160, 0.0127, 0.0155, 0.0121, 0.0145, 0.0122],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+       device='cuda:5')
+2023-03-26 15:14:42,136 INFO [finetune.py:1010] (5/7) Epoch 13, validation: loss=0.1598, simple_loss=0.23, pruned_loss=0.04482, over 2265189.00 frames.
+2023-03-26 15:14:42,136 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB
+2023-03-26 15:14:47,270 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.040e+02 1.546e+02 1.915e+02 2.253e+02 4.332e+02, threshold=3.830e+02, percent-clipped=1.0
+2023-03-26 15:14:49,783 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68743.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 15:14:52,134 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68746.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:14:58,498 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68755.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:15:00,402 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0855, 1.9688, 1.9324, 1.9963, 1.7037, 3.9786, 1.7570, 2.5058],
+       device='cuda:5'), covar=tensor([0.2970, 0.2207, 0.1802, 0.2145, 0.1453, 0.0204, 0.2208, 0.0981],
+       device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0115, 0.0119, 0.0123, 0.0115, 0.0097, 0.0097, 0.0097],
+       device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+       device='cuda:5')
+2023-03-26 15:15:13,123 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9900, 1.7996, 2.0729, 1.4154, 2.0245, 2.2298, 2.0195, 1.4206],
+       device='cuda:5'), covar=tensor([0.0705, 0.0900, 0.0753, 0.1065, 0.0735, 0.0637, 0.0766, 0.1758],
+       device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0133, 0.0142, 0.0125, 0.0122, 0.0141, 0.0142, 0.0161],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:15:15,982 INFO [finetune.py:976] (5/7) Epoch 13, batch 50, loss[loss=0.2025, simple_loss=0.2748, pruned_loss=0.06511, over 4900.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2635, pruned_loss=0.06788, over 214367.47 frames. ], batch size: 46, lr: 3.62e-03, grad_scale: 16.0
+2023-03-26 15:15:21,853 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=68791.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 15:15:22,515 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5709, 1.4477, 1.0261, 0.2923, 1.1962, 1.4400, 1.4572, 1.4238],
+       device='cuda:5'), covar=tensor([0.0832, 0.0801, 0.1184, 0.1757, 0.1275, 0.2091, 0.2126, 0.0700],
+       device='cuda:5'), in_proj_covar=tensor([0.0164, 0.0195, 0.0196, 0.0184, 0.0211, 0.0205, 0.0220, 0.0194],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:15:27,190 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7103, 0.6712, 1.7224, 1.6021, 1.5029, 1.4124, 1.4958, 1.6107],
+       device='cuda:5'), covar=tensor([0.3521, 0.3848, 0.3393, 0.3397, 0.4465, 0.3416, 0.4076, 0.3196],
+       device='cuda:5'), in_proj_covar=tensor([0.0238, 0.0237, 0.0254, 0.0260, 0.0256, 0.0232, 0.0273, 0.0232],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:15:56,577 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0196, 1.8743, 1.6472, 1.8191, 2.0223, 1.7663, 2.2006, 2.0169],
+       device='cuda:5'), covar=tensor([0.1381, 0.2266, 0.3167, 0.2588, 0.2614, 0.1675, 0.3270, 0.1866],
+       device='cuda:5'), in_proj_covar=tensor([0.0177, 0.0186, 0.0231, 0.0253, 0.0240, 0.0198, 0.0211, 0.0197],
+       device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:15:57,665 INFO [finetune.py:976] (5/7) Epoch 13, batch 100, loss[loss=0.1801, simple_loss=0.2551, pruned_loss=0.05256, over 4813.00 frames. ], tot_loss[loss=0.1895, simple_loss=0.2556, pruned_loss=0.06172, over 380081.75 frames. ], batch size: 41, lr: 3.62e-03, grad_scale: 16.0
+2023-03-26 15:16:02,755 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.134e+02 1.681e+02 1.901e+02 2.429e+02 4.753e+02, threshold=3.802e+02, percent-clipped=2.0
+2023-03-26 15:16:07,211 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0
+2023-03-26 15:16:31,426 INFO [finetune.py:976] (5/7) Epoch 13, batch 150, loss[loss=0.2072, simple_loss=0.2716, pruned_loss=0.07136, over 4879.00 frames. ], tot_loss[loss=0.1863, simple_loss=0.2514, pruned_loss=0.06055, over 508122.26 frames. ], batch size: 34, lr: 3.62e-03, grad_scale: 16.0
+2023-03-26 15:16:59,137 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7134, 1.6077, 1.5393, 1.6509, 1.0859, 3.3300, 1.3177, 1.7467],
+       device='cuda:5'), covar=tensor([0.3097, 0.2192, 0.1995, 0.2030, 0.1765, 0.0204, 0.2476, 0.1243],
+       device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0120, 0.0123, 0.0115, 0.0098, 0.0098, 0.0097],
+       device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+       device='cuda:5')
+2023-03-26 15:17:05,108 INFO [finetune.py:976] (5/7) Epoch 13, batch 200, loss[loss=0.1861, simple_loss=0.2489, pruned_loss=0.0617, over 4772.00 frames. ], tot_loss[loss=0.1844, simple_loss=0.2497, pruned_loss=0.05957, over 608887.87 frames. ], batch size: 28, lr: 3.62e-03, grad_scale: 16.0
+2023-03-26 15:17:05,764 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68934.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:17:09,212 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.222e+02 1.603e+02 1.930e+02 2.189e+02 8.191e+02, threshold=3.861e+02, percent-clipped=2.0
+2023-03-26 15:17:46,317 INFO [finetune.py:976] (5/7) Epoch 13, batch 250, loss[loss=0.197, simple_loss=0.2609, pruned_loss=0.06655, over 4753.00 frames. ], tot_loss[loss=0.187, simple_loss=0.2525, pruned_loss=0.06074, over 685612.24 frames. ], batch size: 28, lr: 3.62e-03, grad_scale: 16.0
+2023-03-26 15:18:05,434 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6358, 1.5304, 2.1394, 3.2776, 2.2892, 2.4154, 1.3186, 2.6362],
+       device='cuda:5'), covar=tensor([0.1792, 0.1443, 0.1288, 0.0658, 0.0788, 0.1211, 0.1600, 0.0625],
+       device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0116, 0.0134, 0.0165, 0.0101, 0.0138, 0.0126, 0.0102],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003],
+       device='cuda:5')
+2023-03-26 15:18:19,714 INFO [finetune.py:976] (5/7) Epoch 13, batch 300, loss[loss=0.2054, simple_loss=0.2816, pruned_loss=0.06462, over 4833.00 frames. ], tot_loss[loss=0.1913, simple_loss=0.2573, pruned_loss=0.06265, over 743378.21 frames. ], batch size: 47, lr: 3.62e-03, grad_scale: 32.0
+2023-03-26 15:18:23,320 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.585e+02 1.877e+02 2.328e+02 4.201e+02, threshold=3.755e+02, percent-clipped=2.0
+2023-03-26 15:18:24,578 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69041.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:18:26,363 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69043.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:18:34,514 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69055.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:18:55,351 INFO [finetune.py:976] (5/7) Epoch 13, batch 350, loss[loss=0.2245, simple_loss=0.2916, pruned_loss=0.07873, over 4789.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.262, pruned_loss=0.06456, over 791568.33 frames. ], batch size: 29, lr: 3.62e-03, grad_scale: 32.0
+2023-03-26 15:19:18,983 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=69103.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:19:19,636 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69104.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:19:41,451 INFO [finetune.py:976] (5/7) Epoch 13, batch 400, loss[loss=0.2054, simple_loss=0.2668, pruned_loss=0.07197, over 4805.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2625, pruned_loss=0.06452, over 828709.90 frames. ], batch size: 25, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:19:50,063 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.154e+02 1.689e+02 1.999e+02 2.345e+02 4.076e+02, threshold=3.998e+02, percent-clipped=3.0
+2023-03-26 15:20:09,384 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0
+2023-03-26 15:20:09,754 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69162.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:20:13,414 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69168.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:20:23,368 INFO [finetune.py:976] (5/7) Epoch 13, batch 450, loss[loss=0.1897, simple_loss=0.2519, pruned_loss=0.0637, over 4724.00 frames. ], tot_loss[loss=0.194, simple_loss=0.2609, pruned_loss=0.06355, over 858317.30 frames. ], batch size: 59, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:20:29,472 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5329, 1.4347, 1.2182, 1.5416, 1.6335, 1.5820, 0.9292, 1.2702],
+       device='cuda:5'), covar=tensor([0.2216, 0.2130, 0.2068, 0.1723, 0.1573, 0.1208, 0.2554, 0.1984],
+       device='cuda:5'), in_proj_covar=tensor([0.0238, 0.0206, 0.0209, 0.0190, 0.0239, 0.0181, 0.0212, 0.0197],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:20:30,220 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0
+2023-03-26 15:21:04,192 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69223.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:21:05,417 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69225.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:21:07,857 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69229.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:21:09,757 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.71 vs. limit=2.0
+2023-03-26 15:21:10,195 INFO [finetune.py:976] (5/7) Epoch 13, batch 500, loss[loss=0.2024, simple_loss=0.2652, pruned_loss=0.0698, over 4932.00 frames. ], tot_loss[loss=0.1925, simple_loss=0.2587, pruned_loss=0.0632, over 879424.34 frames. ], batch size: 33, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:21:10,904 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69234.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:21:14,298 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.151e+02 1.659e+02 1.928e+02 2.205e+02 4.798e+02, threshold=3.855e+02, percent-clipped=1.0
+2023-03-26 15:21:18,090 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69245.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 15:21:34,731 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0
+2023-03-26 15:21:37,033 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69273.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:21:43,321 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=69282.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:21:43,877 INFO [finetune.py:976] (5/7) Epoch 13, batch 550, loss[loss=0.1732, simple_loss=0.2432, pruned_loss=0.05156, over 4907.00 frames. ], tot_loss[loss=0.1899, simple_loss=0.2561, pruned_loss=0.0619, over 897487.61 frames. ], batch size: 43, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:21:45,840 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69286.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:21:59,426 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69306.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 15:22:08,857 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69320.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:22:17,555 INFO [finetune.py:976] (5/7) Epoch 13, batch 600, loss[loss=0.1805, simple_loss=0.249, pruned_loss=0.05603, over 4758.00 frames. ], tot_loss[loss=0.1899, simple_loss=0.256, pruned_loss=0.06192, over 909821.98 frames. ], batch size: 54, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:22:18,302 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69334.0, num_to_drop=1, layers_to_drop={3}
+2023-03-26 15:22:21,204 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.059e+02 1.536e+02 1.861e+02 2.296e+02 3.946e+02, threshold=3.721e+02, percent-clipped=1.0
+2023-03-26 15:22:22,520 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69341.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:22:26,547 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.7853, 3.2434, 3.4463, 3.6550, 3.5689, 3.3031, 3.8660, 1.2628],
+       device='cuda:5'), covar=tensor([0.0820, 0.0888, 0.0837, 0.1004, 0.1213, 0.1531, 0.0772, 0.5201],
+       device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0242, 0.0275, 0.0290, 0.0326, 0.0281, 0.0300, 0.0295],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:22:50,023 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69368.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:22:58,854 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69381.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:22:59,984 INFO [finetune.py:976] (5/7) Epoch 13, batch 650, loss[loss=0.1367, simple_loss=0.2084, pruned_loss=0.0325, over 4720.00 frames. ], tot_loss[loss=0.193, simple_loss=0.2599, pruned_loss=0.06303, over 919900.07 frames. ], batch size: 23, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:23:03,695 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=69389.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:23:06,751 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69394.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:23:10,346 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69399.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:23:30,658 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69429.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:23:33,421 INFO [finetune.py:976] (5/7) Epoch 13, batch 700, loss[loss=0.1629, simple_loss=0.216, pruned_loss=0.05495, over 3967.00 frames. ], tot_loss[loss=0.193, simple_loss=0.2603, pruned_loss=0.06286, over 927896.42 frames. ], batch size: 17, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:23:37,537 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.135e+02 1.702e+02 1.957e+02 2.425e+02 4.096e+02, threshold=3.913e+02, percent-clipped=2.0
+2023-03-26 15:23:47,862 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69455.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:23:54,777 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5671, 1.4348, 1.4269, 1.4714, 0.9178, 2.9273, 1.0032, 1.4058],
+       device='cuda:5'), covar=tensor([0.3257, 0.2405, 0.2098, 0.2318, 0.1971, 0.0255, 0.2641, 0.1380],
+       device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0120, 0.0123, 0.0115, 0.0098, 0.0098, 0.0097],
+       device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+       device='cuda:5')
+2023-03-26 15:23:58,912 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.3878, 3.7175, 4.0326, 4.2626, 4.1502, 3.8714, 4.4643, 1.3817],
+       device='cuda:5'), covar=tensor([0.0774, 0.0914, 0.1016, 0.1023, 0.1165, 0.1657, 0.0739, 0.5382],
+       device='cuda:5'), in_proj_covar=tensor([0.0352, 0.0243, 0.0277, 0.0292, 0.0329, 0.0283, 0.0302, 0.0296],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:24:06,514 INFO [finetune.py:976] (5/7) Epoch 13, batch 750, loss[loss=0.1704, simple_loss=0.244, pruned_loss=0.04834, over 4761.00 frames. ], tot_loss[loss=0.1941, simple_loss=0.2616, pruned_loss=0.06329, over 934182.61 frames. ], batch size: 28, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:24:40,544 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69518.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:24:43,996 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5718, 1.4215, 1.9755, 3.1168, 2.1677, 2.3179, 0.9093, 2.5732],
+       device='cuda:5'), covar=tensor([0.2099, 0.1914, 0.1579, 0.0942, 0.0994, 0.1612, 0.2314, 0.0707],
+       device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0116, 0.0134, 0.0165, 0.0101, 0.0138, 0.0126, 0.0102],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003],
+       device='cuda:5')
+2023-03-26 15:24:44,560 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69524.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:24:45,048 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0
+2023-03-26 15:24:50,486 INFO [finetune.py:976] (5/7) Epoch 13, batch 800, loss[loss=0.18, simple_loss=0.2441, pruned_loss=0.05798, over 4932.00 frames. ], tot_loss[loss=0.1931, simple_loss=0.2607, pruned_loss=0.06277, over 939012.10 frames. ], batch size: 33, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:24:57,843 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.091e+02 1.694e+02 1.982e+02 2.355e+02 4.334e+02, threshold=3.964e+02, percent-clipped=1.0
+2023-03-26 15:25:08,471 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7901, 1.7575, 1.5996, 2.0300, 2.2196, 1.9197, 1.6204, 1.4946],
+       device='cuda:5'), covar=tensor([0.2335, 0.2157, 0.1945, 0.1515, 0.1900, 0.1198, 0.2427, 0.2024],
+       device='cuda:5'), in_proj_covar=tensor([0.0238, 0.0207, 0.0209, 0.0190, 0.0240, 0.0182, 0.0212, 0.0198],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:25:47,578 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69581.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:25:48,751 INFO [finetune.py:976] (5/7) Epoch 13, batch 850, loss[loss=0.1706, simple_loss=0.2527, pruned_loss=0.0442, over 4889.00 frames. ], tot_loss[loss=0.192, simple_loss=0.2592, pruned_loss=0.06242, over 945218.92 frames. ], batch size: 32, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:25:52,436 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2196, 2.1176, 2.2376, 0.9895, 2.5135, 2.6637, 2.3006, 1.9529],
+       device='cuda:5'), covar=tensor([0.0894, 0.0591, 0.0433, 0.0654, 0.0447, 0.0515, 0.0380, 0.0648],
+       device='cuda:5'), in_proj_covar=tensor([0.0125, 0.0153, 0.0123, 0.0130, 0.0130, 0.0126, 0.0143, 0.0146],
+       device='cuda:5'), out_proj_covar=tensor([9.2788e-05, 1.1173e-04, 8.8324e-05, 9.3617e-05, 9.2401e-05, 9.1099e-05,
+       1.0450e-04, 1.0596e-04], device='cuda:5')
+2023-03-26 15:26:03,131 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69601.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 15:26:06,188 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4005, 2.3094, 1.8097, 2.5201, 2.2741, 1.9961, 2.8664, 2.4791],
+       device='cuda:5'), covar=tensor([0.1474, 0.2526, 0.3352, 0.3025, 0.2751, 0.1903, 0.3848, 0.1890],
+       device='cuda:5'), in_proj_covar=tensor([0.0180, 0.0188, 0.0234, 0.0256, 0.0244, 0.0200, 0.0214, 0.0199],
+       device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:26:19,779 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1851, 1.8105, 2.2122, 2.0769, 1.8292, 1.8638, 2.0723, 1.9987],
+       device='cuda:5'), covar=tensor([0.4264, 0.4909, 0.3553, 0.4444, 0.5734, 0.4025, 0.5245, 0.3552],
+       device='cuda:5'), in_proj_covar=tensor([0.0240, 0.0238, 0.0255, 0.0261, 0.0257, 0.0233, 0.0275, 0.0233],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:26:21,351 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69629.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 15:26:24,227 INFO [finetune.py:976] (5/7) Epoch 13, batch 900, loss[loss=0.1466, simple_loss=0.2061, pruned_loss=0.04359, over 4880.00 frames. ], tot_loss[loss=0.191, simple_loss=0.2573, pruned_loss=0.06237, over 948023.75 frames. ], batch size: 34, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:26:27,891 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.145e+02 1.604e+02 1.856e+02 2.224e+02 3.601e+02, threshold=3.711e+02, percent-clipped=0.0
+2023-03-26 15:26:55,538 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69674.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 15:26:56,715 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69676.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:27:06,462 INFO [finetune.py:976] (5/7) Epoch 13, batch 950, loss[loss=0.1858, simple_loss=0.2483, pruned_loss=0.06162, over 4883.00 frames. ], tot_loss[loss=0.19, simple_loss=0.256, pruned_loss=0.06197, over 951542.44 frames. ], batch size: 32, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:27:29,365 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69699.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:27:40,283 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0
+2023-03-26 15:28:02,975 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69724.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:28:05,491 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6314, 2.3670, 2.2092, 2.4629, 2.2539, 2.3486, 2.2474, 3.1225],
+       device='cuda:5'), covar=tensor([0.3746, 0.4658, 0.3096, 0.3920, 0.4300, 0.2487, 0.4402, 0.1442],
+       device='cuda:5'), in_proj_covar=tensor([0.0285, 0.0259, 0.0223, 0.0276, 0.0244, 0.0211, 0.0246, 0.0220],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:28:08,355 INFO [finetune.py:976] (5/7) Epoch 13, batch 1000, loss[loss=0.241, simple_loss=0.3017, pruned_loss=0.0902, over 4910.00 frames. ], tot_loss[loss=0.1913, simple_loss=0.2575, pruned_loss=0.06259, over 952592.81 frames. ], batch size: 36, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:28:10,719 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69735.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 15:28:12,999 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.062e+02 1.598e+02 1.856e+02 2.406e+02 4.029e+02, threshold=3.712e+02, percent-clipped=2.0
+2023-03-26 15:28:18,440 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=69747.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:28:20,808 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69750.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:28:22,061 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6112, 1.4039, 2.1573, 3.4280, 2.4112, 2.3930, 0.8120, 2.6545],
+       device='cuda:5'), covar=tensor([0.1790, 0.1644, 0.1374, 0.0688, 0.0775, 0.1725, 0.2138, 0.0580],
+       device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0117, 0.0136, 0.0167, 0.0102, 0.0140, 0.0128, 0.0103],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003],
+       device='cuda:5')
+2023-03-26 15:28:23,318 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4601, 1.3032, 1.1548, 1.4379, 1.5446, 1.4576, 0.9142, 1.2325],
+       device='cuda:5'), covar=tensor([0.2289, 0.2304, 0.2116, 0.1809, 0.1672, 0.1310, 0.2654, 0.1999],
+       device='cuda:5'), in_proj_covar=tensor([0.0237, 0.0205, 0.0208, 0.0188, 0.0238, 0.0181, 0.0211, 0.0197],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:28:26,965 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.3346, 2.9402, 3.1027, 3.2489, 3.1103, 2.8985, 3.3672, 1.0309],
+       device='cuda:5'), covar=tensor([0.1012, 0.1047, 0.0983, 0.1157, 0.1468, 0.1775, 0.0955, 0.5174],
+       device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0243, 0.0276, 0.0292, 0.0328, 0.0281, 0.0301, 0.0295],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:28:52,949 INFO [finetune.py:976] (5/7) Epoch 13, batch 1050, loss[loss=0.2254, simple_loss=0.2885, pruned_loss=0.08114, over 4934.00 frames. ], tot_loss[loss=0.1937, simple_loss=0.2603, pruned_loss=0.06352, over 952568.46 frames. ], batch size: 33, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:29:38,102 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69818.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:29:48,111 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69824.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:29:59,171 INFO [finetune.py:976] (5/7) Epoch 13, batch 1100, loss[loss=0.2054, simple_loss=0.2804, pruned_loss=0.06519, over 4891.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2619, pruned_loss=0.06382, over 952441.49 frames. ], batch size: 43, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:30:02,885 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.063e+02 1.609e+02 1.898e+02 2.282e+02 6.010e+02, threshold=3.795e+02, percent-clipped=2.0
+2023-03-26 15:30:12,680 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0
+2023-03-26 15:30:35,892 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=69866.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:30:43,316 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=69872.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:30:51,896 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69881.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:30:53,488 INFO [finetune.py:976] (5/7) Epoch 13, batch 1150, loss[loss=0.2207, simple_loss=0.2762, pruned_loss=0.08263, over 4801.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.2623, pruned_loss=0.06376, over 954684.64 frames. ], batch size: 51, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:30:58,475 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8545, 1.6139, 2.3050, 1.5543, 2.0629, 2.2014, 1.6274, 2.3783],
+       device='cuda:5'), covar=tensor([0.1452, 0.2231, 0.1558, 0.2098, 0.0903, 0.1499, 0.2964, 0.0839],
+       device='cuda:5'), in_proj_covar=tensor([0.0198, 0.0207, 0.0196, 0.0193, 0.0180, 0.0216, 0.0219, 0.0201],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:31:12,300 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69901.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 15:31:42,227 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=69929.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:31:42,255 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69929.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 15:31:44,610 INFO [finetune.py:976] (5/7) Epoch 13, batch 1200, loss[loss=0.1707, simple_loss=0.2451, pruned_loss=0.04819, over 4898.00 frames. ], tot_loss[loss=0.1924, simple_loss=0.2599, pruned_loss=0.06245, over 954640.07 frames. ], batch size: 35, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:31:48,756 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.031e+02 1.603e+02 1.893e+02 2.321e+02 3.158e+02, threshold=3.786e+02, percent-clipped=0.0
+2023-03-26 15:31:55,833 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=69949.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 15:32:13,218 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69976.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:32:13,760 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=69977.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:32:17,838 INFO [finetune.py:976] (5/7) Epoch 13, batch 1250, loss[loss=0.1388, simple_loss=0.2187, pruned_loss=0.02941, over 4791.00 frames. ], tot_loss[loss=0.1893, simple_loss=0.2563, pruned_loss=0.06118, over 954942.99 frames. ], batch size: 29, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:32:19,220 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.66 vs. limit=2.0
+2023-03-26 15:32:46,509 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=70024.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:32:46,537 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=70024.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:32:50,168 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=70030.0, num_to_drop=1, layers_to_drop={3}
+2023-03-26 15:32:52,398 INFO [finetune.py:976] (5/7) Epoch 13, batch 1300, loss[loss=0.2095, simple_loss=0.2637, pruned_loss=0.07763, over 4860.00 frames. ], tot_loss[loss=0.1861, simple_loss=0.2527, pruned_loss=0.05981, over 954803.90 frames. ], batch size: 31, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:32:55,572 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0352, 2.2654, 1.8547, 2.0446, 2.5857, 2.6305, 2.1769, 2.0140],
+       device='cuda:5'), covar=tensor([0.0345, 0.0297, 0.0534, 0.0316, 0.0278, 0.0375, 0.0353, 0.0390],
+       device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0109, 0.0140, 0.0113, 0.0101, 0.0105, 0.0095, 0.0109],
+       device='cuda:5'), out_proj_covar=tensor([7.3021e-05, 8.4506e-05, 1.1081e-04, 8.7964e-05, 7.8928e-05, 7.8047e-05,
+       7.1658e-05, 8.3490e-05], device='cuda:5')
+2023-03-26 15:32:56,053 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.095e+02 1.649e+02 1.897e+02 2.309e+02 4.234e+02, threshold=3.795e+02, percent-clipped=2.0
+2023-03-26 15:32:59,640 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.6283, 1.6920, 1.6282, 0.8993, 1.7109, 1.8948, 1.8588, 1.4930],
+       device='cuda:5'), covar=tensor([0.0855, 0.0551, 0.0418, 0.0499, 0.0372, 0.0541, 0.0317, 0.0618],
+       device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0150, 0.0120, 0.0128, 0.0128, 0.0125, 0.0141, 0.0143],
+       device='cuda:5'), out_proj_covar=tensor([9.1607e-05, 1.0987e-04, 8.6582e-05, 9.2314e-05, 9.1035e-05, 9.0440e-05,
+       1.0287e-04, 1.0397e-04], device='cuda:5')
+2023-03-26 15:33:03,814 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=70050.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:33:19,137 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=70072.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:33:23,474 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3336, 2.1554, 2.8428, 1.8263, 2.5443, 2.8045, 2.0882, 2.8756],
+       device='cuda:5'), covar=tensor([0.1505, 0.2016, 0.1580, 0.2554, 0.0894, 0.1680, 0.2735, 0.0898],
+       device='cuda:5'), in_proj_covar=tensor([0.0196, 0.0205, 0.0195, 0.0193, 0.0178, 0.0214, 0.0217, 0.0200],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:33:25,841 INFO [finetune.py:976] (5/7) Epoch 13, batch 1350, loss[loss=0.147, simple_loss=0.2097, pruned_loss=0.04217, over 4682.00 frames. ], tot_loss[loss=0.1859, simple_loss=0.2521, pruned_loss=0.05983, over 952714.36 frames. ], batch size: 23, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:33:34,005 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4054, 1.0175, 0.8330, 1.2884, 1.8558, 0.7489, 1.1581, 1.3601],
+       device='cuda:5'), covar=tensor([0.1569, 0.2190, 0.1712, 0.1298, 0.2053, 0.2051, 0.1491, 0.1861],
+       device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0096, 0.0113, 0.0093, 0.0120, 0.0094, 0.0099, 0.0090],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003],
+       device='cuda:5')
+2023-03-26 15:33:36,440 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=70098.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:33:53,177 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70110.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:34:08,077 INFO [finetune.py:976] (5/7) Epoch 13, batch 1400, loss[loss=0.2091, simple_loss=0.2805, pruned_loss=0.06885, over 4796.00 frames. ], tot_loss[loss=0.1912, simple_loss=0.2581, pruned_loss=0.06212, over 955189.67 frames. ], batch size: 51, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:34:12,154 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.202e+02 1.588e+02 1.939e+02 2.393e+02 8.943e+02, threshold=3.877e+02, percent-clipped=1.0
+2023-03-26 15:34:34,209 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70171.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:34:41,772 INFO [finetune.py:976] (5/7) Epoch 13, batch 1450, loss[loss=0.1856, simple_loss=0.269, pruned_loss=0.05109, over 4926.00 frames. ], tot_loss[loss=0.1918, simple_loss=0.2593, pruned_loss=0.06217, over 954121.90 frames. ], batch size: 38, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:35:26,434 INFO [finetune.py:976] (5/7) Epoch 13, batch 1500, loss[loss=0.1883, simple_loss=0.261, pruned_loss=0.05776, over 4834.00 frames. ], tot_loss[loss=0.1938, simple_loss=0.2614, pruned_loss=0.06312, over 953879.86 frames. ], batch size: 49, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:35:30,134 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.195e+02 1.613e+02 1.899e+02 2.364e+02 4.350e+02, threshold=3.798e+02, percent-clipped=1.0
+2023-03-26 15:35:46,640 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0
+2023-03-26 15:35:46,859 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70260.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:35:47,639 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0
+2023-03-26 15:36:01,689 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1851, 1.2948, 1.5132, 1.0656, 1.1981, 1.4202, 1.2577, 1.5761],
+       device='cuda:5'), covar=tensor([0.1395, 0.1949, 0.1313, 0.1470, 0.0958, 0.1223, 0.3032, 0.0879],
+       device='cuda:5'), in_proj_covar=tensor([0.0196, 0.0205, 0.0195, 0.0192, 0.0178, 0.0214, 0.0217, 0.0200],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:36:02,911 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.6088, 1.6128, 1.5605, 1.0607, 1.6687, 1.9215, 1.8347, 1.4293],
+       device='cuda:5'), covar=tensor([0.1219, 0.0657, 0.0558, 0.0572, 0.0494, 0.0539, 0.0388, 0.0709],
+       device='cuda:5'), in_proj_covar=tensor([0.0125, 0.0152, 0.0122, 0.0130, 0.0130, 0.0126, 0.0143, 0.0144],
+       device='cuda:5'), out_proj_covar=tensor([9.2753e-05, 1.1105e-04, 8.7899e-05, 9.3500e-05, 9.2544e-05, 9.1429e-05,
+       1.0392e-04, 1.0480e-04], device='cuda:5')
+2023-03-26 15:36:09,523 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0
+2023-03-26 15:36:10,535 INFO [finetune.py:976] (5/7) Epoch 13, batch 1550, loss[loss=0.1773, simple_loss=0.2415, pruned_loss=0.0566, over 4825.00 frames. ], tot_loss[loss=0.1926, simple_loss=0.2603, pruned_loss=0.06247, over 956115.92 frames. ], batch size: 38, lr: 3.61e-03, grad_scale: 32.0
+2023-03-26 15:36:39,332 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4169, 1.2246, 1.2507, 1.3638, 1.6502, 1.4538, 1.3536, 1.1763],
+       device='cuda:5'), covar=tensor([0.0298, 0.0270, 0.0542, 0.0258, 0.0192, 0.0401, 0.0287, 0.0372],
+       device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0109, 0.0140, 0.0114, 0.0101, 0.0106, 0.0095, 0.0110],
+       device='cuda:5'), out_proj_covar=tensor([7.3155e-05, 8.4841e-05, 1.1100e-04, 8.8490e-05, 7.9130e-05, 7.8521e-05,
+       7.1925e-05, 8.4156e-05], device='cuda:5')
+2023-03-26 15:36:49,640 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70321.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:36:58,922 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=70330.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 15:37:00,630 INFO [finetune.py:976] (5/7) Epoch 13, batch 1600, loss[loss=0.1708, simple_loss=0.2324, pruned_loss=0.05459, over 4750.00 frames. ], tot_loss[loss=0.1921, simple_loss=0.2592, pruned_loss=0.06252, over 955078.77 frames. ], batch size: 27, lr: 3.60e-03, grad_scale: 32.0
+2023-03-26 15:37:04,737 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.057e+02 1.529e+02 1.873e+02 2.318e+02 5.550e+02, threshold=3.745e+02, percent-clipped=4.0
+2023-03-26 15:37:18,970 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6811, 1.3836, 1.2747, 1.3606, 1.8680, 1.8943, 1.5676, 1.3497],
+       device='cuda:5'), covar=tensor([0.0307, 0.0352, 0.0768, 0.0380, 0.0226, 0.0431, 0.0343, 0.0404],
+       device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0109, 0.0140, 0.0114, 0.0101, 0.0106, 0.0095, 0.0110],
+       device='cuda:5'), out_proj_covar=tensor([7.3081e-05, 8.4676e-05, 1.1105e-04, 8.8502e-05, 7.8999e-05, 7.8516e-05,
+       7.1893e-05, 8.4142e-05], device='cuda:5')
+2023-03-26 15:37:30,809 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=70378.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 15:37:34,185 INFO [finetune.py:976] (5/7) Epoch 13, batch 1650, loss[loss=0.175, simple_loss=0.2444, pruned_loss=0.05281, over 4776.00 frames. ], tot_loss[loss=0.1909, simple_loss=0.2576, pruned_loss=0.06205, over 954730.42 frames. ], batch size: 29, lr: 3.60e-03, grad_scale: 32.0
+2023-03-26 15:37:39,814 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0
+2023-03-26 15:38:01,521 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4904, 1.5973, 0.6773, 2.2422, 2.7418, 1.8204, 2.0827, 2.1913],
+       device='cuda:5'), covar=tensor([0.1399, 0.2076, 0.2373, 0.1140, 0.1578, 0.1759, 0.1340, 0.1836],
+       device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0096, 0.0113, 0.0093, 0.0121, 0.0095, 0.0100, 0.0091],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003],
+       device='cuda:5')
+2023-03-26 15:38:08,088 INFO [finetune.py:976] (5/7) Epoch 13, batch 1700, loss[loss=0.1864, simple_loss=0.2512, pruned_loss=0.06082, over 4893.00 frames. ], tot_loss[loss=0.1879, simple_loss=0.2544, pruned_loss=0.06067, over 954582.41 frames. ], batch size: 35, lr: 3.60e-03, grad_scale: 32.0
+2023-03-26 15:38:11,734 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.265e+02 1.610e+02 1.926e+02 2.276e+02 4.227e+02, threshold=3.852e+02, percent-clipped=1.0
+2023-03-26 15:38:30,205 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=70466.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:38:32,134 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0938, 2.0251, 1.6819, 1.8227, 2.0830, 1.8069, 2.2470, 2.0615],
+       device='cuda:5'), covar=tensor([0.1463, 0.2108, 0.3322, 0.2623, 0.2681, 0.1765, 0.3180, 0.1907],
+       device='cuda:5'), in_proj_covar=tensor([0.0180, 0.0188, 0.0234, 0.0256, 0.0245, 0.0200, 0.0214, 0.0198],
+       device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:38:41,456 INFO [finetune.py:976] (5/7) Epoch 13, batch 1750, loss[loss=0.1953, simple_loss=0.2685, pruned_loss=0.06109, over 4910.00 frames. ], tot_loss[loss=0.1884, simple_loss=0.2551, pruned_loss=0.06088, over 956659.98 frames. ], batch size: 37, lr: 3.60e-03, grad_scale: 32.0
+2023-03-26 15:38:46,312 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5396, 2.2596, 1.9653, 1.0219, 2.1568, 1.9573, 1.8245, 2.0913],
+       device='cuda:5'), covar=tensor([0.0737, 0.0800, 0.1419, 0.1947, 0.1378, 0.1937, 0.1897, 0.0878],
+       device='cuda:5'), in_proj_covar=tensor([0.0165, 0.0196, 0.0199, 0.0185, 0.0213, 0.0206, 0.0222, 0.0196],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:39:19,078 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7201, 1.6101, 1.5966, 1.7336, 1.2737, 3.4042, 1.3995, 1.9181],
+       device='cuda:5'), covar=tensor([0.3086, 0.2349, 0.2057, 0.2260, 0.1745, 0.0209, 0.2446, 0.1186],
+       device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0120, 0.0123, 0.0115, 0.0098, 0.0098, 0.0097],
+       device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+       device='cuda:5')
+2023-03-26 15:39:24,241 INFO [finetune.py:976] (5/7) Epoch 13, batch 1800, loss[loss=0.2072, simple_loss=0.2852, pruned_loss=0.06457, over 4867.00 frames. ], tot_loss[loss=0.1906, simple_loss=0.2578, pruned_loss=0.06171, over 956976.91 frames. ], batch size: 44, lr: 3.60e-03, grad_scale: 32.0
+2023-03-26 15:39:28,347 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.316e+01 1.597e+02 2.051e+02 2.548e+02 3.844e+02, threshold=4.101e+02, percent-clipped=0.0
+2023-03-26 15:39:29,051 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8231, 3.3705, 3.2945, 1.9968, 3.5159, 2.7334, 1.1651, 2.5222],
+       device='cuda:5'), covar=tensor([0.3335, 0.1768, 0.1620, 0.2944, 0.1088, 0.1037, 0.3935, 0.1471],
+       device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0175, 0.0160, 0.0129, 0.0157, 0.0121, 0.0146, 0.0122],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+       device='cuda:5')
+2023-03-26 15:39:58,059 INFO [finetune.py:976] (5/7) Epoch 13, batch 1850, loss[loss=0.2032, simple_loss=0.2763, pruned_loss=0.06505, over 4346.00 frames. ], tot_loss[loss=0.1921, simple_loss=0.2599, pruned_loss=0.06222, over 956377.24 frames. ], batch size: 65, lr: 3.60e-03, grad_scale: 32.0
+2023-03-26 15:40:05,380 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0102, 1.7425, 2.3976, 1.6012, 2.1198, 2.3625, 1.6374, 2.3942],
+       device='cuda:5'), covar=tensor([0.1413, 0.2038, 0.1239, 0.2107, 0.0819, 0.1361, 0.2529, 0.0784],
+       device='cuda:5'), in_proj_covar=tensor([0.0196, 0.0205, 0.0194, 0.0192, 0.0177, 0.0213, 0.0216, 0.0199],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:40:05,711 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.58 vs. limit=5.0
+2023-03-26 15:40:07,250 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1351, 2.0483, 1.6692, 2.0429, 2.0830, 1.7923, 2.3861, 2.1451],
+       device='cuda:5'), covar=tensor([0.1352, 0.2082, 0.3167, 0.2562, 0.2676, 0.1803, 0.3258, 0.1885],
+       device='cuda:5'), in_proj_covar=tensor([0.0179, 0.0187, 0.0232, 0.0254, 0.0243, 0.0199, 0.0212, 0.0197],
+       device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:40:26,914 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=70616.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:40:42,677 INFO [finetune.py:976] (5/7) Epoch 13, batch 1900, loss[loss=0.199, simple_loss=0.2825, pruned_loss=0.05769, over 4805.00 frames. ], tot_loss[loss=0.193, simple_loss=0.2608, pruned_loss=0.06261, over 955524.09 frames. ], batch size: 51, lr: 3.60e-03, grad_scale: 32.0
+2023-03-26 15:40:46,778 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.033e+02 1.570e+02 1.884e+02 2.217e+02 6.026e+02, threshold=3.769e+02, percent-clipped=2.0
+2023-03-26 15:40:54,174 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6595, 1.5575, 2.0585, 1.2902, 1.6615, 1.9197, 1.4989, 2.1150],
+       device='cuda:5'), covar=tensor([0.1131, 0.1918, 0.1019, 0.1602, 0.0843, 0.1133, 0.2752, 0.0685],
+       device='cuda:5'), in_proj_covar=tensor([0.0195, 0.0204, 0.0193, 0.0191, 0.0177, 0.0213, 0.0215, 0.0199],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:41:26,264 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5570, 1.4708, 1.3026, 1.6046, 1.8948, 1.8069, 1.5310, 1.3027],
+       device='cuda:5'), covar=tensor([0.0303, 0.0334, 0.0587, 0.0287, 0.0214, 0.0394, 0.0336, 0.0466],
+       device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0109, 0.0140, 0.0113, 0.0101, 0.0105, 0.0095, 0.0110],
+       device='cuda:5'), out_proj_covar=tensor([7.2975e-05, 8.4842e-05, 1.1114e-04, 8.8238e-05, 7.8803e-05, 7.8030e-05,
+       7.2194e-05, 8.4153e-05], device='cuda:5')
+2023-03-26 15:41:27,334 INFO [finetune.py:976] (5/7) Epoch 13, batch 1950, loss[loss=0.1831, simple_loss=0.2558, pruned_loss=0.05517, over 4933.00 frames. ], tot_loss[loss=0.1905, simple_loss=0.2584, pruned_loss=0.06128, over 953597.22 frames. ], batch size: 33, lr: 3.60e-03, grad_scale: 32.0
+2023-03-26 15:41:32,704 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.1065, 2.8328, 2.5778, 1.5126, 2.7282, 2.2125, 2.1472, 2.4562],
+       device='cuda:5'), covar=tensor([0.1032, 0.0728, 0.1656, 0.2105, 0.1766, 0.2453, 0.2235, 0.1197],
+       device='cuda:5'), in_proj_covar=tensor([0.0165, 0.0196, 0.0199, 0.0185, 0.0213, 0.0207, 0.0222, 0.0195],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-26 15:42:06,899 INFO [finetune.py:976] (5/7) Epoch 13, batch 2000, loss[loss=0.1998, simple_loss=0.2663, pruned_loss=0.06663, over 4826.00 frames. ], tot_loss[loss=0.1891, simple_loss=0.2563, pruned_loss=0.06096, over 954807.01 frames. ], batch size: 33, lr: 3.60e-03, grad_scale: 32.0
+2023-03-26 15:42:15,806 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.177e+02 1.535e+02 1.807e+02 2.194e+02 3.140e+02, threshold=3.615e+02, percent-clipped=0.0
+2023-03-26 15:42:31,729 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0
+2023-03-26 15:42:36,902 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=70766.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:42:48,488 INFO [finetune.py:976] (5/7) Epoch 13, batch 2050, loss[loss=0.1763, simple_loss=0.2412, pruned_loss=0.05569, over 4910.00 frames. ], tot_loss[loss=0.1862, simple_loss=0.2527, pruned_loss=0.05985, over 954862.77 frames. ], batch size: 36, lr: 3.60e-03, grad_scale: 32.0
+2023-03-26 15:43:09,369 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=70814.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 15:43:22,316 INFO [finetune.py:976] (5/7) Epoch 13, batch 2100, loss[loss=0.1849, simple_loss=0.266, pruned_loss=0.05187, over 4853.00 frames. ], tot_loss[loss=0.1868, simple_loss=0.2529, pruned_loss=0.06039, over 953257.09 frames. 
], batch size: 47, lr: 3.60e-03, grad_scale: 32.0 +2023-03-26 15:43:26,465 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.827e+01 1.609e+02 1.892e+02 2.240e+02 3.187e+02, threshold=3.783e+02, percent-clipped=0.0 +2023-03-26 15:43:29,558 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8924, 1.7464, 1.6093, 1.4401, 1.9546, 1.6664, 1.8174, 1.8864], + device='cuda:5'), covar=tensor([0.1489, 0.2024, 0.3025, 0.2586, 0.2636, 0.1777, 0.2779, 0.1866], + device='cuda:5'), in_proj_covar=tensor([0.0180, 0.0189, 0.0235, 0.0256, 0.0245, 0.0201, 0.0214, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 15:43:56,102 INFO [finetune.py:976] (5/7) Epoch 13, batch 2150, loss[loss=0.1898, simple_loss=0.262, pruned_loss=0.05882, over 4866.00 frames. ], tot_loss[loss=0.1898, simple_loss=0.2561, pruned_loss=0.06176, over 952732.29 frames. ], batch size: 31, lr: 3.60e-03, grad_scale: 32.0 +2023-03-26 15:44:35,063 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=70916.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 15:44:46,793 INFO [finetune.py:976] (5/7) Epoch 13, batch 2200, loss[loss=0.2196, simple_loss=0.2772, pruned_loss=0.08094, over 4816.00 frames. ], tot_loss[loss=0.1901, simple_loss=0.2574, pruned_loss=0.06135, over 953725.84 frames. ], batch size: 39, lr: 3.60e-03, grad_scale: 32.0 +2023-03-26 15:44:50,484 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.006e+02 1.701e+02 1.958e+02 2.316e+02 4.574e+02, threshold=3.916e+02, percent-clipped=1.0 +2023-03-26 15:45:07,668 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=70964.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 15:45:19,185 INFO [finetune.py:976] (5/7) Epoch 13, batch 2250, loss[loss=0.2607, simple_loss=0.3073, pruned_loss=0.107, over 4841.00 frames. ], tot_loss[loss=0.1918, simple_loss=0.2592, pruned_loss=0.06219, over 953763.18 frames. ], batch size: 44, lr: 3.60e-03, grad_scale: 32.0 +2023-03-26 15:45:26,615 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70992.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 15:45:31,900 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1070, 1.7729, 2.4754, 4.1202, 2.9239, 2.8371, 1.0575, 3.4051], + device='cuda:5'), covar=tensor([0.1578, 0.1494, 0.1346, 0.0361, 0.0659, 0.1420, 0.1884, 0.0363], + device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0117, 0.0135, 0.0166, 0.0102, 0.0139, 0.0128, 0.0103], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 15:45:48,839 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.64 vs. limit=2.0 +2023-03-26 15:46:02,067 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=71030.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 15:46:03,710 INFO [finetune.py:976] (5/7) Epoch 13, batch 2300, loss[loss=0.1962, simple_loss=0.2744, pruned_loss=0.05904, over 4802.00 frames. ], tot_loss[loss=0.1914, simple_loss=0.2594, pruned_loss=0.06169, over 954381.18 frames. 
], batch size: 40, lr: 3.60e-03, grad_scale: 64.0 +2023-03-26 15:46:08,249 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.237e+01 1.685e+02 2.000e+02 2.324e+02 3.629e+02, threshold=3.999e+02, percent-clipped=0.0 +2023-03-26 15:46:14,854 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5424, 1.4907, 1.4111, 1.6041, 1.8714, 1.6628, 1.5521, 1.3072], + device='cuda:5'), covar=tensor([0.0289, 0.0274, 0.0546, 0.0256, 0.0204, 0.0428, 0.0322, 0.0409], + device='cuda:5'), in_proj_covar=tensor([0.0095, 0.0110, 0.0142, 0.0114, 0.0102, 0.0106, 0.0096, 0.0110], + device='cuda:5'), out_proj_covar=tensor([7.3931e-05, 8.5407e-05, 1.1248e-04, 8.9040e-05, 7.9957e-05, 7.8898e-05, + 7.2845e-05, 8.4695e-05], device='cuda:5') +2023-03-26 15:46:23,766 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=71053.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 15:46:59,590 INFO [finetune.py:976] (5/7) Epoch 13, batch 2350, loss[loss=0.2176, simple_loss=0.2609, pruned_loss=0.0872, over 4909.00 frames. ], tot_loss[loss=0.1885, simple_loss=0.2559, pruned_loss=0.06051, over 954524.41 frames. ], batch size: 43, lr: 3.60e-03, grad_scale: 32.0 +2023-03-26 15:47:10,754 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=71091.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 15:47:39,523 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0393, 1.7588, 2.3976, 3.9951, 2.8733, 2.8024, 0.9985, 3.2962], + device='cuda:5'), covar=tensor([0.1736, 0.1391, 0.1420, 0.0460, 0.0703, 0.1567, 0.1940, 0.0458], + device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0117, 0.0135, 0.0165, 0.0102, 0.0139, 0.0128, 0.0103], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 15:47:45,785 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1838, 2.0063, 2.3546, 1.7453, 2.1768, 2.4286, 1.9490, 2.5496], + device='cuda:5'), covar=tensor([0.1208, 0.1650, 0.1303, 0.1618, 0.0814, 0.1104, 0.2257, 0.0709], + device='cuda:5'), in_proj_covar=tensor([0.0197, 0.0206, 0.0195, 0.0192, 0.0179, 0.0215, 0.0218, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 15:47:57,478 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.22 vs. limit=5.0 +2023-03-26 15:48:00,797 INFO [finetune.py:976] (5/7) Epoch 13, batch 2400, loss[loss=0.1251, simple_loss=0.1937, pruned_loss=0.02827, over 4799.00 frames. ], tot_loss[loss=0.1861, simple_loss=0.2527, pruned_loss=0.05974, over 955120.41 frames. ], batch size: 25, lr: 3.60e-03, grad_scale: 32.0 +2023-03-26 15:48:09,286 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.076e+01 1.502e+02 1.791e+02 2.104e+02 3.987e+02, threshold=3.583e+02, percent-clipped=0.0 +2023-03-26 15:48:32,306 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. 
limit=2.0 +2023-03-26 15:48:39,254 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1322, 2.1342, 2.2227, 1.5886, 2.2129, 2.3381, 2.2885, 1.9694], + device='cuda:5'), covar=tensor([0.0662, 0.0653, 0.0679, 0.0865, 0.0667, 0.0686, 0.0601, 0.0997], + device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0132, 0.0143, 0.0125, 0.0124, 0.0143, 0.0142, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 15:49:05,624 INFO [finetune.py:976] (5/7) Epoch 13, batch 2450, loss[loss=0.1447, simple_loss=0.2115, pruned_loss=0.03901, over 3882.00 frames. ], tot_loss[loss=0.1842, simple_loss=0.2504, pruned_loss=0.05899, over 954589.52 frames. ], batch size: 17, lr: 3.60e-03, grad_scale: 32.0 +2023-03-26 15:49:53,880 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9048, 1.4380, 1.9750, 1.8074, 1.6733, 1.6049, 1.8255, 1.7894], + device='cuda:5'), covar=tensor([0.3305, 0.3454, 0.2811, 0.3219, 0.4138, 0.3378, 0.3683, 0.2735], + device='cuda:5'), in_proj_covar=tensor([0.0242, 0.0240, 0.0257, 0.0264, 0.0261, 0.0236, 0.0277, 0.0235], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 15:49:57,499 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-03-26 15:50:04,537 INFO [finetune.py:976] (5/7) Epoch 13, batch 2500, loss[loss=0.1868, simple_loss=0.2674, pruned_loss=0.05309, over 4814.00 frames. ], tot_loss[loss=0.1846, simple_loss=0.2513, pruned_loss=0.059, over 953515.55 frames. ], batch size: 39, lr: 3.60e-03, grad_scale: 32.0 +2023-03-26 15:50:08,818 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.134e+02 1.629e+02 1.890e+02 2.415e+02 4.682e+02, threshold=3.780e+02, percent-clipped=4.0 +2023-03-26 15:50:20,182 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9504, 1.8536, 1.5873, 1.8125, 1.7671, 1.7605, 1.7577, 2.5327], + device='cuda:5'), covar=tensor([0.3991, 0.4405, 0.3541, 0.4260, 0.4425, 0.2431, 0.4315, 0.1599], + device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0260, 0.0224, 0.0278, 0.0246, 0.0212, 0.0248, 0.0222], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 15:50:41,419 INFO [finetune.py:976] (5/7) Epoch 13, batch 2550, loss[loss=0.1856, simple_loss=0.2583, pruned_loss=0.05642, over 4904.00 frames. ], tot_loss[loss=0.1885, simple_loss=0.2556, pruned_loss=0.06066, over 953950.36 frames. ], batch size: 37, lr: 3.60e-03, grad_scale: 32.0 +2023-03-26 15:51:22,575 INFO [finetune.py:976] (5/7) Epoch 13, batch 2600, loss[loss=0.2331, simple_loss=0.2854, pruned_loss=0.09039, over 4816.00 frames. ], tot_loss[loss=0.1911, simple_loss=0.2584, pruned_loss=0.0619, over 955329.29 frames. 
], batch size: 30, lr: 3.60e-03, grad_scale: 32.0 +2023-03-26 15:51:26,871 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.157e+02 1.678e+02 1.922e+02 2.428e+02 5.321e+02, threshold=3.843e+02, percent-clipped=3.0 +2023-03-26 15:51:31,782 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71348.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 15:51:49,963 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5501, 1.4870, 2.1491, 3.1167, 2.1953, 2.2378, 1.2628, 2.4954], + device='cuda:5'), covar=tensor([0.1690, 0.1425, 0.1124, 0.0459, 0.0740, 0.1358, 0.1587, 0.0529], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0116, 0.0134, 0.0164, 0.0101, 0.0138, 0.0126, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 15:51:55,371 INFO [finetune.py:976] (5/7) Epoch 13, batch 2650, loss[loss=0.1909, simple_loss=0.2623, pruned_loss=0.05976, over 4795.00 frames. ], tot_loss[loss=0.192, simple_loss=0.2596, pruned_loss=0.06217, over 955938.66 frames. ], batch size: 51, lr: 3.60e-03, grad_scale: 32.0 +2023-03-26 15:51:58,320 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71386.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 15:52:29,325 INFO [finetune.py:976] (5/7) Epoch 13, batch 2700, loss[loss=0.156, simple_loss=0.2158, pruned_loss=0.04814, over 4249.00 frames. ], tot_loss[loss=0.1893, simple_loss=0.2572, pruned_loss=0.06071, over 953456.46 frames. ], batch size: 66, lr: 3.60e-03, grad_scale: 32.0 +2023-03-26 15:52:34,538 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.057e+02 1.578e+02 1.884e+02 2.307e+02 4.300e+02, threshold=3.769e+02, percent-clipped=2.0 +2023-03-26 15:53:02,923 INFO [finetune.py:976] (5/7) Epoch 13, batch 2750, loss[loss=0.218, simple_loss=0.2696, pruned_loss=0.08323, over 4831.00 frames. ], tot_loss[loss=0.1883, simple_loss=0.2553, pruned_loss=0.06067, over 955999.20 frames. ], batch size: 39, lr: 3.59e-03, grad_scale: 32.0 +2023-03-26 15:53:36,644 INFO [finetune.py:976] (5/7) Epoch 13, batch 2800, loss[loss=0.1811, simple_loss=0.2472, pruned_loss=0.05749, over 4777.00 frames. ], tot_loss[loss=0.1858, simple_loss=0.2523, pruned_loss=0.05967, over 956123.47 frames. ], batch size: 26, lr: 3.59e-03, grad_scale: 32.0 +2023-03-26 15:53:40,883 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.138e+02 1.564e+02 1.863e+02 2.304e+02 3.302e+02, threshold=3.726e+02, percent-clipped=0.0 +2023-03-26 15:53:48,372 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-03-26 15:54:23,057 INFO [finetune.py:976] (5/7) Epoch 13, batch 2850, loss[loss=0.2074, simple_loss=0.2843, pruned_loss=0.06529, over 4831.00 frames. ], tot_loss[loss=0.1851, simple_loss=0.2515, pruned_loss=0.05936, over 954856.74 frames. ], batch size: 39, lr: 3.59e-03, grad_scale: 32.0 +2023-03-26 15:54:52,350 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=71616.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 15:55:06,324 INFO [finetune.py:976] (5/7) Epoch 13, batch 2900, loss[loss=0.2246, simple_loss=0.297, pruned_loss=0.07607, over 4839.00 frames. ], tot_loss[loss=0.1875, simple_loss=0.2544, pruned_loss=0.06032, over 951876.05 frames. 
], batch size: 49, lr: 3.59e-03, grad_scale: 32.0 +2023-03-26 15:55:15,493 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.635e+01 1.661e+02 1.944e+02 2.530e+02 6.475e+02, threshold=3.888e+02, percent-clipped=5.0 +2023-03-26 15:55:24,626 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=71648.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 15:55:49,620 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=71677.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 15:55:58,581 INFO [finetune.py:976] (5/7) Epoch 13, batch 2950, loss[loss=0.1386, simple_loss=0.2141, pruned_loss=0.03156, over 4776.00 frames. ], tot_loss[loss=0.1895, simple_loss=0.2564, pruned_loss=0.06135, over 951369.34 frames. ], batch size: 26, lr: 3.59e-03, grad_scale: 32.0 +2023-03-26 15:55:59,273 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2641, 2.9520, 2.7571, 1.1939, 3.0150, 2.2184, 0.7053, 1.9081], + device='cuda:5'), covar=tensor([0.2399, 0.1975, 0.1762, 0.3445, 0.1334, 0.1083, 0.4078, 0.1597], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0174, 0.0159, 0.0129, 0.0157, 0.0122, 0.0147, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 15:56:00,478 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=71686.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 15:56:01,704 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2231, 3.6355, 3.8876, 4.0649, 3.9964, 3.7932, 4.3090, 1.5131], + device='cuda:5'), covar=tensor([0.0767, 0.0791, 0.0754, 0.0954, 0.1134, 0.1549, 0.0668, 0.5089], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0243, 0.0276, 0.0290, 0.0329, 0.0282, 0.0301, 0.0296], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 15:56:11,124 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=71696.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 15:56:39,793 INFO [finetune.py:976] (5/7) Epoch 13, batch 3000, loss[loss=0.2236, simple_loss=0.2965, pruned_loss=0.07535, over 4825.00 frames. ], tot_loss[loss=0.1917, simple_loss=0.259, pruned_loss=0.06223, over 952938.42 frames. 
], batch size: 49, lr: 3.59e-03, grad_scale: 32.0 +2023-03-26 15:56:39,793 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 15:56:46,713 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6890, 1.6314, 1.6497, 1.6627, 1.0700, 2.9986, 1.2038, 1.7501], + device='cuda:5'), covar=tensor([0.3185, 0.2366, 0.1968, 0.2246, 0.1840, 0.0262, 0.2544, 0.1186], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0121, 0.0124, 0.0116, 0.0099, 0.0098, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 15:56:48,748 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8515, 1.0755, 1.9415, 1.8152, 1.7480, 1.6175, 1.7215, 1.7425], + device='cuda:5'), covar=tensor([0.4410, 0.5007, 0.4289, 0.4266, 0.5650, 0.4162, 0.5238, 0.3843], + device='cuda:5'), in_proj_covar=tensor([0.0242, 0.0240, 0.0256, 0.0265, 0.0262, 0.0236, 0.0277, 0.0235], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 15:56:50,410 INFO [finetune.py:1010] (5/7) Epoch 13, validation: loss=0.1572, simple_loss=0.2278, pruned_loss=0.04333, over 2265189.00 frames. +2023-03-26 15:56:50,410 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 15:56:51,093 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=71734.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 15:56:55,668 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.204e+02 1.624e+02 1.953e+02 2.376e+02 4.887e+02, threshold=3.907e+02, percent-clipped=1.0 +2023-03-26 15:57:22,729 INFO [finetune.py:976] (5/7) Epoch 13, batch 3050, loss[loss=0.1793, simple_loss=0.2613, pruned_loss=0.04859, over 4804.00 frames. ], tot_loss[loss=0.1932, simple_loss=0.2611, pruned_loss=0.06264, over 954412.51 frames. ], batch size: 45, lr: 3.59e-03, grad_scale: 32.0 +2023-03-26 15:57:41,953 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8480, 1.7492, 1.5644, 1.8846, 2.3975, 1.9407, 1.7027, 1.4526], + device='cuda:5'), covar=tensor([0.2190, 0.2027, 0.1904, 0.1680, 0.1746, 0.1205, 0.2339, 0.1851], + device='cuda:5'), in_proj_covar=tensor([0.0239, 0.0207, 0.0210, 0.0190, 0.0240, 0.0183, 0.0214, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 15:57:55,478 INFO [finetune.py:976] (5/7) Epoch 13, batch 3100, loss[loss=0.17, simple_loss=0.2381, pruned_loss=0.05097, over 4891.00 frames. ], tot_loss[loss=0.1909, simple_loss=0.2585, pruned_loss=0.06168, over 954551.91 frames. 
], batch size: 43, lr: 3.59e-03, grad_scale: 32.0 +2023-03-26 15:58:01,083 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.937e+01 1.560e+02 1.843e+02 2.215e+02 5.565e+02, threshold=3.687e+02, percent-clipped=1.0 +2023-03-26 15:58:11,157 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.8727, 3.4020, 3.5799, 3.7377, 3.5901, 3.4508, 3.9582, 1.3598], + device='cuda:5'), covar=tensor([0.0888, 0.0854, 0.0854, 0.1028, 0.1422, 0.1591, 0.0785, 0.5350], + device='cuda:5'), in_proj_covar=tensor([0.0351, 0.0244, 0.0276, 0.0291, 0.0331, 0.0282, 0.0303, 0.0297], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 15:58:29,154 INFO [finetune.py:976] (5/7) Epoch 13, batch 3150, loss[loss=0.2017, simple_loss=0.2592, pruned_loss=0.07209, over 4944.00 frames. ], tot_loss[loss=0.1885, simple_loss=0.2557, pruned_loss=0.06068, over 955644.78 frames. ], batch size: 33, lr: 3.59e-03, grad_scale: 32.0 +2023-03-26 15:58:43,992 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8628, 1.6471, 1.4990, 1.2635, 1.6444, 1.6702, 1.6127, 2.1769], + device='cuda:5'), covar=tensor([0.4230, 0.4354, 0.3539, 0.4332, 0.4072, 0.2385, 0.3889, 0.1911], + device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0260, 0.0224, 0.0278, 0.0246, 0.0212, 0.0247, 0.0221], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 15:58:53,742 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=71919.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 15:59:03,051 INFO [finetune.py:976] (5/7) Epoch 13, batch 3200, loss[loss=0.2012, simple_loss=0.262, pruned_loss=0.07019, over 4835.00 frames. ], tot_loss[loss=0.1869, simple_loss=0.2536, pruned_loss=0.06012, over 957147.97 frames. ], batch size: 33, lr: 3.59e-03, grad_scale: 32.0 +2023-03-26 15:59:07,313 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.138e+02 1.561e+02 1.912e+02 2.265e+02 3.518e+02, threshold=3.824e+02, percent-clipped=0.0 +2023-03-26 15:59:40,764 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71972.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 15:59:54,383 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=71980.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 15:59:56,074 INFO [finetune.py:976] (5/7) Epoch 13, batch 3250, loss[loss=0.2317, simple_loss=0.2947, pruned_loss=0.08441, over 4816.00 frames. ], tot_loss[loss=0.187, simple_loss=0.254, pruned_loss=0.05999, over 957312.79 frames. ], batch size: 45, lr: 3.59e-03, grad_scale: 32.0 +2023-03-26 16:00:39,653 INFO [finetune.py:976] (5/7) Epoch 13, batch 3300, loss[loss=0.163, simple_loss=0.2259, pruned_loss=0.05007, over 4704.00 frames. ], tot_loss[loss=0.1884, simple_loss=0.2557, pruned_loss=0.06057, over 955776.39 frames. 
], batch size: 23, lr: 3.59e-03, grad_scale: 16.0 +2023-03-26 16:00:44,477 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.095e+02 1.593e+02 1.995e+02 2.341e+02 5.205e+02, threshold=3.991e+02, percent-clipped=4.0 +2023-03-26 16:01:02,425 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9060, 1.4741, 1.1047, 1.7522, 2.0766, 1.6237, 1.6295, 1.7123], + device='cuda:5'), covar=tensor([0.1273, 0.1764, 0.1845, 0.1031, 0.1815, 0.1876, 0.1250, 0.1594], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0095, 0.0112, 0.0092, 0.0120, 0.0094, 0.0099, 0.0090], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 16:01:29,184 INFO [finetune.py:976] (5/7) Epoch 13, batch 3350, loss[loss=0.1453, simple_loss=0.2195, pruned_loss=0.03556, over 4787.00 frames. ], tot_loss[loss=0.1898, simple_loss=0.2576, pruned_loss=0.06095, over 955968.66 frames. ], batch size: 29, lr: 3.59e-03, grad_scale: 16.0 +2023-03-26 16:02:00,572 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2586, 2.0731, 1.5776, 0.7652, 1.8166, 1.7915, 1.5466, 1.8081], + device='cuda:5'), covar=tensor([0.0896, 0.0854, 0.1558, 0.2074, 0.1460, 0.2245, 0.2386, 0.1014], + device='cuda:5'), in_proj_covar=tensor([0.0165, 0.0196, 0.0199, 0.0184, 0.0212, 0.0206, 0.0222, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:02:07,933 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=72129.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:02:11,038 INFO [finetune.py:976] (5/7) Epoch 13, batch 3400, loss[loss=0.2113, simple_loss=0.277, pruned_loss=0.0728, over 4887.00 frames. ], tot_loss[loss=0.1912, simple_loss=0.2593, pruned_loss=0.06153, over 956702.14 frames. ], batch size: 35, lr: 3.59e-03, grad_scale: 16.0 +2023-03-26 16:02:16,764 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.109e+02 1.708e+02 2.008e+02 2.371e+02 4.954e+02, threshold=4.015e+02, percent-clipped=4.0 +2023-03-26 16:02:49,868 INFO [finetune.py:976] (5/7) Epoch 13, batch 3450, loss[loss=0.1952, simple_loss=0.2627, pruned_loss=0.06383, over 4814.00 frames. ], tot_loss[loss=0.1895, simple_loss=0.2574, pruned_loss=0.06077, over 953090.76 frames. ], batch size: 33, lr: 3.59e-03, grad_scale: 16.0 +2023-03-26 16:02:55,171 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=72190.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:03:23,344 INFO [finetune.py:976] (5/7) Epoch 13, batch 3500, loss[loss=0.1795, simple_loss=0.2362, pruned_loss=0.06139, over 4867.00 frames. ], tot_loss[loss=0.1895, simple_loss=0.2567, pruned_loss=0.06111, over 952868.10 frames. 
], batch size: 31, lr: 3.59e-03, grad_scale: 16.0 +2023-03-26 16:03:29,064 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.069e+02 1.641e+02 1.993e+02 2.438e+02 4.377e+02, threshold=3.986e+02, percent-clipped=2.0 +2023-03-26 16:03:40,094 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0810, 1.0217, 1.0093, 0.5423, 0.7982, 1.1583, 1.1920, 1.0012], + device='cuda:5'), covar=tensor([0.0749, 0.0481, 0.0459, 0.0428, 0.0483, 0.0533, 0.0334, 0.0583], + device='cuda:5'), in_proj_covar=tensor([0.0127, 0.0153, 0.0123, 0.0129, 0.0131, 0.0127, 0.0143, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.3926e-05, 1.1160e-04, 8.8473e-05, 9.3179e-05, 9.3223e-05, 9.2324e-05, + 1.0436e-04, 1.0594e-04], device='cuda:5') +2023-03-26 16:03:45,204 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1116, 1.8986, 1.8275, 2.0454, 2.8692, 2.1552, 2.2027, 1.6324], + device='cuda:5'), covar=tensor([0.2570, 0.2389, 0.2307, 0.1962, 0.1768, 0.1263, 0.2217, 0.2293], + device='cuda:5'), in_proj_covar=tensor([0.0239, 0.0208, 0.0211, 0.0191, 0.0241, 0.0184, 0.0214, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:03:49,884 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72272.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:03:51,653 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72275.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:03:56,422 INFO [finetune.py:976] (5/7) Epoch 13, batch 3550, loss[loss=0.1739, simple_loss=0.2424, pruned_loss=0.05273, over 4912.00 frames. ], tot_loss[loss=0.1883, simple_loss=0.2548, pruned_loss=0.06089, over 955273.35 frames. ], batch size: 36, lr: 3.59e-03, grad_scale: 16.0 +2023-03-26 16:03:58,073 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.67 vs. limit=2.0 +2023-03-26 16:04:37,460 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=72320.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:04:47,546 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=72326.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:04:51,694 INFO [finetune.py:976] (5/7) Epoch 13, batch 3600, loss[loss=0.1755, simple_loss=0.247, pruned_loss=0.052, over 4826.00 frames. ], tot_loss[loss=0.186, simple_loss=0.2521, pruned_loss=0.05989, over 956591.47 frames. ], batch size: 33, lr: 3.59e-03, grad_scale: 16.0 +2023-03-26 16:04:58,281 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.845e+01 1.525e+02 1.754e+02 2.048e+02 3.586e+02, threshold=3.507e+02, percent-clipped=0.0 +2023-03-26 16:05:09,761 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0079, 2.0835, 2.2494, 1.3734, 2.1237, 2.2005, 2.0893, 1.8771], + device='cuda:5'), covar=tensor([0.0686, 0.0701, 0.0668, 0.0937, 0.0659, 0.0752, 0.0699, 0.1096], + device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0133, 0.0141, 0.0123, 0.0124, 0.0142, 0.0141, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:05:42,445 INFO [finetune.py:976] (5/7) Epoch 13, batch 3650, loss[loss=0.1939, simple_loss=0.2551, pruned_loss=0.06635, over 4835.00 frames. ], tot_loss[loss=0.1871, simple_loss=0.2542, pruned_loss=0.06006, over 955971.22 frames. 
], batch size: 33, lr: 3.59e-03, grad_scale: 16.0 +2023-03-26 16:05:50,466 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=72387.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:06:45,516 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6162, 1.5996, 1.3406, 1.5586, 2.0061, 1.8117, 1.6102, 1.3892], + device='cuda:5'), covar=tensor([0.0318, 0.0305, 0.0565, 0.0300, 0.0221, 0.0440, 0.0334, 0.0402], + device='cuda:5'), in_proj_covar=tensor([0.0093, 0.0108, 0.0139, 0.0112, 0.0101, 0.0104, 0.0094, 0.0107], + device='cuda:5'), out_proj_covar=tensor([7.2153e-05, 8.3823e-05, 1.0997e-04, 8.6755e-05, 7.8509e-05, 7.7341e-05, + 7.1243e-05, 8.2404e-05], device='cuda:5') +2023-03-26 16:06:54,368 INFO [finetune.py:976] (5/7) Epoch 13, batch 3700, loss[loss=0.1814, simple_loss=0.2611, pruned_loss=0.05079, over 4819.00 frames. ], tot_loss[loss=0.1894, simple_loss=0.2571, pruned_loss=0.06086, over 954382.48 frames. ], batch size: 40, lr: 3.59e-03, grad_scale: 16.0 +2023-03-26 16:07:04,383 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.108e+02 1.616e+02 1.915e+02 2.308e+02 4.437e+02, threshold=3.829e+02, percent-clipped=1.0 +2023-03-26 16:07:36,930 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3122, 2.2470, 1.9113, 2.3241, 2.2611, 1.9950, 2.6485, 2.3373], + device='cuda:5'), covar=tensor([0.1295, 0.2039, 0.2942, 0.2358, 0.2521, 0.1674, 0.2752, 0.1820], + device='cuda:5'), in_proj_covar=tensor([0.0180, 0.0187, 0.0234, 0.0254, 0.0245, 0.0200, 0.0213, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:07:52,818 INFO [finetune.py:976] (5/7) Epoch 13, batch 3750, loss[loss=0.2367, simple_loss=0.3028, pruned_loss=0.08528, over 4903.00 frames. ], tot_loss[loss=0.1902, simple_loss=0.2584, pruned_loss=0.06103, over 954349.22 frames. ], batch size: 36, lr: 3.59e-03, grad_scale: 16.0 +2023-03-26 16:07:54,135 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72485.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:08:22,327 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1902, 1.9985, 2.0644, 0.9550, 2.2801, 2.3689, 2.1307, 1.8811], + device='cuda:5'), covar=tensor([0.0954, 0.0756, 0.0505, 0.0646, 0.0448, 0.0771, 0.0455, 0.0656], + device='cuda:5'), in_proj_covar=tensor([0.0127, 0.0153, 0.0123, 0.0129, 0.0131, 0.0127, 0.0143, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.3714e-05, 1.1182e-04, 8.8242e-05, 9.3130e-05, 9.3230e-05, 9.1835e-05, + 1.0397e-04, 1.0597e-04], device='cuda:5') +2023-03-26 16:08:29,283 INFO [finetune.py:976] (5/7) Epoch 13, batch 3800, loss[loss=0.1694, simple_loss=0.2464, pruned_loss=0.04626, over 4858.00 frames. ], tot_loss[loss=0.1905, simple_loss=0.2592, pruned_loss=0.06091, over 955846.19 frames. ], batch size: 31, lr: 3.59e-03, grad_scale: 16.0 +2023-03-26 16:08:34,665 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.445e+01 1.578e+02 1.803e+02 2.155e+02 3.901e+02, threshold=3.607e+02, percent-clipped=1.0 +2023-03-26 16:08:50,125 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0 +2023-03-26 16:08:56,822 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72575.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:09:02,584 INFO [finetune.py:976] (5/7) Epoch 13, batch 3850, loss[loss=0.1994, simple_loss=0.2553, pruned_loss=0.0718, over 4808.00 frames. 
], tot_loss[loss=0.1895, simple_loss=0.2576, pruned_loss=0.06072, over 954984.92 frames. ], batch size: 41, lr: 3.59e-03, grad_scale: 16.0 +2023-03-26 16:09:04,569 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1793, 1.9301, 1.4369, 0.6331, 1.6569, 1.8102, 1.6503, 1.7735], + device='cuda:5'), covar=tensor([0.0854, 0.0783, 0.1486, 0.1952, 0.1306, 0.2481, 0.2320, 0.0917], + device='cuda:5'), in_proj_covar=tensor([0.0165, 0.0196, 0.0200, 0.0185, 0.0212, 0.0207, 0.0222, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:09:13,619 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0266, 0.8456, 0.9080, 1.0314, 1.2334, 1.1316, 0.9800, 0.8877], + device='cuda:5'), covar=tensor([0.0358, 0.0352, 0.0610, 0.0313, 0.0283, 0.0428, 0.0354, 0.0419], + device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0109, 0.0140, 0.0112, 0.0101, 0.0105, 0.0095, 0.0108], + device='cuda:5'), out_proj_covar=tensor([7.2802e-05, 8.4593e-05, 1.1072e-04, 8.7312e-05, 7.8945e-05, 7.8021e-05, + 7.1660e-05, 8.2941e-05], device='cuda:5') +2023-03-26 16:09:35,398 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=72623.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:09:46,307 INFO [finetune.py:976] (5/7) Epoch 13, batch 3900, loss[loss=0.1488, simple_loss=0.216, pruned_loss=0.04074, over 4712.00 frames. ], tot_loss[loss=0.1882, simple_loss=0.2555, pruned_loss=0.06049, over 955064.73 frames. ], batch size: 23, lr: 3.59e-03, grad_scale: 16.0 +2023-03-26 16:09:51,186 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.402e+01 1.493e+02 1.751e+02 2.217e+02 3.590e+02, threshold=3.501e+02, percent-clipped=0.0 +2023-03-26 16:10:10,400 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6301, 1.5530, 1.8844, 1.3007, 1.6345, 1.8283, 1.4902, 1.9893], + device='cuda:5'), covar=tensor([0.1227, 0.2178, 0.1321, 0.1738, 0.0856, 0.1301, 0.2650, 0.0918], + device='cuda:5'), in_proj_covar=tensor([0.0194, 0.0204, 0.0193, 0.0190, 0.0177, 0.0213, 0.0215, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:10:18,077 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72682.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:10:18,647 INFO [finetune.py:976] (5/7) Epoch 13, batch 3950, loss[loss=0.1857, simple_loss=0.2425, pruned_loss=0.06439, over 3903.00 frames. ], tot_loss[loss=0.1864, simple_loss=0.2527, pruned_loss=0.05998, over 953052.95 frames. ], batch size: 17, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:10:51,459 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-03-26 16:11:10,485 INFO [finetune.py:976] (5/7) Epoch 13, batch 4000, loss[loss=0.1929, simple_loss=0.2632, pruned_loss=0.0613, over 4920.00 frames. ], tot_loss[loss=0.1857, simple_loss=0.2522, pruned_loss=0.05958, over 953059.04 frames. ], batch size: 43, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:11:16,801 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.220e+02 1.596e+02 1.921e+02 2.181e+02 4.609e+02, threshold=3.842e+02, percent-clipped=3.0 +2023-03-26 16:11:44,634 INFO [finetune.py:976] (5/7) Epoch 13, batch 4050, loss[loss=0.2111, simple_loss=0.2867, pruned_loss=0.0677, over 4820.00 frames. ], tot_loss[loss=0.1871, simple_loss=0.254, pruned_loss=0.06009, over 952975.68 frames. 
], batch size: 39, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:11:45,434 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9799, 1.5802, 2.1475, 1.5044, 1.9174, 2.1474, 1.5601, 2.2532], + device='cuda:5'), covar=tensor([0.1164, 0.1977, 0.1404, 0.1965, 0.0871, 0.1369, 0.2598, 0.0806], + device='cuda:5'), in_proj_covar=tensor([0.0194, 0.0204, 0.0192, 0.0190, 0.0177, 0.0212, 0.0215, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:11:46,046 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72785.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:12:39,940 INFO [finetune.py:976] (5/7) Epoch 13, batch 4100, loss[loss=0.2026, simple_loss=0.2692, pruned_loss=0.06796, over 4926.00 frames. ], tot_loss[loss=0.1891, simple_loss=0.2566, pruned_loss=0.06082, over 952618.09 frames. ], batch size: 33, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:12:39,998 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=72833.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:12:45,295 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.089e+02 1.592e+02 1.875e+02 2.230e+02 3.624e+02, threshold=3.749e+02, percent-clipped=0.0 +2023-03-26 16:13:13,291 INFO [finetune.py:976] (5/7) Epoch 13, batch 4150, loss[loss=0.2154, simple_loss=0.2845, pruned_loss=0.07312, over 4762.00 frames. ], tot_loss[loss=0.1909, simple_loss=0.2582, pruned_loss=0.06179, over 951552.27 frames. ], batch size: 28, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:13:26,383 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=72894.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:13:33,977 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7002, 1.4931, 2.0259, 3.3994, 2.2473, 2.4659, 0.8999, 2.6830], + device='cuda:5'), covar=tensor([0.1757, 0.1427, 0.1394, 0.0549, 0.0831, 0.1315, 0.1993, 0.0549], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0114, 0.0132, 0.0163, 0.0100, 0.0137, 0.0125, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 16:13:35,856 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4955, 1.4943, 1.8383, 1.8202, 1.5230, 3.3286, 1.2709, 1.5816], + device='cuda:5'), covar=tensor([0.0950, 0.1757, 0.1244, 0.0988, 0.1613, 0.0231, 0.1546, 0.1818], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0073, 0.0077, 0.0091, 0.0081, 0.0084, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 16:14:03,786 INFO [finetune.py:976] (5/7) Epoch 13, batch 4200, loss[loss=0.2217, simple_loss=0.288, pruned_loss=0.07767, over 4904.00 frames. ], tot_loss[loss=0.1911, simple_loss=0.2587, pruned_loss=0.06181, over 953695.04 frames. 
], batch size: 36, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:14:08,712 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.693e+01 1.496e+02 1.812e+02 2.169e+02 4.504e+02, threshold=3.624e+02, percent-clipped=2.0 +2023-03-26 16:14:18,728 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=72955.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:14:35,613 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=72964.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 16:14:52,529 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72982.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:14:53,055 INFO [finetune.py:976] (5/7) Epoch 13, batch 4250, loss[loss=0.2139, simple_loss=0.2791, pruned_loss=0.07435, over 4203.00 frames. ], tot_loss[loss=0.1887, simple_loss=0.2559, pruned_loss=0.06081, over 953362.98 frames. ], batch size: 65, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:14:56,859 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0514, 2.0994, 2.1655, 1.4871, 2.1525, 2.2506, 2.2523, 1.8732], + device='cuda:5'), covar=tensor([0.0588, 0.0580, 0.0644, 0.0882, 0.0736, 0.0607, 0.0572, 0.0994], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0131, 0.0140, 0.0122, 0.0122, 0.0140, 0.0139, 0.0160], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:14:58,107 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0380, 1.6698, 2.2615, 1.5173, 1.9062, 2.1363, 1.6222, 2.3463], + device='cuda:5'), covar=tensor([0.1177, 0.2006, 0.1323, 0.1950, 0.0822, 0.1493, 0.2453, 0.0859], + device='cuda:5'), in_proj_covar=tensor([0.0195, 0.0205, 0.0192, 0.0191, 0.0178, 0.0213, 0.0216, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:15:21,393 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=73025.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 16:15:24,378 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=73030.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:15:26,656 INFO [finetune.py:976] (5/7) Epoch 13, batch 4300, loss[loss=0.2426, simple_loss=0.2964, pruned_loss=0.09443, over 4907.00 frames. ], tot_loss[loss=0.1869, simple_loss=0.2534, pruned_loss=0.06015, over 955585.41 frames. ], batch size: 36, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:15:31,992 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.052e+02 1.498e+02 1.782e+02 2.254e+02 4.055e+02, threshold=3.563e+02, percent-clipped=2.0 +2023-03-26 16:15:59,433 INFO [finetune.py:976] (5/7) Epoch 13, batch 4350, loss[loss=0.1799, simple_loss=0.2435, pruned_loss=0.05816, over 4807.00 frames. ], tot_loss[loss=0.1851, simple_loss=0.2515, pruned_loss=0.05941, over 956283.79 frames. ], batch size: 51, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:16:34,971 INFO [finetune.py:976] (5/7) Epoch 13, batch 4400, loss[loss=0.187, simple_loss=0.2547, pruned_loss=0.05965, over 4815.00 frames. ], tot_loss[loss=0.187, simple_loss=0.2536, pruned_loss=0.06017, over 955539.33 frames. 
], batch size: 40, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:16:40,306 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.328e+01 1.433e+02 1.829e+02 2.142e+02 3.915e+02, threshold=3.659e+02, percent-clipped=1.0 +2023-03-26 16:16:44,605 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0928, 2.0308, 1.6921, 1.9724, 1.9256, 1.9178, 1.9625, 2.5938], + device='cuda:5'), covar=tensor([0.4292, 0.4891, 0.3627, 0.4241, 0.4391, 0.2666, 0.4225, 0.1929], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0260, 0.0224, 0.0277, 0.0246, 0.0212, 0.0248, 0.0222], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:17:06,979 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6855, 1.5569, 1.5074, 1.5748, 1.0130, 3.7123, 1.4133, 1.8846], + device='cuda:5'), covar=tensor([0.3460, 0.2621, 0.2253, 0.2449, 0.1985, 0.0180, 0.2660, 0.1367], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0121, 0.0124, 0.0115, 0.0098, 0.0098, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 16:17:08,723 INFO [finetune.py:976] (5/7) Epoch 13, batch 4450, loss[loss=0.1954, simple_loss=0.2731, pruned_loss=0.05889, over 4817.00 frames. ], tot_loss[loss=0.1898, simple_loss=0.2571, pruned_loss=0.06122, over 956511.22 frames. ], batch size: 51, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:17:53,146 INFO [finetune.py:976] (5/7) Epoch 13, batch 4500, loss[loss=0.1693, simple_loss=0.2425, pruned_loss=0.04811, over 4895.00 frames. ], tot_loss[loss=0.1912, simple_loss=0.2586, pruned_loss=0.06188, over 956286.43 frames. ], batch size: 32, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:17:58,018 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.220e+02 1.732e+02 2.105e+02 2.505e+02 4.470e+02, threshold=4.210e+02, percent-clipped=3.0 +2023-03-26 16:18:04,480 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=73250.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:18:08,552 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-03-26 16:18:12,268 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0692, 1.3469, 0.7863, 1.9195, 2.3358, 1.7122, 1.6102, 1.9711], + device='cuda:5'), covar=tensor([0.1464, 0.2060, 0.2213, 0.1117, 0.1811, 0.2091, 0.1473, 0.1868], + device='cuda:5'), in_proj_covar=tensor([0.0088, 0.0094, 0.0111, 0.0091, 0.0119, 0.0093, 0.0098, 0.0090], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 16:18:19,707 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8516, 1.6859, 1.6514, 1.7371, 1.1975, 4.4831, 1.6207, 2.0503], + device='cuda:5'), covar=tensor([0.3480, 0.2500, 0.2208, 0.2417, 0.1922, 0.0125, 0.2537, 0.1372], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0115, 0.0121, 0.0123, 0.0115, 0.0098, 0.0098, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 16:18:26,892 INFO [finetune.py:976] (5/7) Epoch 13, batch 4550, loss[loss=0.2227, simple_loss=0.2846, pruned_loss=0.08041, over 4737.00 frames. ], tot_loss[loss=0.1916, simple_loss=0.2591, pruned_loss=0.06204, over 957005.15 frames. 
], batch size: 54, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:18:34,947 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=73293.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:19:00,019 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.6783, 1.7033, 1.6540, 1.0023, 1.7658, 1.9018, 1.8877, 1.4737], + device='cuda:5'), covar=tensor([0.0819, 0.0588, 0.0589, 0.0566, 0.0418, 0.0622, 0.0381, 0.0710], + device='cuda:5'), in_proj_covar=tensor([0.0125, 0.0152, 0.0122, 0.0128, 0.0131, 0.0126, 0.0142, 0.0145], + device='cuda:5'), out_proj_covar=tensor([9.2262e-05, 1.1099e-04, 8.7931e-05, 9.2326e-05, 9.2733e-05, 9.1100e-05, + 1.0357e-04, 1.0541e-04], device='cuda:5') +2023-03-26 16:19:07,529 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=73320.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 16:19:19,980 INFO [finetune.py:976] (5/7) Epoch 13, batch 4600, loss[loss=0.1549, simple_loss=0.2311, pruned_loss=0.03931, over 4773.00 frames. ], tot_loss[loss=0.1894, simple_loss=0.2576, pruned_loss=0.06064, over 955897.35 frames. ], batch size: 26, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:19:24,708 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.46 vs. limit=5.0 +2023-03-26 16:19:24,888 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.809e+01 1.600e+02 1.901e+02 2.194e+02 3.702e+02, threshold=3.803e+02, percent-clipped=0.0 +2023-03-26 16:19:42,025 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=73354.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:20:11,236 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8137, 1.3090, 0.7318, 1.7166, 2.1162, 1.3564, 1.5627, 1.6952], + device='cuda:5'), covar=tensor([0.1393, 0.2012, 0.2090, 0.1102, 0.1853, 0.1873, 0.1376, 0.1902], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0094, 0.0112, 0.0091, 0.0119, 0.0093, 0.0099, 0.0090], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 16:20:11,745 INFO [finetune.py:976] (5/7) Epoch 13, batch 4650, loss[loss=0.1992, simple_loss=0.2594, pruned_loss=0.06951, over 4822.00 frames. ], tot_loss[loss=0.1882, simple_loss=0.2553, pruned_loss=0.06059, over 952069.69 frames. ], batch size: 30, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:20:15,957 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6337, 1.4809, 1.4794, 1.5391, 1.0052, 3.2912, 1.2430, 1.7736], + device='cuda:5'), covar=tensor([0.3490, 0.2612, 0.2217, 0.2495, 0.2023, 0.0223, 0.2752, 0.1325], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0121, 0.0123, 0.0115, 0.0098, 0.0098, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 16:20:45,673 INFO [finetune.py:976] (5/7) Epoch 13, batch 4700, loss[loss=0.1806, simple_loss=0.2447, pruned_loss=0.05825, over 4763.00 frames. ], tot_loss[loss=0.1856, simple_loss=0.2524, pruned_loss=0.05942, over 952490.11 frames. 
], batch size: 27, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:20:50,437 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.031e+02 1.614e+02 1.909e+02 2.257e+02 3.771e+02, threshold=3.817e+02, percent-clipped=0.0 +2023-03-26 16:21:16,016 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0999, 1.7512, 2.1447, 2.0065, 1.7450, 1.7993, 1.9570, 1.9184], + device='cuda:5'), covar=tensor([0.4090, 0.4594, 0.3469, 0.4281, 0.5603, 0.4356, 0.5585, 0.3497], + device='cuda:5'), in_proj_covar=tensor([0.0242, 0.0237, 0.0256, 0.0264, 0.0261, 0.0236, 0.0276, 0.0234], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:21:18,784 INFO [finetune.py:976] (5/7) Epoch 13, batch 4750, loss[loss=0.1376, simple_loss=0.2139, pruned_loss=0.03066, over 4874.00 frames. ], tot_loss[loss=0.1842, simple_loss=0.2507, pruned_loss=0.0589, over 954358.64 frames. ], batch size: 31, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:21:21,780 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6335, 1.1529, 0.8276, 1.4776, 1.9718, 1.0284, 1.3289, 1.4613], + device='cuda:5'), covar=tensor([0.1528, 0.2220, 0.2059, 0.1257, 0.2063, 0.2131, 0.1588, 0.2102], + device='cuda:5'), in_proj_covar=tensor([0.0088, 0.0094, 0.0112, 0.0091, 0.0119, 0.0093, 0.0099, 0.0090], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 16:21:36,190 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5085, 1.6575, 1.3310, 1.5497, 1.8851, 1.7346, 1.5296, 1.3882], + device='cuda:5'), covar=tensor([0.0375, 0.0321, 0.0553, 0.0285, 0.0209, 0.0579, 0.0326, 0.0415], + device='cuda:5'), in_proj_covar=tensor([0.0093, 0.0108, 0.0138, 0.0111, 0.0100, 0.0104, 0.0095, 0.0108], + device='cuda:5'), out_proj_covar=tensor([7.2444e-05, 8.3537e-05, 1.0937e-04, 8.6621e-05, 7.8057e-05, 7.6996e-05, + 7.1396e-05, 8.2820e-05], device='cuda:5') +2023-03-26 16:21:51,902 INFO [finetune.py:976] (5/7) Epoch 13, batch 4800, loss[loss=0.25, simple_loss=0.3026, pruned_loss=0.09874, over 4850.00 frames. ], tot_loss[loss=0.1885, simple_loss=0.2548, pruned_loss=0.06109, over 953665.51 frames. ], batch size: 44, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:21:57,193 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.195e+02 1.637e+02 2.007e+02 2.318e+02 3.852e+02, threshold=4.014e+02, percent-clipped=1.0 +2023-03-26 16:22:03,308 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=73550.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:22:12,402 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0393, 2.0266, 1.7459, 2.1510, 1.9654, 1.9470, 1.8662, 2.7170], + device='cuda:5'), covar=tensor([0.3983, 0.5264, 0.3493, 0.4576, 0.4838, 0.2636, 0.4752, 0.1624], + device='cuda:5'), in_proj_covar=tensor([0.0288, 0.0261, 0.0224, 0.0279, 0.0247, 0.0214, 0.0248, 0.0224], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:22:21,503 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0 +2023-03-26 16:22:24,806 INFO [finetune.py:976] (5/7) Epoch 13, batch 4850, loss[loss=0.1731, simple_loss=0.256, pruned_loss=0.04509, over 4834.00 frames. ], tot_loss[loss=0.1894, simple_loss=0.2566, pruned_loss=0.06112, over 952579.84 frames. 
], batch size: 47, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:22:30,082 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=73590.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:22:30,719 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9291, 1.7389, 1.5010, 1.5280, 1.6816, 1.6850, 1.6946, 2.3549], + device='cuda:5'), covar=tensor([0.4118, 0.4452, 0.3367, 0.4163, 0.4239, 0.2463, 0.4064, 0.1795], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0260, 0.0224, 0.0278, 0.0247, 0.0213, 0.0248, 0.0223], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:22:37,221 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=73598.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:23:00,189 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=73620.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 16:23:06,795 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5211, 1.4842, 1.7767, 1.7119, 1.5193, 3.2253, 1.3304, 1.5096], + device='cuda:5'), covar=tensor([0.0955, 0.1869, 0.1197, 0.0979, 0.1672, 0.0263, 0.1580, 0.1783], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0073, 0.0077, 0.0091, 0.0081, 0.0084, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 16:23:08,518 INFO [finetune.py:976] (5/7) Epoch 13, batch 4900, loss[loss=0.1877, simple_loss=0.2525, pruned_loss=0.06144, over 4153.00 frames. ], tot_loss[loss=0.1901, simple_loss=0.258, pruned_loss=0.06111, over 950615.76 frames. ], batch size: 65, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:23:14,278 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.113e+02 1.751e+02 2.108e+02 2.596e+02 5.059e+02, threshold=4.217e+02, percent-clipped=3.0 +2023-03-26 16:23:19,746 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=73649.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:23:21,042 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=73651.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:23:31,884 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=73668.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 16:23:41,382 INFO [finetune.py:976] (5/7) Epoch 13, batch 4950, loss[loss=0.1844, simple_loss=0.2594, pruned_loss=0.0547, over 4790.00 frames. ], tot_loss[loss=0.1907, simple_loss=0.2588, pruned_loss=0.06133, over 951241.37 frames. ], batch size: 51, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:24:24,445 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=73732.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:24:24,939 INFO [finetune.py:976] (5/7) Epoch 13, batch 5000, loss[loss=0.2052, simple_loss=0.2738, pruned_loss=0.06832, over 4898.00 frames. ], tot_loss[loss=0.19, simple_loss=0.2582, pruned_loss=0.06086, over 952780.74 frames. 
], batch size: 35, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:24:26,120 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3042, 1.5947, 1.4967, 1.5032, 1.6545, 3.0363, 1.3777, 1.6563], + device='cuda:5'), covar=tensor([0.0978, 0.1728, 0.1059, 0.0906, 0.1509, 0.0270, 0.1462, 0.1641], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0073, 0.0077, 0.0091, 0.0080, 0.0084, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 16:24:33,746 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.082e+02 1.586e+02 1.888e+02 2.371e+02 3.310e+02, threshold=3.776e+02, percent-clipped=1.0 +2023-03-26 16:25:04,199 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-03-26 16:25:14,973 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9288, 1.7801, 1.6601, 1.8178, 1.3434, 4.1061, 1.5548, 1.9917], + device='cuda:5'), covar=tensor([0.2993, 0.2267, 0.1977, 0.2060, 0.1583, 0.0129, 0.2614, 0.1197], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0121, 0.0124, 0.0115, 0.0098, 0.0098, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 16:25:17,335 INFO [finetune.py:976] (5/7) Epoch 13, batch 5050, loss[loss=0.2213, simple_loss=0.2771, pruned_loss=0.08274, over 4911.00 frames. ], tot_loss[loss=0.1874, simple_loss=0.2551, pruned_loss=0.05989, over 954407.74 frames. ], batch size: 36, lr: 3.58e-03, grad_scale: 16.0 +2023-03-26 16:25:21,829 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0942, 2.2737, 2.3631, 1.6752, 2.3537, 2.4381, 2.4692, 2.0401], + device='cuda:5'), covar=tensor([0.0609, 0.0628, 0.0685, 0.0926, 0.0640, 0.0671, 0.0569, 0.0944], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0133, 0.0142, 0.0123, 0.0123, 0.0141, 0.0141, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:25:26,626 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=73793.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:25:36,786 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0 +2023-03-26 16:25:53,900 INFO [finetune.py:976] (5/7) Epoch 13, batch 5100, loss[loss=0.1573, simple_loss=0.229, pruned_loss=0.04277, over 4814.00 frames. ], tot_loss[loss=0.1837, simple_loss=0.2511, pruned_loss=0.0582, over 956204.85 frames. ], batch size: 25, lr: 3.57e-03, grad_scale: 16.0 +2023-03-26 16:25:59,155 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.804e+01 1.435e+02 1.752e+02 2.086e+02 3.868e+02, threshold=3.504e+02, percent-clipped=1.0 +2023-03-26 16:26:27,642 INFO [finetune.py:976] (5/7) Epoch 13, batch 5150, loss[loss=0.2456, simple_loss=0.3035, pruned_loss=0.09388, over 4874.00 frames. ], tot_loss[loss=0.1848, simple_loss=0.2517, pruned_loss=0.05891, over 954454.78 frames. 
], batch size: 34, lr: 3.57e-03, grad_scale: 16.0 +2023-03-26 16:26:47,209 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3530, 2.2057, 2.0528, 2.2813, 2.1364, 2.1517, 2.1331, 2.8118], + device='cuda:5'), covar=tensor([0.3660, 0.4764, 0.3084, 0.3813, 0.3985, 0.2374, 0.3867, 0.1706], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0260, 0.0224, 0.0277, 0.0246, 0.0213, 0.0248, 0.0223], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:27:01,331 INFO [finetune.py:976] (5/7) Epoch 13, batch 5200, loss[loss=0.1894, simple_loss=0.2789, pruned_loss=0.04997, over 4804.00 frames. ], tot_loss[loss=0.189, simple_loss=0.2564, pruned_loss=0.06085, over 954485.35 frames. ], batch size: 41, lr: 3.57e-03, grad_scale: 16.0 +2023-03-26 16:27:06,220 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.103e+02 1.675e+02 1.952e+02 2.217e+02 3.649e+02, threshold=3.904e+02, percent-clipped=2.0 +2023-03-26 16:27:09,802 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=73946.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:27:11,690 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=73949.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:27:34,704 INFO [finetune.py:976] (5/7) Epoch 13, batch 5250, loss[loss=0.164, simple_loss=0.2383, pruned_loss=0.04483, over 4742.00 frames. ], tot_loss[loss=0.1899, simple_loss=0.258, pruned_loss=0.06091, over 953933.12 frames. ], batch size: 54, lr: 3.57e-03, grad_scale: 16.0 +2023-03-26 16:27:44,368 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=73997.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:27:45,064 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2076, 2.1551, 1.8741, 2.1601, 2.0603, 2.0876, 2.0757, 2.9225], + device='cuda:5'), covar=tensor([0.4148, 0.5475, 0.3554, 0.4767, 0.4717, 0.2453, 0.4795, 0.1706], + device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0260, 0.0224, 0.0277, 0.0246, 0.0213, 0.0247, 0.0223], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:28:11,320 INFO [finetune.py:976] (5/7) Epoch 13, batch 5300, loss[loss=0.274, simple_loss=0.3207, pruned_loss=0.1136, over 4147.00 frames. ], tot_loss[loss=0.1898, simple_loss=0.2583, pruned_loss=0.06069, over 954254.07 frames. ], batch size: 65, lr: 3.57e-03, grad_scale: 32.0 +2023-03-26 16:28:17,127 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.042e+02 1.638e+02 2.059e+02 2.515e+02 4.122e+02, threshold=4.117e+02, percent-clipped=3.0 +2023-03-26 16:28:33,152 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74064.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:28:45,017 INFO [finetune.py:976] (5/7) Epoch 13, batch 5350, loss[loss=0.1783, simple_loss=0.2437, pruned_loss=0.05642, over 4781.00 frames. ], tot_loss[loss=0.1903, simple_loss=0.2586, pruned_loss=0.06095, over 954048.00 frames. 
], batch size: 29, lr: 3.57e-03, grad_scale: 32.0 +2023-03-26 16:28:48,551 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74088.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:28:55,636 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5547, 1.6220, 2.1489, 1.8229, 1.7846, 4.0421, 1.4119, 1.8472], + device='cuda:5'), covar=tensor([0.0913, 0.1641, 0.1175, 0.0914, 0.1454, 0.0190, 0.1437, 0.1651], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0074, 0.0077, 0.0091, 0.0080, 0.0084, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 16:28:57,573 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.89 vs. limit=5.0 +2023-03-26 16:29:12,985 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74125.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:29:18,157 INFO [finetune.py:976] (5/7) Epoch 13, batch 5400, loss[loss=0.1647, simple_loss=0.2328, pruned_loss=0.04826, over 4737.00 frames. ], tot_loss[loss=0.1883, simple_loss=0.2564, pruned_loss=0.06009, over 954876.56 frames. ], batch size: 54, lr: 3.57e-03, grad_scale: 32.0 +2023-03-26 16:29:27,917 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.066e+02 1.534e+02 1.853e+02 2.261e+02 4.254e+02, threshold=3.706e+02, percent-clipped=1.0 +2023-03-26 16:30:11,888 INFO [finetune.py:976] (5/7) Epoch 13, batch 5450, loss[loss=0.1906, simple_loss=0.2542, pruned_loss=0.06351, over 4816.00 frames. ], tot_loss[loss=0.1857, simple_loss=0.2535, pruned_loss=0.05892, over 954980.77 frames. ], batch size: 45, lr: 3.57e-03, grad_scale: 32.0 +2023-03-26 16:30:56,457 INFO [finetune.py:976] (5/7) Epoch 13, batch 5500, loss[loss=0.2044, simple_loss=0.2646, pruned_loss=0.0721, over 4907.00 frames. ], tot_loss[loss=0.1824, simple_loss=0.2498, pruned_loss=0.05746, over 954083.56 frames. ], batch size: 36, lr: 3.57e-03, grad_scale: 32.0 +2023-03-26 16:31:01,348 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74240.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:31:01,849 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.610e+01 1.534e+02 1.911e+02 2.187e+02 5.924e+02, threshold=3.822e+02, percent-clipped=2.0 +2023-03-26 16:31:04,992 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74246.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:31:30,484 INFO [finetune.py:976] (5/7) Epoch 13, batch 5550, loss[loss=0.2549, simple_loss=0.3168, pruned_loss=0.09652, over 4840.00 frames. ], tot_loss[loss=0.1844, simple_loss=0.2516, pruned_loss=0.05861, over 953474.83 frames. ], batch size: 47, lr: 3.57e-03, grad_scale: 32.0 +2023-03-26 16:31:37,726 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=74294.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:31:42,484 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74301.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:32:02,279 INFO [finetune.py:976] (5/7) Epoch 13, batch 5600, loss[loss=0.208, simple_loss=0.2879, pruned_loss=0.06408, over 4841.00 frames. ], tot_loss[loss=0.188, simple_loss=0.2559, pruned_loss=0.06003, over 953403.67 frames. 
], batch size: 49, lr: 3.57e-03, grad_scale: 32.0 +2023-03-26 16:32:06,850 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.439e+01 1.528e+02 1.833e+02 2.251e+02 4.644e+02, threshold=3.666e+02, percent-clipped=1.0 +2023-03-26 16:32:31,595 INFO [finetune.py:976] (5/7) Epoch 13, batch 5650, loss[loss=0.1834, simple_loss=0.2558, pruned_loss=0.05551, over 4829.00 frames. ], tot_loss[loss=0.1903, simple_loss=0.259, pruned_loss=0.06079, over 955226.96 frames. ], batch size: 30, lr: 3.57e-03, grad_scale: 32.0 +2023-03-26 16:32:35,022 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74388.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:32:54,216 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74420.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:33:01,852 INFO [finetune.py:976] (5/7) Epoch 13, batch 5700, loss[loss=0.1776, simple_loss=0.2324, pruned_loss=0.06135, over 4157.00 frames. ], tot_loss[loss=0.1879, simple_loss=0.2548, pruned_loss=0.06046, over 934180.11 frames. ], batch size: 18, lr: 3.57e-03, grad_scale: 32.0 +2023-03-26 16:33:03,648 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=74436.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:33:06,486 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.627e+01 1.487e+02 1.916e+02 2.529e+02 4.839e+02, threshold=3.833e+02, percent-clipped=5.0 +2023-03-26 16:33:31,117 INFO [finetune.py:976] (5/7) Epoch 14, batch 0, loss[loss=0.1771, simple_loss=0.2473, pruned_loss=0.05345, over 4748.00 frames. ], tot_loss[loss=0.1771, simple_loss=0.2473, pruned_loss=0.05345, over 4748.00 frames. ], batch size: 27, lr: 3.57e-03, grad_scale: 32.0 +2023-03-26 16:33:31,117 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 16:33:41,691 INFO [finetune.py:1010] (5/7) Epoch 14, validation: loss=0.1582, simple_loss=0.2295, pruned_loss=0.04344, over 2265189.00 frames. +2023-03-26 16:33:41,691 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 16:33:53,663 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3141, 1.3748, 1.4419, 1.5441, 1.4479, 3.0024, 1.3217, 1.4898], + device='cuda:5'), covar=tensor([0.1018, 0.1902, 0.1069, 0.0957, 0.1722, 0.0312, 0.1600, 0.1790], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0074, 0.0077, 0.0091, 0.0080, 0.0084, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 16:34:14,921 INFO [finetune.py:976] (5/7) Epoch 14, batch 50, loss[loss=0.2219, simple_loss=0.2817, pruned_loss=0.08107, over 4866.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2647, pruned_loss=0.06449, over 215623.94 frames. ], batch size: 31, lr: 3.57e-03, grad_scale: 32.0 +2023-03-26 16:34:42,637 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.553e+01 1.578e+02 1.920e+02 2.248e+02 3.729e+02, threshold=3.841e+02, percent-clipped=1.0 +2023-03-26 16:35:04,284 INFO [finetune.py:976] (5/7) Epoch 14, batch 100, loss[loss=0.141, simple_loss=0.2111, pruned_loss=0.03541, over 4798.00 frames. ], tot_loss[loss=0.1899, simple_loss=0.257, pruned_loss=0.06138, over 381669.99 frames. ], batch size: 26, lr: 3.57e-03, grad_scale: 32.0 +2023-03-26 16:35:08,031 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. 
limit=2.0 +2023-03-26 16:35:09,035 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6013, 1.4249, 2.1362, 3.1409, 2.1228, 2.4035, 0.8562, 2.6002], + device='cuda:5'), covar=tensor([0.1907, 0.1493, 0.1254, 0.0590, 0.0849, 0.1386, 0.2059, 0.0579], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0115, 0.0133, 0.0164, 0.0100, 0.0137, 0.0126, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 16:35:31,042 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74595.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:35:31,623 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74596.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:35:49,901 INFO [finetune.py:976] (5/7) Epoch 14, batch 150, loss[loss=0.1622, simple_loss=0.2263, pruned_loss=0.04901, over 4888.00 frames. ], tot_loss[loss=0.1844, simple_loss=0.2511, pruned_loss=0.05892, over 507207.26 frames. ], batch size: 32, lr: 3.57e-03, grad_scale: 32.0 +2023-03-26 16:36:22,702 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.045e+02 1.552e+02 1.795e+02 2.139e+02 3.747e+02, threshold=3.589e+02, percent-clipped=0.0 +2023-03-26 16:36:33,030 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74656.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:36:35,949 INFO [finetune.py:976] (5/7) Epoch 14, batch 200, loss[loss=0.1979, simple_loss=0.2651, pruned_loss=0.06536, over 4854.00 frames. ], tot_loss[loss=0.1849, simple_loss=0.2505, pruned_loss=0.05964, over 607065.64 frames. ], batch size: 44, lr: 3.57e-03, grad_scale: 32.0 +2023-03-26 16:37:09,842 INFO [finetune.py:976] (5/7) Epoch 14, batch 250, loss[loss=0.2334, simple_loss=0.2896, pruned_loss=0.08855, over 4905.00 frames. ], tot_loss[loss=0.1866, simple_loss=0.2522, pruned_loss=0.06055, over 684300.15 frames. ], batch size: 37, lr: 3.57e-03, grad_scale: 32.0 +2023-03-26 16:37:15,968 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74720.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:37:30,096 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.051e+02 1.719e+02 2.081e+02 2.468e+02 4.342e+02, threshold=4.162e+02, percent-clipped=2.0 +2023-03-26 16:37:41,612 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0513, 1.8049, 1.5722, 1.6567, 1.7383, 1.7799, 1.7630, 2.5340], + device='cuda:5'), covar=tensor([0.3723, 0.4774, 0.3319, 0.4219, 0.4277, 0.2458, 0.3899, 0.1533], + device='cuda:5'), in_proj_covar=tensor([0.0285, 0.0259, 0.0224, 0.0275, 0.0246, 0.0213, 0.0247, 0.0222], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:37:42,695 INFO [finetune.py:976] (5/7) Epoch 14, batch 300, loss[loss=0.182, simple_loss=0.2581, pruned_loss=0.05295, over 4818.00 frames. ], tot_loss[loss=0.1923, simple_loss=0.2588, pruned_loss=0.06288, over 744186.61 frames. ], batch size: 30, lr: 3.56e-03, grad_scale: 32.0 +2023-03-26 16:37:48,015 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=74768.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:38:16,372 INFO [finetune.py:976] (5/7) Epoch 14, batch 350, loss[loss=0.1998, simple_loss=0.2705, pruned_loss=0.06451, over 4822.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2625, pruned_loss=0.06442, over 790317.50 frames. 
], batch size: 30, lr: 3.56e-03, grad_scale: 32.0 +2023-03-26 16:38:36,814 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.618e+01 1.648e+02 1.967e+02 2.475e+02 5.107e+02, threshold=3.933e+02, percent-clipped=3.0 +2023-03-26 16:38:39,434 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-03-26 16:38:49,809 INFO [finetune.py:976] (5/7) Epoch 14, batch 400, loss[loss=0.2022, simple_loss=0.2726, pruned_loss=0.06588, over 4726.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2623, pruned_loss=0.06408, over 826300.48 frames. ], batch size: 59, lr: 3.56e-03, grad_scale: 32.0 +2023-03-26 16:39:13,574 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74896.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:39:22,416 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74909.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:39:23,515 INFO [finetune.py:976] (5/7) Epoch 14, batch 450, loss[loss=0.1733, simple_loss=0.2508, pruned_loss=0.04785, over 4880.00 frames. ], tot_loss[loss=0.193, simple_loss=0.2602, pruned_loss=0.06293, over 855456.15 frames. ], batch size: 32, lr: 3.56e-03, grad_scale: 32.0 +2023-03-26 16:39:35,434 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6857, 1.5224, 1.5272, 1.5361, 1.1013, 3.0268, 1.0844, 1.6240], + device='cuda:5'), covar=tensor([0.3296, 0.2493, 0.2196, 0.2492, 0.1831, 0.0260, 0.2773, 0.1286], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0121, 0.0124, 0.0116, 0.0098, 0.0098, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 16:39:43,598 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.587e+02 1.868e+02 2.260e+02 4.285e+02, threshold=3.737e+02, percent-clipped=2.0 +2023-03-26 16:39:45,926 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=74944.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:39:50,637 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74951.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:39:58,629 INFO [finetune.py:976] (5/7) Epoch 14, batch 500, loss[loss=0.1896, simple_loss=0.2532, pruned_loss=0.06301, over 4825.00 frames. ], tot_loss[loss=0.1907, simple_loss=0.2574, pruned_loss=0.06205, over 878249.21 frames. ], batch size: 40, lr: 3.56e-03, grad_scale: 32.0 +2023-03-26 16:40:00,601 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74964.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:40:08,980 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74970.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:40:39,649 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6537, 1.5672, 1.4041, 1.5342, 1.9611, 1.7551, 1.6070, 1.3730], + device='cuda:5'), covar=tensor([0.0285, 0.0303, 0.0547, 0.0304, 0.0215, 0.0461, 0.0353, 0.0452], + device='cuda:5'), in_proj_covar=tensor([0.0093, 0.0109, 0.0140, 0.0113, 0.0101, 0.0106, 0.0095, 0.0109], + device='cuda:5'), out_proj_covar=tensor([7.2579e-05, 8.4439e-05, 1.1120e-04, 8.7919e-05, 7.8741e-05, 7.8420e-05, + 7.1763e-05, 8.3472e-05], device='cuda:5') +2023-03-26 16:40:45,476 INFO [finetune.py:976] (5/7) Epoch 14, batch 550, loss[loss=0.2066, simple_loss=0.2683, pruned_loss=0.07242, over 4925.00 frames. 
], tot_loss[loss=0.1883, simple_loss=0.2542, pruned_loss=0.06119, over 895340.35 frames. ], batch size: 38, lr: 3.56e-03, grad_scale: 32.0 +2023-03-26 16:40:51,306 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9912, 1.7283, 1.5838, 1.5782, 1.6188, 1.6129, 1.6688, 2.4208], + device='cuda:5'), covar=tensor([0.3773, 0.4238, 0.3118, 0.3721, 0.4014, 0.2369, 0.3841, 0.1643], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0260, 0.0225, 0.0276, 0.0247, 0.0214, 0.0249, 0.0223], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:40:57,860 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75025.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:41:09,612 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.017e+02 1.564e+02 1.846e+02 2.204e+02 7.411e+02, threshold=3.691e+02, percent-clipped=3.0 +2023-03-26 16:41:32,953 INFO [finetune.py:976] (5/7) Epoch 14, batch 600, loss[loss=0.1793, simple_loss=0.2632, pruned_loss=0.04766, over 4897.00 frames. ], tot_loss[loss=0.1897, simple_loss=0.2556, pruned_loss=0.06193, over 908646.89 frames. ], batch size: 43, lr: 3.56e-03, grad_scale: 16.0 +2023-03-26 16:42:10,222 INFO [finetune.py:976] (5/7) Epoch 14, batch 650, loss[loss=0.1974, simple_loss=0.2741, pruned_loss=0.06041, over 4825.00 frames. ], tot_loss[loss=0.1922, simple_loss=0.2586, pruned_loss=0.06293, over 919466.45 frames. ], batch size: 33, lr: 3.56e-03, grad_scale: 16.0 +2023-03-26 16:42:30,910 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.139e+02 1.620e+02 1.922e+02 2.248e+02 3.855e+02, threshold=3.845e+02, percent-clipped=1.0 +2023-03-26 16:42:43,797 INFO [finetune.py:976] (5/7) Epoch 14, batch 700, loss[loss=0.1927, simple_loss=0.2661, pruned_loss=0.05969, over 4867.00 frames. ], tot_loss[loss=0.193, simple_loss=0.2599, pruned_loss=0.06301, over 927680.44 frames. ], batch size: 34, lr: 3.56e-03, grad_scale: 16.0 +2023-03-26 16:43:16,881 INFO [finetune.py:976] (5/7) Epoch 14, batch 750, loss[loss=0.1965, simple_loss=0.2714, pruned_loss=0.0608, over 4812.00 frames. ], tot_loss[loss=0.1942, simple_loss=0.2618, pruned_loss=0.06327, over 931719.87 frames. ], batch size: 39, lr: 3.56e-03, grad_scale: 16.0 +2023-03-26 16:43:27,930 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-03-26 16:43:28,290 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=75228.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:43:37,710 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.931e+01 1.583e+02 1.834e+02 2.163e+02 4.783e+02, threshold=3.668e+02, percent-clipped=1.0 +2023-03-26 16:43:44,243 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75251.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:43:50,668 INFO [finetune.py:976] (5/7) Epoch 14, batch 800, loss[loss=0.2073, simple_loss=0.2726, pruned_loss=0.07095, over 4929.00 frames. ], tot_loss[loss=0.1929, simple_loss=0.2609, pruned_loss=0.06249, over 937724.63 frames. 
], batch size: 41, lr: 3.56e-03, grad_scale: 16.0 +2023-03-26 16:43:53,647 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=75265.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:43:56,670 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=75270.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:44:09,565 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75289.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:44:16,561 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=75299.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:44:24,289 INFO [finetune.py:976] (5/7) Epoch 14, batch 850, loss[loss=0.1912, simple_loss=0.258, pruned_loss=0.06218, over 4867.00 frames. ], tot_loss[loss=0.1894, simple_loss=0.257, pruned_loss=0.0609, over 942941.24 frames. ], batch size: 31, lr: 3.56e-03, grad_scale: 16.0 +2023-03-26 16:44:30,258 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=75320.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:44:37,480 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75331.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 16:44:38,058 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0608, 0.9984, 0.9241, 1.1929, 1.2327, 1.1727, 1.0339, 0.9889], + device='cuda:5'), covar=tensor([0.0347, 0.0317, 0.0615, 0.0318, 0.0293, 0.0434, 0.0370, 0.0415], + device='cuda:5'), in_proj_covar=tensor([0.0093, 0.0108, 0.0140, 0.0113, 0.0100, 0.0105, 0.0095, 0.0108], + device='cuda:5'), out_proj_covar=tensor([7.2484e-05, 8.3760e-05, 1.1055e-04, 8.7613e-05, 7.7869e-05, 7.7883e-05, + 7.1536e-05, 8.2607e-05], device='cuda:5') +2023-03-26 16:44:44,946 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.704e+01 1.586e+02 1.985e+02 2.275e+02 3.825e+02, threshold=3.970e+02, percent-clipped=2.0 +2023-03-26 16:44:57,420 INFO [finetune.py:976] (5/7) Epoch 14, batch 900, loss[loss=0.1496, simple_loss=0.2245, pruned_loss=0.03737, over 4849.00 frames. ], tot_loss[loss=0.187, simple_loss=0.2543, pruned_loss=0.05981, over 947627.94 frames. ], batch size: 44, lr: 3.56e-03, grad_scale: 16.0 +2023-03-26 16:45:03,177 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.60 vs. limit=2.0 +2023-03-26 16:45:44,930 INFO [finetune.py:976] (5/7) Epoch 14, batch 950, loss[loss=0.2431, simple_loss=0.309, pruned_loss=0.08867, over 4806.00 frames. ], tot_loss[loss=0.1879, simple_loss=0.2546, pruned_loss=0.06063, over 950443.92 frames. ], batch size: 45, lr: 3.56e-03, grad_scale: 16.0 +2023-03-26 16:45:50,503 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1968, 1.6740, 1.9483, 2.0429, 1.7769, 1.7893, 1.9268, 1.8135], + device='cuda:5'), covar=tensor([0.5279, 0.5474, 0.4737, 0.5319, 0.6810, 0.5299, 0.7105, 0.4669], + device='cuda:5'), in_proj_covar=tensor([0.0242, 0.0237, 0.0255, 0.0264, 0.0261, 0.0235, 0.0276, 0.0234], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:46:05,784 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.942e+01 1.566e+02 1.871e+02 2.243e+02 4.539e+02, threshold=3.743e+02, percent-clipped=1.0 +2023-03-26 16:46:17,398 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. 
limit=2.0 +2023-03-26 16:46:18,860 INFO [finetune.py:976] (5/7) Epoch 14, batch 1000, loss[loss=0.2274, simple_loss=0.2953, pruned_loss=0.07974, over 4820.00 frames. ], tot_loss[loss=0.1903, simple_loss=0.2578, pruned_loss=0.06141, over 951293.79 frames. ], batch size: 40, lr: 3.56e-03, grad_scale: 16.0 +2023-03-26 16:47:07,212 INFO [finetune.py:976] (5/7) Epoch 14, batch 1050, loss[loss=0.1973, simple_loss=0.2607, pruned_loss=0.06689, over 4754.00 frames. ], tot_loss[loss=0.191, simple_loss=0.2591, pruned_loss=0.06146, over 951292.25 frames. ], batch size: 59, lr: 3.56e-03, grad_scale: 16.0 +2023-03-26 16:47:31,086 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.109e+01 1.606e+02 2.003e+02 2.356e+02 8.983e+02, threshold=4.007e+02, percent-clipped=2.0 +2023-03-26 16:47:44,013 INFO [finetune.py:976] (5/7) Epoch 14, batch 1100, loss[loss=0.1738, simple_loss=0.2571, pruned_loss=0.04524, over 4894.00 frames. ], tot_loss[loss=0.1908, simple_loss=0.2591, pruned_loss=0.06126, over 952544.26 frames. ], batch size: 43, lr: 3.56e-03, grad_scale: 16.0 +2023-03-26 16:47:45,245 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6162, 1.4572, 1.5001, 1.4918, 0.9682, 3.0166, 1.0356, 1.5926], + device='cuda:5'), covar=tensor([0.3402, 0.2635, 0.2244, 0.2595, 0.2026, 0.0263, 0.2692, 0.1299], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0120, 0.0124, 0.0115, 0.0097, 0.0097, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 16:47:47,088 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75565.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:48:00,098 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=75584.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:48:09,088 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1352, 3.6409, 3.8176, 4.0338, 3.9377, 3.6436, 4.2550, 1.2540], + device='cuda:5'), covar=tensor([0.0830, 0.0775, 0.0819, 0.0871, 0.1245, 0.1782, 0.0751, 0.5532], + device='cuda:5'), in_proj_covar=tensor([0.0349, 0.0244, 0.0277, 0.0290, 0.0332, 0.0282, 0.0301, 0.0296], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:48:14,332 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-03-26 16:48:18,069 INFO [finetune.py:976] (5/7) Epoch 14, batch 1150, loss[loss=0.188, simple_loss=0.2603, pruned_loss=0.05784, over 4802.00 frames. ], tot_loss[loss=0.1924, simple_loss=0.2604, pruned_loss=0.06215, over 953328.73 frames. 
], batch size: 41, lr: 3.56e-03, grad_scale: 16.0 +2023-03-26 16:48:19,809 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=75613.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:48:24,054 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75620.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:48:28,192 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=75626.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 16:48:38,761 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.806e+01 1.586e+02 1.986e+02 2.337e+02 5.787e+02, threshold=3.972e+02, percent-clipped=2.0 +2023-03-26 16:48:51,183 INFO [finetune.py:976] (5/7) Epoch 14, batch 1200, loss[loss=0.2194, simple_loss=0.2803, pruned_loss=0.07927, over 4900.00 frames. ], tot_loss[loss=0.1901, simple_loss=0.2581, pruned_loss=0.061, over 952098.83 frames. ], batch size: 43, lr: 3.56e-03, grad_scale: 16.0 +2023-03-26 16:48:52,370 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8126, 2.3424, 3.0997, 1.9116, 2.6913, 2.9771, 2.1431, 3.2095], + device='cuda:5'), covar=tensor([0.1355, 0.2158, 0.1293, 0.2330, 0.0960, 0.1445, 0.2660, 0.0904], + device='cuda:5'), in_proj_covar=tensor([0.0194, 0.0204, 0.0191, 0.0189, 0.0175, 0.0213, 0.0215, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:48:56,399 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=75668.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:49:03,630 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3942, 2.1580, 2.7599, 1.5733, 2.3833, 2.5651, 1.9554, 2.8356], + device='cuda:5'), covar=tensor([0.1481, 0.2008, 0.1743, 0.2532, 0.1000, 0.1755, 0.2801, 0.0926], + device='cuda:5'), in_proj_covar=tensor([0.0194, 0.0204, 0.0191, 0.0189, 0.0176, 0.0214, 0.0215, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:49:15,491 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.59 vs. limit=2.0 +2023-03-26 16:49:24,702 INFO [finetune.py:976] (5/7) Epoch 14, batch 1250, loss[loss=0.1575, simple_loss=0.2344, pruned_loss=0.04033, over 4747.00 frames. ], tot_loss[loss=0.188, simple_loss=0.2557, pruned_loss=0.06014, over 953941.75 frames. ], batch size: 27, lr: 3.56e-03, grad_scale: 16.0 +2023-03-26 16:49:27,177 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.68 vs. limit=2.0 +2023-03-26 16:49:45,245 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.316e+01 1.497e+02 1.829e+02 2.293e+02 4.240e+02, threshold=3.659e+02, percent-clipped=2.0 +2023-03-26 16:49:56,735 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8283, 1.0066, 1.8204, 1.7303, 1.6314, 1.5368, 1.6357, 1.7234], + device='cuda:5'), covar=tensor([0.3807, 0.4079, 0.3355, 0.3711, 0.4656, 0.3646, 0.4420, 0.3184], + device='cuda:5'), in_proj_covar=tensor([0.0243, 0.0238, 0.0256, 0.0266, 0.0263, 0.0237, 0.0277, 0.0235], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:49:57,807 INFO [finetune.py:976] (5/7) Epoch 14, batch 1300, loss[loss=0.1738, simple_loss=0.2438, pruned_loss=0.05196, over 4901.00 frames. ], tot_loss[loss=0.1852, simple_loss=0.2525, pruned_loss=0.059, over 957205.25 frames. 
], batch size: 35, lr: 3.56e-03, grad_scale: 16.0 +2023-03-26 16:50:14,949 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6921, 1.6070, 1.5573, 1.6263, 0.9591, 3.0093, 1.1270, 1.6528], + device='cuda:5'), covar=tensor([0.3067, 0.2407, 0.2075, 0.2344, 0.1938, 0.0259, 0.2500, 0.1173], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0115, 0.0120, 0.0124, 0.0115, 0.0097, 0.0097, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 16:50:31,734 INFO [finetune.py:976] (5/7) Epoch 14, batch 1350, loss[loss=0.1911, simple_loss=0.2589, pruned_loss=0.06162, over 4760.00 frames. ], tot_loss[loss=0.1871, simple_loss=0.2538, pruned_loss=0.06019, over 957736.39 frames. ], batch size: 59, lr: 3.56e-03, grad_scale: 16.0 +2023-03-26 16:50:35,371 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8020, 0.9965, 1.8736, 1.7129, 1.6151, 1.5352, 1.6248, 1.7574], + device='cuda:5'), covar=tensor([0.4068, 0.4459, 0.3723, 0.3949, 0.5115, 0.3873, 0.4764, 0.3374], + device='cuda:5'), in_proj_covar=tensor([0.0243, 0.0238, 0.0256, 0.0266, 0.0263, 0.0236, 0.0276, 0.0235], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:50:52,073 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0999, 1.9311, 2.1314, 1.5045, 2.0186, 2.2482, 2.1494, 1.6784], + device='cuda:5'), covar=tensor([0.0529, 0.0669, 0.0608, 0.0847, 0.0654, 0.0594, 0.0548, 0.1071], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0132, 0.0141, 0.0123, 0.0123, 0.0141, 0.0140, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:51:07,735 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.659e+01 1.636e+02 1.953e+02 2.256e+02 6.748e+02, threshold=3.906e+02, percent-clipped=1.0 +2023-03-26 16:51:19,721 INFO [finetune.py:976] (5/7) Epoch 14, batch 1400, loss[loss=0.253, simple_loss=0.312, pruned_loss=0.09704, over 4226.00 frames. ], tot_loss[loss=0.1902, simple_loss=0.2574, pruned_loss=0.06154, over 954580.47 frames. ], batch size: 65, lr: 3.56e-03, grad_scale: 16.0 +2023-03-26 16:51:35,785 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75884.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:51:53,116 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1964, 2.0500, 1.6914, 2.0048, 1.8973, 1.8979, 1.9170, 2.6665], + device='cuda:5'), covar=tensor([0.4061, 0.5338, 0.3822, 0.4807, 0.4424, 0.2712, 0.4570, 0.1857], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0260, 0.0226, 0.0278, 0.0247, 0.0214, 0.0249, 0.0224], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:51:53,579 INFO [finetune.py:976] (5/7) Epoch 14, batch 1450, loss[loss=0.1779, simple_loss=0.2487, pruned_loss=0.05353, over 4814.00 frames. ], tot_loss[loss=0.1899, simple_loss=0.2576, pruned_loss=0.06112, over 954743.60 frames. 
], batch size: 33, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 16:52:08,068 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75926.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 16:52:17,404 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=75932.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:52:27,770 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.094e+02 1.664e+02 1.939e+02 2.609e+02 1.085e+03, threshold=3.877e+02, percent-clipped=5.0 +2023-03-26 16:52:44,461 INFO [finetune.py:976] (5/7) Epoch 14, batch 1500, loss[loss=0.2039, simple_loss=0.2582, pruned_loss=0.07486, over 4799.00 frames. ], tot_loss[loss=0.1905, simple_loss=0.2584, pruned_loss=0.06126, over 954757.73 frames. ], batch size: 45, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 16:52:52,977 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=75974.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:53:14,328 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.82 vs. limit=5.0 +2023-03-26 16:53:19,428 INFO [finetune.py:976] (5/7) Epoch 14, batch 1550, loss[loss=0.1929, simple_loss=0.2489, pruned_loss=0.06845, over 4798.00 frames. ], tot_loss[loss=0.1903, simple_loss=0.2585, pruned_loss=0.06103, over 953081.85 frames. ], batch size: 25, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 16:53:33,198 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6306, 2.4369, 2.9399, 1.7388, 2.6882, 2.8949, 2.1754, 3.1149], + device='cuda:5'), covar=tensor([0.1398, 0.1790, 0.1372, 0.2572, 0.0870, 0.1574, 0.2560, 0.0892], + device='cuda:5'), in_proj_covar=tensor([0.0194, 0.0204, 0.0192, 0.0189, 0.0176, 0.0213, 0.0216, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:53:40,208 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.569e+01 1.489e+02 1.761e+02 2.263e+02 3.823e+02, threshold=3.522e+02, percent-clipped=0.0 +2023-03-26 16:53:42,226 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-03-26 16:53:53,249 INFO [finetune.py:976] (5/7) Epoch 14, batch 1600, loss[loss=0.155, simple_loss=0.2208, pruned_loss=0.04462, over 4814.00 frames. ], tot_loss[loss=0.1882, simple_loss=0.2557, pruned_loss=0.06038, over 953038.52 frames. ], batch size: 25, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 16:54:14,227 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6914, 2.4511, 2.0005, 1.0847, 2.3216, 2.0355, 1.8811, 2.1180], + device='cuda:5'), covar=tensor([0.0856, 0.0911, 0.1685, 0.2079, 0.1475, 0.1957, 0.2145, 0.1131], + device='cuda:5'), in_proj_covar=tensor([0.0166, 0.0196, 0.0198, 0.0184, 0.0212, 0.0206, 0.0220, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:54:19,214 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.16 vs. limit=5.0 +2023-03-26 16:54:26,636 INFO [finetune.py:976] (5/7) Epoch 14, batch 1650, loss[loss=0.1711, simple_loss=0.2326, pruned_loss=0.05475, over 4832.00 frames. ], tot_loss[loss=0.1849, simple_loss=0.2518, pruned_loss=0.05898, over 954881.42 frames. 
], batch size: 33, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 16:54:41,413 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1935, 3.6776, 3.8795, 4.0766, 3.9909, 3.7135, 4.3078, 1.3594], + device='cuda:5'), covar=tensor([0.0823, 0.0883, 0.0900, 0.0896, 0.1177, 0.1491, 0.0720, 0.5427], + device='cuda:5'), in_proj_covar=tensor([0.0348, 0.0244, 0.0277, 0.0291, 0.0331, 0.0282, 0.0302, 0.0296], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:54:47,812 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.271e+01 1.580e+02 1.872e+02 2.187e+02 4.946e+02, threshold=3.744e+02, percent-clipped=3.0 +2023-03-26 16:54:48,284 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.75 vs. limit=5.0 +2023-03-26 16:55:00,266 INFO [finetune.py:976] (5/7) Epoch 14, batch 1700, loss[loss=0.2069, simple_loss=0.26, pruned_loss=0.07693, over 4913.00 frames. ], tot_loss[loss=0.1836, simple_loss=0.2499, pruned_loss=0.05861, over 953801.85 frames. ], batch size: 36, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 16:55:04,713 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-03-26 16:55:34,231 INFO [finetune.py:976] (5/7) Epoch 14, batch 1750, loss[loss=0.2332, simple_loss=0.2712, pruned_loss=0.0976, over 4239.00 frames. ], tot_loss[loss=0.1856, simple_loss=0.2519, pruned_loss=0.05966, over 954815.25 frames. ], batch size: 18, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 16:55:34,375 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9854, 1.7515, 1.6194, 1.9108, 2.6129, 1.9664, 2.1304, 1.4771], + device='cuda:5'), covar=tensor([0.2227, 0.2066, 0.1954, 0.1780, 0.1854, 0.1198, 0.1932, 0.1928], + device='cuda:5'), in_proj_covar=tensor([0.0237, 0.0205, 0.0209, 0.0188, 0.0239, 0.0183, 0.0212, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:55:38,303 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0 +2023-03-26 16:55:42,836 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9009, 1.2358, 1.9031, 1.8392, 1.6636, 1.6043, 1.7779, 1.7475], + device='cuda:5'), covar=tensor([0.3884, 0.4335, 0.3679, 0.3859, 0.5208, 0.3949, 0.4786, 0.3492], + device='cuda:5'), in_proj_covar=tensor([0.0243, 0.0239, 0.0257, 0.0266, 0.0264, 0.0238, 0.0278, 0.0236], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:55:55,255 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.088e+02 1.620e+02 1.973e+02 2.349e+02 4.562e+02, threshold=3.945e+02, percent-clipped=4.0 +2023-03-26 16:56:02,998 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1896, 2.0224, 2.1000, 1.5802, 1.9832, 2.2839, 2.3036, 1.7143], + device='cuda:5'), covar=tensor([0.0553, 0.0581, 0.0655, 0.0896, 0.0747, 0.0586, 0.0521, 0.1055], + device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0134, 0.0144, 0.0125, 0.0126, 0.0143, 0.0142, 0.0165], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 16:56:17,742 INFO [finetune.py:976] (5/7) Epoch 14, batch 1800, loss[loss=0.1725, simple_loss=0.2497, pruned_loss=0.04762, over 4772.00 frames. ], tot_loss[loss=0.188, simple_loss=0.2553, pruned_loss=0.06035, over 955730.09 frames. 
], batch size: 54, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 16:56:27,652 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=76269.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 16:56:47,401 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=76294.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:56:58,601 INFO [finetune.py:976] (5/7) Epoch 14, batch 1850, loss[loss=0.2279, simple_loss=0.2817, pruned_loss=0.08702, over 4753.00 frames. ], tot_loss[loss=0.1895, simple_loss=0.257, pruned_loss=0.06102, over 954912.75 frames. ], batch size: 54, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 16:57:07,113 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=76323.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 16:57:11,394 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=76330.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 16:57:19,099 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.107e+02 1.514e+02 1.907e+02 2.299e+02 3.483e+02, threshold=3.815e+02, percent-clipped=0.0 +2023-03-26 16:57:24,322 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.2557, 1.2689, 1.3716, 0.6957, 1.2982, 1.5333, 1.5683, 1.3256], + device='cuda:5'), covar=tensor([0.1053, 0.0897, 0.0508, 0.0641, 0.0565, 0.0924, 0.0401, 0.0733], + device='cuda:5'), in_proj_covar=tensor([0.0127, 0.0154, 0.0123, 0.0130, 0.0131, 0.0127, 0.0142, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.3674e-05, 1.1209e-04, 8.8247e-05, 9.3516e-05, 9.3045e-05, 9.2085e-05, + 1.0321e-04, 1.0613e-04], device='cuda:5') +2023-03-26 16:57:35,298 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=76355.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 16:57:38,872 INFO [finetune.py:976] (5/7) Epoch 14, batch 1900, loss[loss=0.2003, simple_loss=0.267, pruned_loss=0.0668, over 4818.00 frames. ], tot_loss[loss=0.1896, simple_loss=0.2574, pruned_loss=0.06093, over 952142.88 frames. ], batch size: 33, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 16:57:57,152 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=76384.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 16:58:15,531 INFO [finetune.py:976] (5/7) Epoch 14, batch 1950, loss[loss=0.1485, simple_loss=0.2225, pruned_loss=0.03724, over 4928.00 frames. ], tot_loss[loss=0.1876, simple_loss=0.2553, pruned_loss=0.05994, over 952827.46 frames. ], batch size: 33, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 16:58:35,771 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.938e+01 1.459e+02 1.786e+02 2.082e+02 3.715e+02, threshold=3.572e+02, percent-clipped=0.0 +2023-03-26 16:58:49,146 INFO [finetune.py:976] (5/7) Epoch 14, batch 2000, loss[loss=0.1792, simple_loss=0.2407, pruned_loss=0.05885, over 4690.00 frames. ], tot_loss[loss=0.1849, simple_loss=0.2523, pruned_loss=0.05874, over 954241.26 frames. ], batch size: 23, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 16:59:22,665 INFO [finetune.py:976] (5/7) Epoch 14, batch 2050, loss[loss=0.1394, simple_loss=0.2139, pruned_loss=0.03243, over 4797.00 frames. ], tot_loss[loss=0.1818, simple_loss=0.249, pruned_loss=0.05734, over 955737.24 frames. 
], batch size: 45, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 16:59:42,964 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.833e+01 1.460e+02 1.798e+02 2.157e+02 5.136e+02, threshold=3.595e+02, percent-clipped=3.0 +2023-03-26 16:59:56,036 INFO [finetune.py:976] (5/7) Epoch 14, batch 2100, loss[loss=0.2071, simple_loss=0.2737, pruned_loss=0.07024, over 4836.00 frames. ], tot_loss[loss=0.1824, simple_loss=0.2495, pruned_loss=0.05762, over 955890.73 frames. ], batch size: 49, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 17:00:29,581 INFO [finetune.py:976] (5/7) Epoch 14, batch 2150, loss[loss=0.1791, simple_loss=0.2575, pruned_loss=0.05028, over 4827.00 frames. ], tot_loss[loss=0.1843, simple_loss=0.2525, pruned_loss=0.05805, over 956085.53 frames. ], batch size: 39, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 17:00:38,762 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76625.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 17:00:50,410 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.070e+02 1.680e+02 1.855e+02 2.274e+02 3.771e+02, threshold=3.710e+02, percent-clipped=2.0 +2023-03-26 17:00:55,369 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76650.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:01:02,487 INFO [finetune.py:976] (5/7) Epoch 14, batch 2200, loss[loss=0.1656, simple_loss=0.2295, pruned_loss=0.05081, over 4722.00 frames. ], tot_loss[loss=0.1872, simple_loss=0.2561, pruned_loss=0.05911, over 955461.67 frames. ], batch size: 23, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 17:01:21,669 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76679.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 17:01:57,532 INFO [finetune.py:976] (5/7) Epoch 14, batch 2250, loss[loss=0.2207, simple_loss=0.2813, pruned_loss=0.08, over 4788.00 frames. ], tot_loss[loss=0.1892, simple_loss=0.2579, pruned_loss=0.06025, over 954476.47 frames. ], batch size: 29, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 17:02:18,737 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.028e+02 1.580e+02 1.848e+02 2.151e+02 3.368e+02, threshold=3.695e+02, percent-clipped=0.0 +2023-03-26 17:02:31,272 INFO [finetune.py:976] (5/7) Epoch 14, batch 2300, loss[loss=0.2134, simple_loss=0.2829, pruned_loss=0.07196, over 4691.00 frames. ], tot_loss[loss=0.1901, simple_loss=0.2588, pruned_loss=0.06064, over 951432.49 frames. ], batch size: 59, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 17:02:55,580 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-03-26 17:03:06,757 INFO [finetune.py:976] (5/7) Epoch 14, batch 2350, loss[loss=0.2518, simple_loss=0.3048, pruned_loss=0.09939, over 4850.00 frames. ], tot_loss[loss=0.1892, simple_loss=0.2575, pruned_loss=0.06043, over 952530.04 frames. ], batch size: 49, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 17:03:16,602 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.66 vs. 
limit=2.0 +2023-03-26 17:03:22,938 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9466, 1.7936, 1.6066, 1.9063, 2.2221, 1.8638, 1.7097, 1.6353], + device='cuda:5'), covar=tensor([0.1515, 0.1616, 0.1559, 0.1283, 0.1439, 0.1075, 0.2149, 0.1503], + device='cuda:5'), in_proj_covar=tensor([0.0239, 0.0206, 0.0210, 0.0189, 0.0240, 0.0184, 0.0213, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 17:03:25,973 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=76839.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:03:28,175 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.075e+02 1.531e+02 1.863e+02 2.217e+02 4.521e+02, threshold=3.725e+02, percent-clipped=2.0 +2023-03-26 17:03:40,657 INFO [finetune.py:976] (5/7) Epoch 14, batch 2400, loss[loss=0.2119, simple_loss=0.2726, pruned_loss=0.07562, over 4940.00 frames. ], tot_loss[loss=0.1866, simple_loss=0.2545, pruned_loss=0.05937, over 953283.36 frames. ], batch size: 38, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 17:04:05,872 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.1520, 1.3100, 1.3618, 0.8100, 1.3526, 1.5998, 1.6071, 1.2733], + device='cuda:5'), covar=tensor([0.1076, 0.0745, 0.0648, 0.0635, 0.0519, 0.0622, 0.0446, 0.0859], + device='cuda:5'), in_proj_covar=tensor([0.0127, 0.0154, 0.0122, 0.0130, 0.0131, 0.0128, 0.0142, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.3935e-05, 1.1229e-04, 8.8142e-05, 9.3563e-05, 9.3229e-05, 9.2386e-05, + 1.0322e-04, 1.0578e-04], device='cuda:5') +2023-03-26 17:04:06,945 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=76900.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:04:13,512 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5339, 1.4513, 1.8856, 1.8384, 1.5389, 3.4637, 1.4083, 1.4763], + device='cuda:5'), covar=tensor([0.0936, 0.1805, 0.1112, 0.0924, 0.1592, 0.0231, 0.1400, 0.1782], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0074, 0.0077, 0.0091, 0.0080, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 17:04:14,026 INFO [finetune.py:976] (5/7) Epoch 14, batch 2450, loss[loss=0.195, simple_loss=0.2692, pruned_loss=0.06038, over 4933.00 frames. ], tot_loss[loss=0.1846, simple_loss=0.2517, pruned_loss=0.05876, over 952926.03 frames. ], batch size: 33, lr: 3.55e-03, grad_scale: 16.0 +2023-03-26 17:04:23,093 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=76925.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 17:04:34,662 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.650e+01 1.627e+02 1.914e+02 2.448e+02 4.488e+02, threshold=3.829e+02, percent-clipped=3.0 +2023-03-26 17:04:40,118 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=76950.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:04:47,676 INFO [finetune.py:976] (5/7) Epoch 14, batch 2500, loss[loss=0.1898, simple_loss=0.27, pruned_loss=0.05479, over 4822.00 frames. ], tot_loss[loss=0.188, simple_loss=0.2549, pruned_loss=0.06052, over 953612.54 frames. 
], batch size: 39, lr: 3.55e-03, grad_scale: 16.0
+2023-03-26 17:04:55,526 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=76973.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 17:04:59,668 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=76979.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 17:05:12,272 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=76998.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:05:14,841 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3687, 2.3279, 2.0240, 1.0947, 2.2150, 1.8140, 1.6605, 2.1847],
+ device='cuda:5'), covar=tensor([0.0987, 0.0836, 0.1716, 0.2196, 0.1529, 0.2449, 0.2416, 0.0941],
+ device='cuda:5'), in_proj_covar=tensor([0.0166, 0.0194, 0.0197, 0.0183, 0.0212, 0.0207, 0.0221, 0.0195],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:05:21,745 INFO [finetune.py:976] (5/7) Epoch 14, batch 2550, loss[loss=0.1893, simple_loss=0.2724, pruned_loss=0.05315, over 4817.00 frames. ], tot_loss[loss=0.1905, simple_loss=0.2585, pruned_loss=0.06123, over 955506.15 frames. ], batch size: 40, lr: 3.55e-03, grad_scale: 16.0
+2023-03-26 17:05:32,006 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=77027.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 17:05:42,424 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.113e+02 1.599e+02 1.863e+02 2.531e+02 5.028e+02, threshold=3.725e+02, percent-clipped=2.0
+2023-03-26 17:05:49,702 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5765, 1.5914, 1.3211, 1.5024, 2.0534, 1.7986, 1.6435, 1.3933],
+ device='cuda:5'), covar=tensor([0.0370, 0.0345, 0.0619, 0.0343, 0.0217, 0.0529, 0.0360, 0.0417],
+ device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0109, 0.0141, 0.0113, 0.0101, 0.0106, 0.0096, 0.0108],
+ device='cuda:5'), out_proj_covar=tensor([7.3175e-05, 8.4358e-05, 1.1175e-04, 8.7859e-05, 7.8520e-05, 7.8531e-05,
+ 7.2577e-05, 8.2410e-05], device='cuda:5')
+2023-03-26 17:05:55,390 INFO [finetune.py:976] (5/7) Epoch 14, batch 2600, loss[loss=0.2288, simple_loss=0.2959, pruned_loss=0.0809, over 4816.00 frames. ], tot_loss[loss=0.1905, simple_loss=0.2588, pruned_loss=0.06111, over 955477.29 frames. ], batch size: 45, lr: 3.55e-03, grad_scale: 32.0
+2023-03-26 17:06:18,024 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77095.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:06:30,489 INFO [finetune.py:976] (5/7) Epoch 14, batch 2650, loss[loss=0.1979, simple_loss=0.2792, pruned_loss=0.05832, over 4776.00 frames. ], tot_loss[loss=0.1922, simple_loss=0.2601, pruned_loss=0.06217, over 952630.53 frames. ], batch size: 29, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:07:08,840 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.161e+02 1.606e+02 1.948e+02 2.371e+02 3.624e+02, threshold=3.895e+02, percent-clipped=0.0
+2023-03-26 17:07:22,897 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77156.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 17:07:26,328 INFO [finetune.py:976] (5/7) Epoch 14, batch 2700, loss[loss=0.1721, simple_loss=0.2313, pruned_loss=0.05641, over 4847.00 frames. ], tot_loss[loss=0.1903, simple_loss=0.258, pruned_loss=0.06126, over 951971.78 frames. ], batch size: 49, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:07:57,701 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77195.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:08:01,518 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3901, 1.2170, 1.6843, 2.4376, 1.6379, 2.1236, 0.8418, 2.0231],
+ device='cuda:5'), covar=tensor([0.1690, 0.1498, 0.1154, 0.0739, 0.0904, 0.1206, 0.1648, 0.0673],
+ device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0133, 0.0164, 0.0100, 0.0137, 0.0125, 0.0102],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-26 17:08:07,956 INFO [finetune.py:976] (5/7) Epoch 14, batch 2750, loss[loss=0.1685, simple_loss=0.2331, pruned_loss=0.05191, over 4906.00 frames. ], tot_loss[loss=0.188, simple_loss=0.2552, pruned_loss=0.06039, over 952484.18 frames. ], batch size: 37, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:08:08,058 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77211.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:08:28,395 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.127e+02 1.514e+02 1.871e+02 2.163e+02 3.576e+02, threshold=3.742e+02, percent-clipped=0.0
+2023-03-26 17:08:40,954 INFO [finetune.py:976] (5/7) Epoch 14, batch 2800, loss[loss=0.1354, simple_loss=0.2074, pruned_loss=0.03166, over 4936.00 frames. ], tot_loss[loss=0.1854, simple_loss=0.2517, pruned_loss=0.05956, over 953883.78 frames. ], batch size: 33, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:08:48,266 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77272.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:08:48,869 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77273.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:08:52,222 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77277.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:08:54,881 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.56 vs. limit=2.0
+2023-03-26 17:08:56,438 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9099, 1.7720, 1.6017, 1.7128, 1.2607, 4.1476, 1.7518, 2.0647],
+ device='cuda:5'), covar=tensor([0.3160, 0.2357, 0.2100, 0.2329, 0.1746, 0.0146, 0.2501, 0.1244],
+ device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0121, 0.0125, 0.0116, 0.0098, 0.0097, 0.0098],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 17:09:14,627 INFO [finetune.py:976] (5/7) Epoch 14, batch 2850, loss[loss=0.242, simple_loss=0.2954, pruned_loss=0.09429, over 4780.00 frames. ], tot_loss[loss=0.1849, simple_loss=0.2508, pruned_loss=0.05951, over 954357.03 frames. ], batch size: 29, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:09:29,096 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6839, 3.6730, 3.4346, 1.5628, 3.7323, 2.8224, 1.1432, 2.4264],
+ device='cuda:5'), covar=tensor([0.2217, 0.2044, 0.1590, 0.3340, 0.1112, 0.1058, 0.3886, 0.1515],
+ device='cuda:5'), in_proj_covar=tensor([0.0149, 0.0171, 0.0157, 0.0126, 0.0154, 0.0120, 0.0143, 0.0121],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-26 17:09:29,754 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77334.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:09:30,961 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1297, 2.1391, 1.8123, 2.2537, 2.1319, 1.8674, 2.4907, 2.2193],
+ device='cuda:5'), covar=tensor([0.1319, 0.2254, 0.2643, 0.2488, 0.2242, 0.1522, 0.2712, 0.1588],
+ device='cuda:5'), in_proj_covar=tensor([0.0179, 0.0187, 0.0232, 0.0252, 0.0243, 0.0198, 0.0212, 0.0198],
+ device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:09:32,146 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77338.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:09:34,910 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.028e+02 1.563e+02 1.884e+02 2.320e+02 5.201e+02, threshold=3.768e+02, percent-clipped=2.0
+2023-03-26 17:09:36,225 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7449, 1.3439, 1.0436, 1.7061, 1.9956, 1.3108, 1.5546, 1.7426],
+ device='cuda:5'), covar=tensor([0.1173, 0.1670, 0.1726, 0.0950, 0.1682, 0.1974, 0.1180, 0.1477],
+ device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0095, 0.0112, 0.0092, 0.0119, 0.0094, 0.0100, 0.0090],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003],
+ device='cuda:5')
+2023-03-26 17:09:42,656 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0
+2023-03-26 17:09:47,962 INFO [finetune.py:976] (5/7) Epoch 14, batch 2900, loss[loss=0.1719, simple_loss=0.2575, pruned_loss=0.04312, over 4908.00 frames. ], tot_loss[loss=0.1882, simple_loss=0.2542, pruned_loss=0.06108, over 952471.53 frames. ], batch size: 37, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:10:21,779 INFO [finetune.py:976] (5/7) Epoch 14, batch 2950, loss[loss=0.2143, simple_loss=0.2831, pruned_loss=0.07271, over 4752.00 frames. ], tot_loss[loss=0.1922, simple_loss=0.2591, pruned_loss=0.06263, over 951329.13 frames. ], batch size: 27, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:10:21,985 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0
+2023-03-26 17:10:41,986 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.681e+01 1.651e+02 1.924e+02 2.203e+02 4.754e+02, threshold=3.848e+02, percent-clipped=2.0
+2023-03-26 17:10:48,032 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77451.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 17:10:54,988 INFO [finetune.py:976] (5/7) Epoch 14, batch 3000, loss[loss=0.1631, simple_loss=0.2253, pruned_loss=0.05046, over 4712.00 frames. ], tot_loss[loss=0.1917, simple_loss=0.2587, pruned_loss=0.06229, over 951858.57 frames. ], batch size: 23, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:10:54,988 INFO [finetune.py:1001] (5/7) Computing validation loss
+2023-03-26 17:11:09,361 INFO [finetune.py:1010] (5/7) Epoch 14, validation: loss=0.1563, simple_loss=0.2268, pruned_loss=0.04293, over 2265189.00 frames.
+2023-03-26 17:11:09,361 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB
+2023-03-26 17:11:34,084 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77495.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:11:44,252 INFO [finetune.py:976] (5/7) Epoch 14, batch 3050, loss[loss=0.1828, simple_loss=0.2506, pruned_loss=0.05745, over 4755.00 frames. ], tot_loss[loss=0.1918, simple_loss=0.2591, pruned_loss=0.06219, over 951666.75 frames. ], batch size: 27, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:12:13,224 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.117e+02 1.567e+02 1.800e+02 2.244e+02 5.193e+02, threshold=3.600e+02, percent-clipped=2.0
+2023-03-26 17:12:13,931 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=77543.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:12:22,889 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0119, 1.9125, 1.6266, 1.8285, 1.7790, 1.7868, 1.8232, 2.5063],
+ device='cuda:5'), covar=tensor([0.3748, 0.4288, 0.3096, 0.3731, 0.4071, 0.2298, 0.3609, 0.1607],
+ device='cuda:5'), in_proj_covar=tensor([0.0285, 0.0259, 0.0225, 0.0277, 0.0246, 0.0214, 0.0249, 0.0223],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:12:35,686 INFO [finetune.py:976] (5/7) Epoch 14, batch 3100, loss[loss=0.2061, simple_loss=0.2636, pruned_loss=0.07432, over 4907.00 frames. ], tot_loss[loss=0.1895, simple_loss=0.2572, pruned_loss=0.06093, over 953038.69 frames. ], batch size: 36, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:12:39,911 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77567.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:12:49,594 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.72 vs. limit=5.0
+2023-03-26 17:12:59,913 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6808, 1.5143, 1.4226, 1.5414, 1.8759, 1.8203, 1.5745, 1.3987],
+ device='cuda:5'), covar=tensor([0.0254, 0.0293, 0.0515, 0.0289, 0.0188, 0.0438, 0.0310, 0.0388],
+ device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0109, 0.0142, 0.0113, 0.0101, 0.0106, 0.0096, 0.0108],
+ device='cuda:5'), out_proj_covar=tensor([7.2814e-05, 8.4671e-05, 1.1203e-04, 8.8164e-05, 7.8756e-05, 7.8553e-05,
+ 7.2363e-05, 8.2818e-05], device='cuda:5')
+2023-03-26 17:13:22,407 INFO [finetune.py:976] (5/7) Epoch 14, batch 3150, loss[loss=0.1473, simple_loss=0.227, pruned_loss=0.03381, over 4826.00 frames. ], tot_loss[loss=0.1877, simple_loss=0.2549, pruned_loss=0.06029, over 953135.14 frames. ], batch size: 39, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:13:25,549 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77616.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:13:35,438 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77629.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:13:37,887 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77633.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:13:43,292 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.167e+02 1.643e+02 1.968e+02 2.398e+02 4.679e+02, threshold=3.936e+02, percent-clipped=3.0
+2023-03-26 17:13:56,367 INFO [finetune.py:976] (5/7) Epoch 14, batch 3200, loss[loss=0.1819, simple_loss=0.2521, pruned_loss=0.05585, over 4832.00 frames. ], tot_loss[loss=0.1837, simple_loss=0.2506, pruned_loss=0.0584, over 954001.83 frames. ], batch size: 33, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:14:04,861 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1068, 2.0099, 1.7330, 1.9781, 1.8792, 1.8325, 1.8902, 2.6197],
+ device='cuda:5'), covar=tensor([0.4206, 0.4682, 0.3644, 0.4405, 0.4265, 0.2684, 0.4387, 0.1805],
+ device='cuda:5'), in_proj_covar=tensor([0.0284, 0.0260, 0.0225, 0.0277, 0.0246, 0.0214, 0.0249, 0.0223],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:14:07,164 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77677.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:14:07,175 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77677.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:14:15,106 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.82 vs. limit=5.0
+2023-03-26 17:14:29,512 INFO [finetune.py:976] (5/7) Epoch 14, batch 3250, loss[loss=0.3191, simple_loss=0.3579, pruned_loss=0.1401, over 4106.00 frames. ], tot_loss[loss=0.1862, simple_loss=0.2525, pruned_loss=0.05988, over 951752.63 frames. ], batch size: 65, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:14:29,631 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2574, 1.1685, 1.4531, 2.1018, 1.4507, 1.8169, 0.7890, 1.7422],
+ device='cuda:5'), covar=tensor([0.1351, 0.1252, 0.1018, 0.0666, 0.0792, 0.1074, 0.1371, 0.0597],
+ device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0116, 0.0134, 0.0165, 0.0101, 0.0138, 0.0125, 0.0102],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-26 17:14:47,329 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77738.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:14:49,601 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.063e+02 1.623e+02 1.885e+02 2.287e+02 7.301e+02, threshold=3.769e+02, percent-clipped=4.0
+2023-03-26 17:14:55,594 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77751.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 17:15:02,068 INFO [finetune.py:976] (5/7) Epoch 14, batch 3300, loss[loss=0.2316, simple_loss=0.2989, pruned_loss=0.08215, over 4300.00 frames. ], tot_loss[loss=0.1899, simple_loss=0.257, pruned_loss=0.0614, over 950137.71 frames. ], batch size: 65, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:15:27,676 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=77799.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:15:35,623 INFO [finetune.py:976] (5/7) Epoch 14, batch 3350, loss[loss=0.2363, simple_loss=0.2986, pruned_loss=0.08704, over 4841.00 frames. ], tot_loss[loss=0.1911, simple_loss=0.2585, pruned_loss=0.06187, over 953051.33 frames. ], batch size: 49, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:15:42,197 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9756, 1.3856, 1.9581, 1.8198, 1.6487, 1.5880, 1.7607, 1.7973],
+ device='cuda:5'), covar=tensor([0.4000, 0.4192, 0.3298, 0.3944, 0.4897, 0.3919, 0.4776, 0.3394],
+ device='cuda:5'), in_proj_covar=tensor([0.0241, 0.0237, 0.0254, 0.0264, 0.0261, 0.0235, 0.0275, 0.0233],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:15:52,884 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5594, 1.4808, 1.3935, 1.5551, 1.8616, 1.7417, 1.6184, 1.3444],
+ device='cuda:5'), covar=tensor([0.0365, 0.0281, 0.0572, 0.0273, 0.0226, 0.0422, 0.0268, 0.0418],
+ device='cuda:5'), in_proj_covar=tensor([0.0095, 0.0111, 0.0143, 0.0114, 0.0102, 0.0108, 0.0097, 0.0110],
+ device='cuda:5'), out_proj_covar=tensor([7.3569e-05, 8.5733e-05, 1.1324e-04, 8.9016e-05, 7.9573e-05, 7.9645e-05,
+ 7.3148e-05, 8.3951e-05], device='cuda:5')
+2023-03-26 17:15:57,263 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.144e+02 1.683e+02 1.958e+02 2.277e+02 5.309e+02, threshold=3.915e+02, percent-clipped=1.0
+2023-03-26 17:16:09,333 INFO [finetune.py:976] (5/7) Epoch 14, batch 3400, loss[loss=0.1824, simple_loss=0.2602, pruned_loss=0.05233, over 4865.00 frames. ], tot_loss[loss=0.1902, simple_loss=0.2582, pruned_loss=0.06115, over 953182.65 frames. ], batch size: 31, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:16:18,563 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77867.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:16:29,068 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.13 vs. limit=2.0
+2023-03-26 17:16:51,418 INFO [finetune.py:976] (5/7) Epoch 14, batch 3450, loss[loss=0.1797, simple_loss=0.252, pruned_loss=0.05373, over 4729.00 frames. ], tot_loss[loss=0.1893, simple_loss=0.2576, pruned_loss=0.06052, over 953503.29 frames. ], batch size: 26, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:16:53,841 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=77915.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:17:03,231 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77929.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:17:05,715 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77933.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:17:06,343 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7797, 1.5907, 1.4791, 1.8204, 1.9967, 1.8568, 1.2390, 1.4668],
+ device='cuda:5'), covar=tensor([0.2217, 0.2027, 0.1966, 0.1694, 0.1698, 0.1221, 0.2597, 0.1951],
+ device='cuda:5'), in_proj_covar=tensor([0.0237, 0.0206, 0.0209, 0.0190, 0.0239, 0.0183, 0.0214, 0.0198],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:17:11,408 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.160e+01 1.485e+02 1.825e+02 2.093e+02 4.049e+02, threshold=3.650e+02, percent-clipped=1.0
+2023-03-26 17:17:33,220 INFO [finetune.py:976] (5/7) Epoch 14, batch 3500, loss[loss=0.211, simple_loss=0.283, pruned_loss=0.06951, over 4820.00 frames. ], tot_loss[loss=0.1887, simple_loss=0.2561, pruned_loss=0.0607, over 951999.33 frames. ], batch size: 40, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:17:40,924 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77972.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:17:44,502 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=77977.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:17:46,958 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=77981.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:18:20,967 INFO [finetune.py:976] (5/7) Epoch 14, batch 3550, loss[loss=0.1393, simple_loss=0.2074, pruned_loss=0.03558, over 4761.00 frames. ], tot_loss[loss=0.1864, simple_loss=0.2531, pruned_loss=0.05982, over 953668.03 frames. ], batch size: 28, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:18:27,049 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4627, 1.3488, 1.3338, 1.3609, 0.8575, 2.3378, 0.8010, 1.2902],
+ device='cuda:5'), covar=tensor([0.3322, 0.2476, 0.2241, 0.2477, 0.2015, 0.0353, 0.2622, 0.1285],
+ device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0115, 0.0120, 0.0123, 0.0115, 0.0097, 0.0096, 0.0096],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 17:18:32,362 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0531, 2.7667, 2.2628, 1.3655, 2.5472, 2.4290, 2.1765, 2.4841],
+ device='cuda:5'), covar=tensor([0.0744, 0.0690, 0.1647, 0.1982, 0.1214, 0.1593, 0.1869, 0.0912],
+ device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0195, 0.0199, 0.0184, 0.0214, 0.0208, 0.0223, 0.0196],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:18:35,871 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78033.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:18:41,166 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.505e+02 1.960e+02 2.472e+02 4.194e+02, threshold=3.920e+02, percent-clipped=4.0
+2023-03-26 17:18:54,341 INFO [finetune.py:976] (5/7) Epoch 14, batch 3600, loss[loss=0.1623, simple_loss=0.2419, pruned_loss=0.04134, over 4899.00 frames. ], tot_loss[loss=0.1835, simple_loss=0.2499, pruned_loss=0.05858, over 954443.37 frames. ], batch size: 43, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:19:10,078 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.16 vs. limit=5.0
+2023-03-26 17:19:28,413 INFO [finetune.py:976] (5/7) Epoch 14, batch 3650, loss[loss=0.2172, simple_loss=0.2965, pruned_loss=0.06895, over 4821.00 frames. ], tot_loss[loss=0.1846, simple_loss=0.2514, pruned_loss=0.05893, over 953856.90 frames. ], batch size: 39, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:19:35,154 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0024, 0.9826, 0.9284, 1.1237, 1.2005, 1.1208, 0.9672, 0.9354],
+ device='cuda:5'), covar=tensor([0.0363, 0.0275, 0.0623, 0.0291, 0.0290, 0.0472, 0.0328, 0.0437],
+ device='cuda:5'), in_proj_covar=tensor([0.0095, 0.0111, 0.0143, 0.0115, 0.0102, 0.0108, 0.0098, 0.0110],
+ device='cuda:5'), out_proj_covar=tensor([7.3679e-05, 8.5854e-05, 1.1329e-04, 8.9437e-05, 7.9590e-05, 8.0299e-05,
+ 7.3587e-05, 8.4191e-05], device='cuda:5')
+2023-03-26 17:19:35,372 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. limit=2.0
+2023-03-26 17:19:48,730 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.050e+02 1.711e+02 2.033e+02 2.406e+02 8.151e+02, threshold=4.067e+02, percent-clipped=4.0
+2023-03-26 17:19:54,192 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6913, 3.9874, 3.7257, 1.8165, 4.1016, 2.9829, 1.0934, 2.7729],
+ device='cuda:5'), covar=tensor([0.2538, 0.1898, 0.1520, 0.3725, 0.0958, 0.0979, 0.4537, 0.1626],
+ device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0173, 0.0159, 0.0127, 0.0156, 0.0121, 0.0144, 0.0123],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-26 17:20:00,533 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9876, 1.8124, 1.6200, 2.0330, 2.5361, 2.0425, 1.5603, 1.5721],
+ device='cuda:5'), covar=tensor([0.2051, 0.1876, 0.1828, 0.1523, 0.1458, 0.1049, 0.2295, 0.1807],
+ device='cuda:5'), in_proj_covar=tensor([0.0241, 0.0209, 0.0211, 0.0192, 0.0243, 0.0186, 0.0217, 0.0200],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:20:02,234 INFO [finetune.py:976] (5/7) Epoch 14, batch 3700, loss[loss=0.1825, simple_loss=0.2466, pruned_loss=0.0592, over 4903.00 frames. ], tot_loss[loss=0.1865, simple_loss=0.2537, pruned_loss=0.05969, over 951246.44 frames. ], batch size: 43, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:20:35,984 INFO [finetune.py:976] (5/7) Epoch 14, batch 3750, loss[loss=0.2078, simple_loss=0.2761, pruned_loss=0.06974, over 4834.00 frames. ], tot_loss[loss=0.1893, simple_loss=0.2564, pruned_loss=0.06104, over 950798.16 frames. ], batch size: 30, lr: 3.54e-03, grad_scale: 32.0
+2023-03-26 17:20:49,420 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78232.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:20:55,809 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.186e+02 1.631e+02 1.949e+02 2.239e+02 4.423e+02, threshold=3.899e+02, percent-clipped=1.0
+2023-03-26 17:21:08,216 INFO [finetune.py:976] (5/7) Epoch 14, batch 3800, loss[loss=0.2078, simple_loss=0.2786, pruned_loss=0.06847, over 4807.00 frames. ], tot_loss[loss=0.1888, simple_loss=0.2565, pruned_loss=0.06056, over 951352.00 frames. ], batch size: 45, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:21:15,896 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78272.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:21:31,061 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78293.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 17:21:49,335 INFO [finetune.py:976] (5/7) Epoch 14, batch 3850, loss[loss=0.1483, simple_loss=0.2225, pruned_loss=0.03703, over 4777.00 frames. ], tot_loss[loss=0.1885, simple_loss=0.256, pruned_loss=0.06044, over 951933.54 frames. ], batch size: 26, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:21:51,149 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7446, 1.1788, 0.8023, 1.5656, 2.1202, 1.3395, 1.4478, 1.5267],
+ device='cuda:5'), covar=tensor([0.1502, 0.2323, 0.2114, 0.1278, 0.1807, 0.1980, 0.1513, 0.2029],
+ device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0094, 0.0111, 0.0092, 0.0119, 0.0094, 0.0099, 0.0089],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-26 17:21:55,866 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=78320.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:22:03,807 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78333.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:22:04,942 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8639, 1.3436, 1.8264, 1.8006, 1.6420, 1.5525, 1.7526, 1.6626],
+ device='cuda:5'), covar=tensor([0.3923, 0.4008, 0.3516, 0.3661, 0.4905, 0.3907, 0.4398, 0.3245],
+ device='cuda:5'), in_proj_covar=tensor([0.0244, 0.0239, 0.0257, 0.0267, 0.0264, 0.0238, 0.0278, 0.0236],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:22:10,191 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.042e+02 1.518e+02 1.784e+02 2.158e+02 4.566e+02, threshold=3.568e+02, percent-clipped=2.0
+2023-03-26 17:22:18,809 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.11 vs. limit=5.0
+2023-03-26 17:22:22,702 INFO [finetune.py:976] (5/7) Epoch 14, batch 3900, loss[loss=0.1812, simple_loss=0.251, pruned_loss=0.0557, over 4903.00 frames. ], tot_loss[loss=0.1867, simple_loss=0.2541, pruned_loss=0.05965, over 953621.33 frames. ], batch size: 35, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:22:38,355 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8286, 1.7845, 2.0579, 1.4278, 1.8662, 2.0342, 1.6632, 2.1840],
+ device='cuda:5'), covar=tensor([0.1086, 0.1703, 0.1296, 0.1798, 0.0789, 0.1307, 0.2210, 0.0676],
+ device='cuda:5'), in_proj_covar=tensor([0.0193, 0.0204, 0.0189, 0.0189, 0.0175, 0.0212, 0.0215, 0.0197],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:22:45,627 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=78381.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:22:59,754 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6526, 2.3857, 1.9933, 1.0182, 2.2094, 2.0090, 1.9139, 2.2465],
+ device='cuda:5'), covar=tensor([0.0796, 0.0906, 0.1692, 0.2063, 0.1494, 0.2302, 0.2072, 0.0930],
+ device='cuda:5'), in_proj_covar=tensor([0.0167, 0.0195, 0.0199, 0.0183, 0.0213, 0.0207, 0.0222, 0.0196],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:23:09,955 INFO [finetune.py:976] (5/7) Epoch 14, batch 3950, loss[loss=0.141, simple_loss=0.2139, pruned_loss=0.03408, over 4748.00 frames. ], tot_loss[loss=0.1842, simple_loss=0.2514, pruned_loss=0.05848, over 954889.17 frames. ], batch size: 27, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:23:37,906 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.052e+02 1.587e+02 1.893e+02 2.259e+02 3.905e+02, threshold=3.786e+02, percent-clipped=1.0
+2023-03-26 17:23:46,351 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1478, 2.0900, 1.6670, 2.2009, 2.1481, 1.8415, 2.5488, 2.2390],
+ device='cuda:5'), covar=tensor([0.1397, 0.2364, 0.3161, 0.2776, 0.2457, 0.1715, 0.2863, 0.1763],
+ device='cuda:5'), in_proj_covar=tensor([0.0181, 0.0188, 0.0235, 0.0254, 0.0245, 0.0200, 0.0212, 0.0200],
+ device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:23:50,389 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78460.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:23:50,893 INFO [finetune.py:976] (5/7) Epoch 14, batch 4000, loss[loss=0.199, simple_loss=0.2726, pruned_loss=0.0627, over 4825.00 frames. ], tot_loss[loss=0.185, simple_loss=0.2519, pruned_loss=0.05905, over 955270.13 frames. ], batch size: 39, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:24:21,425 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6136, 2.3042, 1.8412, 0.8638, 2.1442, 2.0472, 2.0131, 2.1515],
+ device='cuda:5'), covar=tensor([0.0878, 0.0745, 0.1512, 0.2132, 0.1321, 0.2034, 0.1784, 0.0932],
+ device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0195, 0.0200, 0.0184, 0.0214, 0.0207, 0.0222, 0.0196],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:24:24,844 INFO [finetune.py:976] (5/7) Epoch 14, batch 4050, loss[loss=0.2335, simple_loss=0.2947, pruned_loss=0.08618, over 4846.00 frames. ], tot_loss[loss=0.1878, simple_loss=0.255, pruned_loss=0.06029, over 955124.85 frames. ], batch size: 49, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:24:31,513 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78521.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:24:36,091 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7279, 1.6465, 1.5725, 1.6673, 1.2658, 4.4655, 1.6613, 2.2337],
+ device='cuda:5'), covar=tensor([0.3416, 0.2609, 0.2188, 0.2413, 0.1780, 0.0092, 0.2389, 0.1157],
+ device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0115, 0.0121, 0.0124, 0.0115, 0.0097, 0.0097, 0.0097],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 17:24:45,460 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.285e+01 1.581e+02 1.919e+02 2.315e+02 3.488e+02, threshold=3.837e+02, percent-clipped=0.0
+2023-03-26 17:24:54,867 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78556.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:24:57,779 INFO [finetune.py:976] (5/7) Epoch 14, batch 4100, loss[loss=0.2121, simple_loss=0.2955, pruned_loss=0.06436, over 4818.00 frames. ], tot_loss[loss=0.1891, simple_loss=0.2572, pruned_loss=0.06051, over 956708.64 frames. ], batch size: 39, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:25:16,502 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78588.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 17:25:31,553 INFO [finetune.py:976] (5/7) Epoch 14, batch 4150, loss[loss=0.2394, simple_loss=0.3069, pruned_loss=0.08598, over 4897.00 frames. ], tot_loss[loss=0.19, simple_loss=0.2583, pruned_loss=0.06086, over 956225.42 frames. ], batch size: 35, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:25:35,323 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78617.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:25:52,380 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.171e+02 1.587e+02 1.869e+02 2.218e+02 3.242e+02, threshold=3.739e+02, percent-clipped=0.0
+2023-03-26 17:25:53,716 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4602, 2.5904, 2.3721, 1.6876, 2.4195, 2.7839, 2.7019, 2.2263],
+ device='cuda:5'), covar=tensor([0.0583, 0.0584, 0.0735, 0.0964, 0.0869, 0.0643, 0.0596, 0.0994],
+ device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0133, 0.0141, 0.0124, 0.0124, 0.0141, 0.0140, 0.0162],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:26:04,877 INFO [finetune.py:976] (5/7) Epoch 14, batch 4200, loss[loss=0.2186, simple_loss=0.2725, pruned_loss=0.08235, over 4862.00 frames. ], tot_loss[loss=0.1884, simple_loss=0.2576, pruned_loss=0.05961, over 954759.70 frames. ], batch size: 34, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:26:27,958 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5981, 2.5549, 2.0920, 2.8161, 2.5168, 2.3058, 3.1618, 2.6440],
+ device='cuda:5'), covar=tensor([0.1334, 0.2349, 0.3152, 0.2539, 0.2588, 0.1565, 0.2693, 0.1855],
+ device='cuda:5'), in_proj_covar=tensor([0.0179, 0.0186, 0.0233, 0.0252, 0.0243, 0.0198, 0.0211, 0.0198],
+ device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:26:37,997 INFO [finetune.py:976] (5/7) Epoch 14, batch 4250, loss[loss=0.1757, simple_loss=0.2378, pruned_loss=0.05681, over 4832.00 frames. ], tot_loss[loss=0.187, simple_loss=0.2558, pruned_loss=0.05907, over 955992.41 frames. ], batch size: 39, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:27:05,885 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.090e+02 1.501e+02 1.722e+02 2.085e+02 5.543e+02, threshold=3.444e+02, percent-clipped=2.0
+2023-03-26 17:27:21,261 INFO [finetune.py:976] (5/7) Epoch 14, batch 4300, loss[loss=0.178, simple_loss=0.2504, pruned_loss=0.05284, over 4817.00 frames. ], tot_loss[loss=0.1844, simple_loss=0.2524, pruned_loss=0.05817, over 956273.62 frames. ], batch size: 41, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:27:22,656 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.69 vs. limit=5.0
+2023-03-26 17:27:48,557 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8201, 1.0093, 1.8822, 1.6887, 1.5832, 1.4907, 1.5693, 1.6785],
+ device='cuda:5'), covar=tensor([0.3178, 0.3488, 0.2832, 0.3221, 0.4071, 0.3273, 0.3714, 0.2682],
+ device='cuda:5'), in_proj_covar=tensor([0.0244, 0.0239, 0.0256, 0.0266, 0.0265, 0.0238, 0.0278, 0.0236],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:27:59,985 INFO [finetune.py:976] (5/7) Epoch 14, batch 4350, loss[loss=0.1696, simple_loss=0.2373, pruned_loss=0.05095, over 4809.00 frames. ], tot_loss[loss=0.183, simple_loss=0.2499, pruned_loss=0.05804, over 955033.48 frames. ], batch size: 45, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:28:06,804 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78816.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:28:09,024 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-03-26 17:28:34,426 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.179e+02 1.620e+02 1.919e+02 2.276e+02 4.946e+02, threshold=3.838e+02, percent-clipped=3.0
+2023-03-26 17:28:51,193 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9529, 1.9517, 2.0646, 1.4040, 2.0004, 2.0749, 2.0521, 1.6359],
+ device='cuda:5'), covar=tensor([0.0612, 0.0644, 0.0642, 0.0912, 0.0620, 0.0687, 0.0642, 0.1121],
+ device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0133, 0.0142, 0.0124, 0.0124, 0.0141, 0.0141, 0.0162],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:28:54,136 INFO [finetune.py:976] (5/7) Epoch 14, batch 4400, loss[loss=0.1702, simple_loss=0.2416, pruned_loss=0.04935, over 4796.00 frames. ], tot_loss[loss=0.1844, simple_loss=0.2513, pruned_loss=0.05877, over 954963.98 frames. ], batch size: 29, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:29:12,596 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78888.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:29:27,938 INFO [finetune.py:976] (5/7) Epoch 14, batch 4450, loss[loss=0.1735, simple_loss=0.2608, pruned_loss=0.0431, over 4846.00 frames. ], tot_loss[loss=0.1876, simple_loss=0.2551, pruned_loss=0.06005, over 955962.91 frames. ], batch size: 47, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:29:28,613 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78912.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:29:44,125 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=78936.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:29:48,696 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.157e+02 1.623e+02 2.075e+02 2.454e+02 4.700e+02, threshold=4.150e+02, percent-clipped=3.0
+2023-03-26 17:30:01,642 INFO [finetune.py:976] (5/7) Epoch 14, batch 4500, loss[loss=0.2297, simple_loss=0.2974, pruned_loss=0.081, over 4815.00 frames. ], tot_loss[loss=0.189, simple_loss=0.2572, pruned_loss=0.06042, over 957407.61 frames. ], batch size: 39, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:30:04,129 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78965.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:30:34,874 INFO [finetune.py:976] (5/7) Epoch 14, batch 4550, loss[loss=0.1909, simple_loss=0.2699, pruned_loss=0.05598, over 4902.00 frames. ], tot_loss[loss=0.189, simple_loss=0.2577, pruned_loss=0.06012, over 956491.36 frames. ], batch size: 37, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:30:44,018 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79026.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:30:54,526 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.032e+02 1.627e+02 1.835e+02 2.182e+02 4.419e+02, threshold=3.671e+02, percent-clipped=1.0
+2023-03-26 17:31:08,637 INFO [finetune.py:976] (5/7) Epoch 14, batch 4600, loss[loss=0.1873, simple_loss=0.2534, pruned_loss=0.06059, over 4893.00 frames. ], tot_loss[loss=0.1875, simple_loss=0.2564, pruned_loss=0.05928, over 955318.64 frames. ], batch size: 35, lr: 3.53e-03, grad_scale: 64.0
+2023-03-26 17:31:10,544 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7082, 1.9610, 1.6029, 1.6333, 2.2090, 2.2078, 1.9580, 1.8863],
+ device='cuda:5'), covar=tensor([0.0397, 0.0306, 0.0586, 0.0332, 0.0328, 0.0557, 0.0317, 0.0361],
+ device='cuda:5'), in_proj_covar=tensor([0.0095, 0.0109, 0.0142, 0.0114, 0.0101, 0.0107, 0.0097, 0.0109],
+ device='cuda:5'), out_proj_covar=tensor([7.3450e-05, 8.4525e-05, 1.1284e-04, 8.8880e-05, 7.8931e-05, 7.9485e-05,
+ 7.2734e-05, 8.3428e-05], device='cuda:5')
+2023-03-26 17:31:42,444 INFO [finetune.py:976] (5/7) Epoch 14, batch 4650, loss[loss=0.189, simple_loss=0.2474, pruned_loss=0.06525, over 4862.00 frames. ], tot_loss[loss=0.186, simple_loss=0.2542, pruned_loss=0.05894, over 955683.43 frames. ], batch size: 31, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:31:42,750 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.07 vs. limit=5.0
+2023-03-26 17:31:45,569 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79116.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:32:02,946 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.138e+02 1.520e+02 1.886e+02 2.415e+02 4.094e+02, threshold=3.771e+02, percent-clipped=1.0
+2023-03-26 17:32:11,922 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9452, 1.8385, 1.6515, 2.0383, 2.3858, 2.0826, 1.7838, 1.5725],
+ device='cuda:5'), covar=tensor([0.2041, 0.1939, 0.1827, 0.1515, 0.1741, 0.1103, 0.2274, 0.1863],
+ device='cuda:5'), in_proj_covar=tensor([0.0241, 0.0209, 0.0213, 0.0192, 0.0243, 0.0186, 0.0216, 0.0199],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:32:17,229 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0
+2023-03-26 17:32:18,139 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4560, 1.3805, 1.9498, 2.9693, 2.0239, 2.1831, 1.1314, 2.3874],
+ device='cuda:5'), covar=tensor([0.2006, 0.1538, 0.1287, 0.0593, 0.0857, 0.1260, 0.1784, 0.0626],
+ device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0117, 0.0133, 0.0164, 0.0101, 0.0137, 0.0126, 0.0102],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-26 17:32:24,492 INFO [finetune.py:976] (5/7) Epoch 14, batch 4700, loss[loss=0.1721, simple_loss=0.2408, pruned_loss=0.0517, over 4912.00 frames. ], tot_loss[loss=0.1836, simple_loss=0.2512, pruned_loss=0.058, over 955883.79 frames. ], batch size: 32, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:32:26,328 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=79164.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:32:40,936 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0
+2023-03-26 17:32:58,004 INFO [finetune.py:976] (5/7) Epoch 14, batch 4750, loss[loss=0.1939, simple_loss=0.2637, pruned_loss=0.0621, over 4935.00 frames. ], tot_loss[loss=0.1838, simple_loss=0.2508, pruned_loss=0.05843, over 955773.20 frames. ], batch size: 33, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:32:59,188 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79212.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:33:32,021 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.558e+01 1.654e+02 1.996e+02 2.359e+02 6.861e+02, threshold=3.993e+02, percent-clipped=2.0
+2023-03-26 17:33:51,146 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=79260.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:33:51,674 INFO [finetune.py:976] (5/7) Epoch 14, batch 4800, loss[loss=0.2575, simple_loss=0.3132, pruned_loss=0.1009, over 4746.00 frames. ], tot_loss[loss=0.1876, simple_loss=0.2545, pruned_loss=0.06034, over 955602.00 frames. ], batch size: 54, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:34:10,284 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79285.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 17:34:27,364 INFO [finetune.py:976] (5/7) Epoch 14, batch 4850, loss[loss=0.161, simple_loss=0.237, pruned_loss=0.04255, over 4893.00 frames. ], tot_loss[loss=0.1875, simple_loss=0.2557, pruned_loss=0.05965, over 957457.22 frames. ], batch size: 35, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:34:34,299 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9038, 1.7587, 1.9833, 1.1928, 1.8986, 1.9530, 1.9159, 1.5532],
+ device='cuda:5'), covar=tensor([0.0525, 0.0655, 0.0545, 0.0889, 0.0692, 0.0713, 0.0544, 0.1070],
+ device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0131, 0.0140, 0.0121, 0.0122, 0.0139, 0.0139, 0.0161],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:34:35,467 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79321.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:34:42,709 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79333.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:34:49,129 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.151e+02 1.639e+02 1.937e+02 2.312e+02 4.640e+02, threshold=3.873e+02, percent-clipped=1.0
+2023-03-26 17:34:49,702 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0
+2023-03-26 17:34:51,068 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79346.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 17:35:00,513 INFO [finetune.py:976] (5/7) Epoch 14, batch 4900, loss[loss=0.1976, simple_loss=0.2763, pruned_loss=0.0594, over 4723.00 frames. ], tot_loss[loss=0.1899, simple_loss=0.258, pruned_loss=0.06094, over 954335.19 frames. ], batch size: 54, lr: 3.53e-03, grad_scale: 32.0
+2023-03-26 17:35:03,441 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79364.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:35:11,530 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79375.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 17:35:23,203 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79394.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:35:34,336 INFO [finetune.py:976] (5/7) Epoch 14, batch 4950, loss[loss=0.1757, simple_loss=0.2537, pruned_loss=0.04886, over 4813.00 frames. ], tot_loss[loss=0.1902, simple_loss=0.2587, pruned_loss=0.06085, over 953748.96 frames. ], batch size: 40, lr: 3.52e-03, grad_scale: 32.0
+2023-03-26 17:35:45,285 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79425.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:35:51,953 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79436.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 17:35:55,993 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.049e+02 1.541e+02 1.877e+02 2.434e+02 3.585e+02, threshold=3.755e+02, percent-clipped=0.0
+2023-03-26 17:36:07,907 INFO [finetune.py:976] (5/7) Epoch 14, batch 5000, loss[loss=0.2193, simple_loss=0.2838, pruned_loss=0.07737, over 4812.00 frames. ], tot_loss[loss=0.1881, simple_loss=0.2562, pruned_loss=0.06004, over 952913.37 frames. ], batch size: 39, lr: 3.52e-03, grad_scale: 32.0
+2023-03-26 17:36:28,749 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.79 vs. limit=5.0
+2023-03-26 17:36:35,021 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6173, 0.6730, 1.7107, 1.5596, 1.4786, 1.3752, 1.4898, 1.5856],
+ device='cuda:5'), covar=tensor([0.3588, 0.4024, 0.3231, 0.3571, 0.4493, 0.3582, 0.4020, 0.2983],
+ device='cuda:5'), in_proj_covar=tensor([0.0245, 0.0241, 0.0258, 0.0268, 0.0266, 0.0240, 0.0280, 0.0237],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:36:39,080 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5845, 3.7233, 3.4854, 1.6558, 3.7636, 2.7719, 0.9969, 2.5245],
+ device='cuda:5'), covar=tensor([0.2380, 0.1862, 0.1629, 0.3581, 0.1131, 0.1152, 0.4284, 0.1617],
+ device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0173, 0.0160, 0.0128, 0.0157, 0.0122, 0.0146, 0.0123],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-26 17:36:41,413 INFO [finetune.py:976] (5/7) Epoch 14, batch 5050, loss[loss=0.1482, simple_loss=0.2221, pruned_loss=0.03717, over 4718.00 frames. ], tot_loss[loss=0.186, simple_loss=0.2538, pruned_loss=0.05912, over 955292.28 frames. ], batch size: 59, lr: 3.52e-03, grad_scale: 32.0
+2023-03-26 17:36:54,556 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.98 vs. limit=5.0
+2023-03-26 17:37:02,695 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.152e+02 1.527e+02 1.776e+02 2.127e+02 3.568e+02, threshold=3.553e+02, percent-clipped=0.0
+2023-03-26 17:37:14,551 INFO [finetune.py:976] (5/7) Epoch 14, batch 5100, loss[loss=0.2323, simple_loss=0.283, pruned_loss=0.09079, over 4819.00 frames. ], tot_loss[loss=0.1824, simple_loss=0.25, pruned_loss=0.05742, over 957498.60 frames. ], batch size: 38, lr: 3.52e-03, grad_scale: 32.0
+2023-03-26 17:37:16,718 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.16 vs. limit=5.0
+2023-03-26 17:37:41,637 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79585.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:37:57,945 INFO [finetune.py:976] (5/7) Epoch 14, batch 5150, loss[loss=0.1437, simple_loss=0.223, pruned_loss=0.03219, over 4756.00 frames. ], tot_loss[loss=0.1831, simple_loss=0.2507, pruned_loss=0.05781, over 956793.90 frames. ], batch size: 28, lr: 3.52e-03, grad_scale: 32.0
+2023-03-26 17:38:04,636 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79621.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:38:20,366 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79641.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 17:38:21,529 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.081e+02 1.601e+02 1.924e+02 2.369e+02 3.228e+02, threshold=3.849e+02, percent-clipped=0.0
+2023-03-26 17:38:23,474 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79646.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:38:38,691 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79657.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:38:41,521 INFO [finetune.py:976] (5/7) Epoch 14, batch 5200, loss[loss=0.192, simple_loss=0.2685, pruned_loss=0.05774, over 4912.00 frames. ], tot_loss[loss=0.1878, simple_loss=0.2553, pruned_loss=0.06009, over 954981.35 frames. ], batch size: 36, lr: 3.52e-03, grad_scale: 32.0
+2023-03-26 17:38:50,980 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=79669.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:39:02,937 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9694, 1.5321, 0.9482, 1.8095, 2.2684, 1.5504, 1.7896, 1.7252],
+ device='cuda:5'), covar=tensor([0.1405, 0.1952, 0.2028, 0.1193, 0.1828, 0.1907, 0.1396, 0.2028],
+ device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0111, 0.0092, 0.0119, 0.0094, 0.0099, 0.0089],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-26 17:39:10,039 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79689.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:39:27,299 INFO [finetune.py:976] (5/7) Epoch 14, batch 5250, loss[loss=0.1798, simple_loss=0.2462, pruned_loss=0.05672, over 4912.00 frames. ], tot_loss[loss=0.1885, simple_loss=0.2564, pruned_loss=0.06025, over 952920.95 frames. ], batch size: 37, lr: 3.52e-03, grad_scale: 32.0
+2023-03-26 17:39:32,145 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79718.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:39:33,309 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79720.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:39:40,420 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79731.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 17:39:49,075 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.058e+02 1.683e+02 1.987e+02 2.478e+02 3.642e+02, threshold=3.974e+02, percent-clipped=0.0
+2023-03-26 17:39:59,973 INFO [finetune.py:976] (5/7) Epoch 14, batch 5300, loss[loss=0.2163, simple_loss=0.2939, pruned_loss=0.06932, over 4886.00 frames. ], tot_loss[loss=0.1897, simple_loss=0.2575, pruned_loss=0.06095, over 951376.52 frames. ], batch size: 43, lr: 3.52e-03, grad_scale: 32.0
+2023-03-26 17:40:00,725 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79762.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:40:07,781 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7838, 1.2050, 0.9222, 1.5926, 2.0932, 1.2993, 1.6397, 1.5859],
+ device='cuda:5'), covar=tensor([0.1405, 0.2032, 0.1804, 0.1207, 0.1819, 0.1929, 0.1355, 0.1869],
+ device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0111, 0.0092, 0.0119, 0.0094, 0.0099, 0.0089],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-26 17:40:33,377 INFO [finetune.py:976] (5/7) Epoch 14, batch 5350, loss[loss=0.2151, simple_loss=0.2794, pruned_loss=0.07541, over 4735.00 frames. ], tot_loss[loss=0.1887, simple_loss=0.2572, pruned_loss=0.06014, over 950888.88 frames. ], batch size: 54, lr: 3.52e-03, grad_scale: 32.0
+2023-03-26 17:40:41,366 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79823.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:40:55,394 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.090e+02 1.503e+02 1.776e+02 2.291e+02 4.117e+02, threshold=3.553e+02, percent-clipped=3.0
+2023-03-26 17:41:06,813 INFO [finetune.py:976] (5/7) Epoch 14, batch 5400, loss[loss=0.1674, simple_loss=0.2394, pruned_loss=0.04766, over 4757.00 frames. ], tot_loss[loss=0.1883, simple_loss=0.2561, pruned_loss=0.0602, over 950961.74 frames. ], batch size: 27, lr: 3.52e-03, grad_scale: 32.0
+2023-03-26 17:41:40,251 INFO [finetune.py:976] (5/7) Epoch 14, batch 5450, loss[loss=0.14, simple_loss=0.2144, pruned_loss=0.03282, over 4761.00 frames. ], tot_loss[loss=0.1866, simple_loss=0.2541, pruned_loss=0.05952, over 953002.30 frames. ], batch size: 27, lr: 3.52e-03, grad_scale: 32.0
+2023-03-26 17:41:43,563 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.99 vs. limit=5.0
+2023-03-26 17:41:59,678 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79941.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:41:59,695 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79941.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 17:42:00,808 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.028e+02 1.485e+02 1.773e+02 2.175e+02 4.141e+02, threshold=3.546e+02, percent-clipped=3.0
+2023-03-26 17:42:14,250 INFO [finetune.py:976] (5/7) Epoch 14, batch 5500, loss[loss=0.1816, simple_loss=0.2496, pruned_loss=0.0568, over 4893.00 frames. ], tot_loss[loss=0.1836, simple_loss=0.2511, pruned_loss=0.05808, over 952854.06 frames. ], batch size: 35, lr: 3.52e-03, grad_scale: 32.0
+2023-03-26 17:42:32,376 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=79989.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 17:42:32,393 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79989.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:42:54,668 INFO [finetune.py:976] (5/7) Epoch 14, batch 5550, loss[loss=0.1741, simple_loss=0.2584, pruned_loss=0.04487, over 4812.00 frames. ], tot_loss[loss=0.1841, simple_loss=0.2521, pruned_loss=0.05805, over 954166.07 frames. ], batch size: 41, lr: 3.52e-03, grad_scale: 32.0
+2023-03-26 17:42:55,987 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80013.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:43:00,269 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80020.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:43:07,429 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80031.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 17:43:11,544 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=80037.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:43:15,066 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.061e+02 1.609e+02 1.862e+02 2.441e+02 4.163e+02, threshold=3.724e+02, percent-clipped=2.0
+2023-03-26 17:43:25,569 INFO [finetune.py:976] (5/7) Epoch 14, batch 5600, loss[loss=0.2072, simple_loss=0.273, pruned_loss=0.07071, over 4867.00 frames. ], tot_loss[loss=0.1857, simple_loss=0.2542, pruned_loss=0.05858, over 952436.66 frames. ], batch size: 31, lr: 3.52e-03, grad_scale: 32.0
+2023-03-26 17:43:29,673 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=80068.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:43:32,062 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8600, 1.7534, 1.6398, 1.9416, 2.0768, 1.9448, 1.4169, 1.6320],
+ device='cuda:5'), covar=tensor([0.1961, 0.1818, 0.1773, 0.1481, 0.1446, 0.1019, 0.2285, 0.1815],
+ device='cuda:5'), in_proj_covar=tensor([0.0239, 0.0207, 0.0211, 0.0191, 0.0241, 0.0185, 0.0215, 0.0199],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:43:36,044 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=80079.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 17:44:12,236 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8207, 1.2649, 1.8314, 1.7458, 1.5443, 1.5158, 1.7258, 1.6371],
+ device='cuda:5'), covar=tensor([0.3178, 0.3799, 0.2940, 0.3311, 0.4313, 0.3635, 0.3926, 0.2919],
+ device='cuda:5'), in_proj_covar=tensor([0.0244, 0.0240, 0.0257, 0.0267, 0.0266, 0.0239, 0.0280, 0.0236],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:44:12,673 INFO [finetune.py:976] (5/7) Epoch 14, batch 5650, loss[loss=0.2452, simple_loss=0.2981, pruned_loss=0.09617, over 4759.00 frames. ], tot_loss[loss=0.1889, simple_loss=0.258, pruned_loss=0.05991, over 953449.28 frames. ], batch size: 59, lr: 3.52e-03, grad_scale: 32.0
+2023-03-26 17:44:21,807 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80118.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 17:44:30,243 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1963, 2.5550, 2.3803, 1.8626, 2.3570, 2.7286, 2.4903, 2.2128],
+ device='cuda:5'), covar=tensor([0.0681, 0.0611, 0.0756, 0.0882, 0.0724, 0.0735, 0.0675, 0.0975],
+ device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0132, 0.0141, 0.0123, 0.0124, 0.0141, 0.0141, 0.0162],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:44:30,841 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8285, 1.8755, 1.9446, 1.2400, 2.0189, 1.9592, 1.9152, 1.6742],
+ device='cuda:5'), covar=tensor([0.0631, 0.0642, 0.0676, 0.0937, 0.0964, 0.0787, 0.0664, 0.1098],
+ device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0132, 0.0141, 0.0123, 0.0124, 0.0141, 0.0141, 0.0162],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:44:41,677 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.115e+02 1.565e+02 1.838e+02 2.201e+02 4.652e+02, threshold=3.676e+02, percent-clipped=2.0
+2023-03-26 17:44:56,309 INFO [finetune.py:976] (5/7) Epoch 14, batch 5700, loss[loss=0.1871, simple_loss=0.2447, pruned_loss=0.06476, over 4383.00 frames. ], tot_loss[loss=0.1867, simple_loss=0.2546, pruned_loss=0.05935, over 936176.03 frames. ], batch size: 19, lr: 3.52e-03, grad_scale: 32.0
+2023-03-26 17:45:28,233 INFO [finetune.py:976] (5/7) Epoch 15, batch 0, loss[loss=0.1797, simple_loss=0.2473, pruned_loss=0.05609, over 4874.00 frames. ], tot_loss[loss=0.1797, simple_loss=0.2473, pruned_loss=0.05609, over 4874.00 frames. ], batch size: 35, lr: 3.52e-03, grad_scale: 32.0
+2023-03-26 17:45:28,233 INFO [finetune.py:1001] (5/7) Computing validation loss
+2023-03-26 17:45:30,656 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3069, 2.0758, 1.4911, 0.6030, 1.8889, 1.9178, 1.7980, 1.9353],
+ device='cuda:5'), covar=tensor([0.0852, 0.0899, 0.1685, 0.2135, 0.1448, 0.2609, 0.2375, 0.0853],
+ device='cuda:5'), in_proj_covar=tensor([0.0167, 0.0195, 0.0200, 0.0183, 0.0213, 0.0207, 0.0224, 0.0197],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 17:45:42,542 INFO [finetune.py:1010] (5/7) Epoch 15, validation: loss=0.1586, simple_loss=0.2288, pruned_loss=0.0442, over 2265189.00 frames.
+2023-03-26 17:45:42,542 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 17:45:42,681 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3036, 2.3746, 1.7734, 2.6168, 2.3386, 1.9449, 2.8138, 2.4025], + device='cuda:5'), covar=tensor([0.1393, 0.2315, 0.3203, 0.2615, 0.2437, 0.1634, 0.3012, 0.1809], + device='cuda:5'), in_proj_covar=tensor([0.0180, 0.0187, 0.0234, 0.0255, 0.0245, 0.0200, 0.0213, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 17:46:12,249 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1232, 1.3653, 0.6895, 2.0195, 2.2990, 1.8217, 1.6893, 1.9786], + device='cuda:5'), covar=tensor([0.1300, 0.1965, 0.2310, 0.1083, 0.1886, 0.1998, 0.1368, 0.1793], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0111, 0.0093, 0.0120, 0.0094, 0.0099, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 17:46:15,191 INFO [finetune.py:976] (5/7) Epoch 15, batch 50, loss[loss=0.1693, simple_loss=0.2469, pruned_loss=0.04588, over 4805.00 frames. ], tot_loss[loss=0.1922, simple_loss=0.2597, pruned_loss=0.06234, over 212365.61 frames. ], batch size: 41, lr: 3.52e-03, grad_scale: 32.0 +2023-03-26 17:46:17,641 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80241.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:46:17,693 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0179, 1.9481, 1.3954, 1.9616, 2.0333, 1.6855, 2.6438, 1.9837], + device='cuda:5'), covar=tensor([0.1486, 0.2123, 0.3691, 0.3143, 0.2992, 0.1855, 0.2820, 0.2108], + device='cuda:5'), in_proj_covar=tensor([0.0180, 0.0187, 0.0234, 0.0254, 0.0244, 0.0199, 0.0213, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 17:46:18,754 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.025e+02 1.470e+02 1.896e+02 2.201e+02 3.299e+02, threshold=3.792e+02, percent-clipped=0.0 +2023-03-26 17:46:21,724 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8038, 1.7162, 1.6244, 1.7145, 1.3448, 3.8128, 1.6823, 2.0531], + device='cuda:5'), covar=tensor([0.3500, 0.2633, 0.2221, 0.2498, 0.1843, 0.0208, 0.2393, 0.1242], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0120, 0.0124, 0.0115, 0.0098, 0.0097, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 17:46:37,520 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-26 17:46:45,391 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80283.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:46:48,317 INFO [finetune.py:976] (5/7) Epoch 15, batch 100, loss[loss=0.2096, simple_loss=0.2721, pruned_loss=0.07355, over 4877.00 frames. ], tot_loss[loss=0.187, simple_loss=0.2538, pruned_loss=0.06014, over 376665.08 frames. 
], batch size: 34, lr: 3.52e-03, grad_scale: 32.0 +2023-03-26 17:46:48,986 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=80289.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:47:05,104 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80313.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:47:21,594 INFO [finetune.py:976] (5/7) Epoch 15, batch 150, loss[loss=0.1807, simple_loss=0.2454, pruned_loss=0.05797, over 4786.00 frames. ], tot_loss[loss=0.1841, simple_loss=0.2494, pruned_loss=0.05938, over 505445.60 frames. ], batch size: 28, lr: 3.51e-03, grad_scale: 32.0 +2023-03-26 17:47:21,705 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7825, 1.6928, 1.5823, 1.6958, 1.1441, 3.6903, 1.4877, 1.8347], + device='cuda:5'), covar=tensor([0.3231, 0.2430, 0.2012, 0.2480, 0.1830, 0.0152, 0.2875, 0.1253], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0120, 0.0124, 0.0115, 0.0097, 0.0097, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 17:47:25,133 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.875e+01 1.589e+02 1.860e+02 2.189e+02 4.694e+02, threshold=3.721e+02, percent-clipped=1.0 +2023-03-26 17:47:25,880 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80344.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:47:27,419 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.46 vs. limit=5.0 +2023-03-26 17:47:35,002 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.68 vs. limit=2.0 +2023-03-26 17:47:35,509 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6779, 1.5921, 1.5265, 1.6012, 1.0463, 3.3132, 1.2356, 1.5940], + device='cuda:5'), covar=tensor([0.3173, 0.2322, 0.2029, 0.2366, 0.1912, 0.0207, 0.2474, 0.1305], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0120, 0.0124, 0.0115, 0.0098, 0.0097, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 17:47:37,133 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=80361.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:47:54,530 INFO [finetune.py:976] (5/7) Epoch 15, batch 200, loss[loss=0.1651, simple_loss=0.2377, pruned_loss=0.0462, over 4876.00 frames. ], tot_loss[loss=0.1851, simple_loss=0.2503, pruned_loss=0.05994, over 606184.35 frames. 
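Each optim.py:369 entry prints five grad-norm quartiles (min, 25%, median, 75%, max) together with Clipping_scale and a threshold, and in every such entry the threshold equals Clipping_scale times the middle quartile (e.g. 2.0 * 1.838e+02 = 3.676e+02 in an earlier entry of this stretch). Below is a minimal sketch of deriving a clipping threshold that way from recently observed gradient norms; the buffer of norms and the helper name are illustrative assumptions, not the optimizer's actual code.

import torch

# Sketch: clipping threshold = Clipping_scale * median of recent grad norms,
# matching the relationship visible in the optim.py lines above.
CLIPPING_SCALE = 2.0

def clipping_threshold(recent_grad_norms: torch.Tensor) -> torch.Tensor:
    return CLIPPING_SCALE * recent_grad_norms.quantile(0.5)

# Quartiles from an entry above: 1.115e+02 1.565e+02 1.838e+02 2.201e+02 4.652e+02
norms = torch.tensor([111.5, 156.5, 183.8, 220.1, 465.2])
print(clipping_threshold(norms))  # tensor(367.6000), cf. threshold=3.676e+02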
], batch size: 34, lr: 3.51e-03, grad_scale: 32.0 +2023-03-26 17:47:58,219 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80393.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:48:16,689 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80418.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:48:21,563 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8189, 2.0250, 1.6398, 1.6385, 2.2959, 2.2491, 2.0628, 1.9512], + device='cuda:5'), covar=tensor([0.0394, 0.0313, 0.0569, 0.0334, 0.0273, 0.0555, 0.0305, 0.0362], + device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0109, 0.0141, 0.0113, 0.0101, 0.0107, 0.0097, 0.0109], + device='cuda:5'), out_proj_covar=tensor([7.2657e-05, 8.4133e-05, 1.1206e-04, 8.7984e-05, 7.8603e-05, 7.9206e-05, + 7.2886e-05, 8.3018e-05], device='cuda:5') +2023-03-26 17:48:31,521 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-03-26 17:48:34,169 INFO [finetune.py:976] (5/7) Epoch 15, batch 250, loss[loss=0.1367, simple_loss=0.2056, pruned_loss=0.03391, over 4769.00 frames. ], tot_loss[loss=0.1866, simple_loss=0.2526, pruned_loss=0.06027, over 683515.13 frames. ], batch size: 26, lr: 3.51e-03, grad_scale: 32.0 +2023-03-26 17:48:37,161 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.156e+02 1.638e+02 2.049e+02 2.410e+02 5.367e+02, threshold=4.098e+02, percent-clipped=2.0 +2023-03-26 17:48:48,450 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80454.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:48:58,618 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80463.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:49:00,361 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=80466.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:49:20,046 INFO [finetune.py:976] (5/7) Epoch 15, batch 300, loss[loss=0.1502, simple_loss=0.2147, pruned_loss=0.04288, over 4166.00 frames. ], tot_loss[loss=0.1895, simple_loss=0.2564, pruned_loss=0.0613, over 742666.76 frames. 
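The lr field decays from 3.52e-03 toward 3.49e-03 across this stretch, consistent with an Eden-style schedule in which the learning rate shrinks as a -0.25 power of both batch count and epoch. Below is a minimal sketch under assumed constants (base_lr=0.004, lr_batches=100000, lr_epochs=100, and a scheduler epoch counter one behind the printed epoch); none of these values are read from this section, so treat them as assumptions that happen to reproduce the printed lr at the logged batch_count values.

# Sketch of an Eden-style schedule that reproduces the printed lr values.
# BASE_LR, LR_BATCHES, LR_EPOCHS are assumptions, not read from this section.
BASE_LR, LR_BATCHES, LR_EPOCHS = 0.004, 100_000.0, 100.0

def eden_lr(batch: float, epoch: float) -> float:
    batch_factor = ((batch ** 2 + LR_BATCHES ** 2) / LR_BATCHES ** 2) ** -0.25
    epoch_factor = ((epoch ** 2 + LR_EPOCHS ** 2) / LR_EPOCHS ** 2) ** -0.25
    return BASE_LR * batch_factor * epoch_factor

# batch_count values are taken from zipformer.py lines in this log; epoch=14
# assumes the scheduler counter lags the printed "Epoch 15" by one.
print(f"{eden_lr(80418, 14):.2e}")  # 3.51e-03, as printed near batch_count=80418
print(f"{eden_lr(82643, 14):.2e}")  # 3.49e-03, as printed near batch_count=82643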
], batch size: 18, lr: 3.51e-03, grad_scale: 32.0 +2023-03-26 17:49:24,794 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80494.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:49:45,515 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6146, 1.5516, 2.1609, 1.9155, 1.8424, 4.2263, 1.5331, 1.6955], + device='cuda:5'), covar=tensor([0.0951, 0.1808, 0.1186, 0.0983, 0.1618, 0.0240, 0.1515, 0.1742], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0081, 0.0074, 0.0077, 0.0092, 0.0080, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 17:49:45,543 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8660, 2.0610, 1.6968, 1.7846, 2.3874, 2.3010, 1.9871, 1.8830], + device='cuda:5'), covar=tensor([0.0364, 0.0329, 0.0504, 0.0342, 0.0235, 0.0549, 0.0323, 0.0390], + device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0109, 0.0142, 0.0114, 0.0101, 0.0108, 0.0097, 0.0109], + device='cuda:5'), out_proj_covar=tensor([7.2916e-05, 8.4561e-05, 1.1259e-04, 8.8481e-05, 7.8914e-05, 7.9565e-05, + 7.3079e-05, 8.3489e-05], device='cuda:5') +2023-03-26 17:50:03,919 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80524.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:50:10,122 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-03-26 17:50:12,917 INFO [finetune.py:976] (5/7) Epoch 15, batch 350, loss[loss=0.1906, simple_loss=0.2596, pruned_loss=0.06084, over 4891.00 frames. ], tot_loss[loss=0.1896, simple_loss=0.2572, pruned_loss=0.06103, over 789699.54 frames. ], batch size: 32, lr: 3.51e-03, grad_scale: 32.0 +2023-03-26 17:50:16,427 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.008e+02 1.511e+02 1.809e+02 2.185e+02 3.892e+02, threshold=3.618e+02, percent-clipped=0.0 +2023-03-26 17:50:24,821 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80555.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:50:31,856 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9432, 4.5888, 4.3397, 2.1608, 4.6061, 3.4960, 1.1895, 3.2175], + device='cuda:5'), covar=tensor([0.2395, 0.1555, 0.1187, 0.3202, 0.0752, 0.0882, 0.3930, 0.1176], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0173, 0.0159, 0.0128, 0.0156, 0.0122, 0.0145, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 17:50:47,436 INFO [finetune.py:976] (5/7) Epoch 15, batch 400, loss[loss=0.1881, simple_loss=0.2556, pruned_loss=0.06032, over 4811.00 frames. ], tot_loss[loss=0.19, simple_loss=0.2583, pruned_loss=0.06079, over 827090.01 frames. ], batch size: 33, lr: 3.51e-03, grad_scale: 32.0 +2023-03-26 17:51:29,120 INFO [finetune.py:976] (5/7) Epoch 15, batch 450, loss[loss=0.2186, simple_loss=0.2786, pruned_loss=0.07928, over 4822.00 frames. ], tot_loss[loss=0.189, simple_loss=0.2569, pruned_loss=0.06054, over 855696.11 frames. 
], batch size: 39, lr: 3.51e-03, grad_scale: 32.0 +2023-03-26 17:51:29,784 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80639.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:51:32,679 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.016e+02 1.580e+02 1.854e+02 2.177e+02 4.594e+02, threshold=3.707e+02, percent-clipped=2.0 +2023-03-26 17:52:01,309 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8715, 4.2049, 4.0021, 2.1410, 4.2881, 3.4178, 1.1407, 3.1274], + device='cuda:5'), covar=tensor([0.2706, 0.1626, 0.1361, 0.3144, 0.0767, 0.0839, 0.4074, 0.1435], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0174, 0.0160, 0.0128, 0.0157, 0.0123, 0.0146, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 17:52:03,116 INFO [finetune.py:976] (5/7) Epoch 15, batch 500, loss[loss=0.1798, simple_loss=0.2382, pruned_loss=0.06074, over 4183.00 frames. ], tot_loss[loss=0.1856, simple_loss=0.2534, pruned_loss=0.05892, over 877631.66 frames. ], batch size: 18, lr: 3.51e-03, grad_scale: 32.0 +2023-03-26 17:52:06,793 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4256, 2.3204, 2.0154, 2.1466, 2.3420, 2.1088, 2.5214, 2.4071], + device='cuda:5'), covar=tensor([0.1336, 0.1829, 0.2900, 0.2379, 0.2480, 0.1679, 0.3130, 0.1666], + device='cuda:5'), in_proj_covar=tensor([0.0179, 0.0186, 0.0232, 0.0252, 0.0243, 0.0198, 0.0211, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 17:52:36,001 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80736.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:52:37,124 INFO [finetune.py:976] (5/7) Epoch 15, batch 550, loss[loss=0.1797, simple_loss=0.2479, pruned_loss=0.05576, over 4748.00 frames. ], tot_loss[loss=0.1831, simple_loss=0.2504, pruned_loss=0.05787, over 895855.30 frames. ], batch size: 59, lr: 3.51e-03, grad_scale: 32.0 +2023-03-26 17:52:40,203 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.067e+02 1.496e+02 1.725e+02 2.011e+02 3.976e+02, threshold=3.451e+02, percent-clipped=1.0 +2023-03-26 17:52:44,397 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80749.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:53:01,305 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80773.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:53:10,747 INFO [finetune.py:976] (5/7) Epoch 15, batch 600, loss[loss=0.1934, simple_loss=0.2462, pruned_loss=0.07029, over 4912.00 frames. ], tot_loss[loss=0.1854, simple_loss=0.2523, pruned_loss=0.0592, over 910539.29 frames. ], batch size: 32, lr: 3.51e-03, grad_scale: 32.0 +2023-03-26 17:53:16,800 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80797.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:53:32,697 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80819.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:53:44,659 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80834.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:53:47,025 INFO [finetune.py:976] (5/7) Epoch 15, batch 650, loss[loss=0.166, simple_loss=0.2389, pruned_loss=0.04655, over 4753.00 frames. 
], tot_loss[loss=0.1884, simple_loss=0.2556, pruned_loss=0.0606, over 920335.66 frames. ], batch size: 27, lr: 3.51e-03, grad_scale: 32.0 +2023-03-26 17:53:50,575 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.196e+02 1.643e+02 1.965e+02 2.358e+02 6.399e+02, threshold=3.929e+02, percent-clipped=5.0 +2023-03-26 17:53:54,517 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-03-26 17:53:55,337 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80850.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:53:58,965 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3365, 1.3473, 1.4745, 0.7750, 1.4647, 1.5725, 1.6972, 1.3508], + device='cuda:5'), covar=tensor([0.0951, 0.0760, 0.0571, 0.0542, 0.0456, 0.0696, 0.0373, 0.0760], + device='cuda:5'), in_proj_covar=tensor([0.0126, 0.0152, 0.0123, 0.0129, 0.0130, 0.0127, 0.0142, 0.0145], + device='cuda:5'), out_proj_covar=tensor([9.2963e-05, 1.1072e-04, 8.8241e-05, 9.2849e-05, 9.2193e-05, 9.2095e-05, + 1.0244e-04, 1.0502e-04], device='cuda:5') +2023-03-26 17:54:21,597 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.58 vs. limit=2.0 +2023-03-26 17:54:25,921 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.12 vs. limit=5.0 +2023-03-26 17:54:29,238 INFO [finetune.py:976] (5/7) Epoch 15, batch 700, loss[loss=0.2664, simple_loss=0.3059, pruned_loss=0.1134, over 4901.00 frames. ], tot_loss[loss=0.1894, simple_loss=0.257, pruned_loss=0.06094, over 929078.83 frames. ], batch size: 35, lr: 3.51e-03, grad_scale: 32.0 +2023-03-26 17:55:23,118 INFO [finetune.py:976] (5/7) Epoch 15, batch 750, loss[loss=0.1988, simple_loss=0.2688, pruned_loss=0.06444, over 4859.00 frames. ], tot_loss[loss=0.1903, simple_loss=0.258, pruned_loss=0.06128, over 934831.73 frames. ], batch size: 34, lr: 3.51e-03, grad_scale: 32.0 +2023-03-26 17:55:23,797 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80939.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:55:26,160 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.122e+02 1.628e+02 1.856e+02 2.303e+02 3.612e+02, threshold=3.712e+02, percent-clipped=0.0 +2023-03-26 17:55:56,365 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=80987.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:55:56,907 INFO [finetune.py:976] (5/7) Epoch 15, batch 800, loss[loss=0.1765, simple_loss=0.2507, pruned_loss=0.05118, over 4919.00 frames. ], tot_loss[loss=0.1898, simple_loss=0.2583, pruned_loss=0.06064, over 939292.82 frames. ], batch size: 33, lr: 3.51e-03, grad_scale: 32.0 +2023-03-26 17:56:09,351 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.31 vs. limit=5.0 +2023-03-26 17:56:38,265 INFO [finetune.py:976] (5/7) Epoch 15, batch 850, loss[loss=0.2087, simple_loss=0.2719, pruned_loss=0.07274, over 4269.00 frames. ], tot_loss[loss=0.1872, simple_loss=0.2555, pruned_loss=0.05947, over 941206.18 frames. 
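The tot_loss frame counts are fractional (e.g. "over 934831.73 frames"), which a plain running sum of integer batch sizes could not produce; they behave like a leaky accumulator that decays by (1 - 1/200) each batch. That both plateaus near 200 times the typical ~4,800-frame batch (the ~950k-frame plateau reached just below) and reproduces the ~212k figure printed at batch 50 of this epoch. A minimal sketch follows; the decay constant of 200 is an assumption, not a value read from this section.

# Sketch: a leaky accumulator for the running (tot_loss) statistics.
# RESET_INTERVAL = 200 is an assumption that fits the logged frame counts.
RESET_INTERVAL = 200

def update(tot_frames: float, tot_loss_sum: float,
           batch_frames: float, batch_loss: float):
    decay = 1.0 - 1.0 / RESET_INTERVAL
    tot_frames = tot_frames * decay + batch_frames
    tot_loss_sum = tot_loss_sum * decay + batch_loss * batch_frames
    return tot_frames, tot_loss_sum  # printed tot_loss = tot_loss_sum / tot_frames

tot_f = tot_l = 0.0
for _ in range(51):  # batches 0..50 of the epoch, ~4700 frames each
    tot_f, tot_l = update(tot_f, tot_l, 4700.0, 0.19)
print(round(tot_f))  # ~212000, cf. "over 212365.61 frames" at batch 50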
], batch size: 65, lr: 3.51e-03, grad_scale: 32.0 +2023-03-26 17:56:41,289 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.001e+02 1.679e+02 1.976e+02 2.340e+02 3.768e+02, threshold=3.952e+02, percent-clipped=2.0 +2023-03-26 17:56:44,993 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81049.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:57:11,961 INFO [finetune.py:976] (5/7) Epoch 15, batch 900, loss[loss=0.2179, simple_loss=0.2713, pruned_loss=0.08227, over 4918.00 frames. ], tot_loss[loss=0.1851, simple_loss=0.2528, pruned_loss=0.05867, over 947311.02 frames. ], batch size: 43, lr: 3.51e-03, grad_scale: 64.0 +2023-03-26 17:57:13,947 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-03-26 17:57:14,449 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81092.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:57:16,894 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5332, 1.4019, 2.0034, 2.8657, 1.9483, 2.1370, 1.2153, 2.3728], + device='cuda:5'), covar=tensor([0.1774, 0.1447, 0.1147, 0.0677, 0.0810, 0.1590, 0.1542, 0.0567], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0132, 0.0163, 0.0100, 0.0136, 0.0124, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 17:57:17,458 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=81097.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:57:29,857 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81116.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:57:31,668 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81119.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:57:38,665 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81129.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:57:45,615 INFO [finetune.py:976] (5/7) Epoch 15, batch 950, loss[loss=0.1535, simple_loss=0.2253, pruned_loss=0.04091, over 4862.00 frames. ], tot_loss[loss=0.184, simple_loss=0.2513, pruned_loss=0.05838, over 950087.91 frames. ], batch size: 31, lr: 3.51e-03, grad_scale: 64.0 +2023-03-26 17:57:48,668 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.129e+01 1.456e+02 1.848e+02 2.216e+02 5.430e+02, threshold=3.695e+02, percent-clipped=2.0 +2023-03-26 17:57:52,983 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81150.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:58:03,768 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=81167.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:58:11,294 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81177.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:58:19,389 INFO [finetune.py:976] (5/7) Epoch 15, batch 1000, loss[loss=0.2409, simple_loss=0.3065, pruned_loss=0.08766, over 4746.00 frames. ], tot_loss[loss=0.1855, simple_loss=0.2533, pruned_loss=0.05885, over 949473.27 frames. ], batch size: 59, lr: 3.51e-03, grad_scale: 64.0 +2023-03-26 17:58:25,511 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=81198.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:58:44,292 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.37 vs. 
limit=5.0 +2023-03-26 17:58:52,891 INFO [finetune.py:976] (5/7) Epoch 15, batch 1050, loss[loss=0.153, simple_loss=0.2353, pruned_loss=0.03533, over 4834.00 frames. ], tot_loss[loss=0.1862, simple_loss=0.2551, pruned_loss=0.05864, over 950902.29 frames. ], batch size: 47, lr: 3.51e-03, grad_scale: 64.0 +2023-03-26 17:58:56,386 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.183e+02 1.566e+02 1.800e+02 2.282e+02 3.514e+02, threshold=3.601e+02, percent-clipped=0.0 +2023-03-26 17:59:31,697 INFO [finetune.py:976] (5/7) Epoch 15, batch 1100, loss[loss=0.2495, simple_loss=0.3133, pruned_loss=0.09286, over 4886.00 frames. ], tot_loss[loss=0.1893, simple_loss=0.2584, pruned_loss=0.06013, over 953820.49 frames. ], batch size: 35, lr: 3.51e-03, grad_scale: 64.0 +2023-03-26 17:59:43,343 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81299.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 17:59:49,389 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81301.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:00:05,528 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0 +2023-03-26 18:00:16,532 INFO [finetune.py:976] (5/7) Epoch 15, batch 1150, loss[loss=0.1762, simple_loss=0.2484, pruned_loss=0.05196, over 4863.00 frames. ], tot_loss[loss=0.1892, simple_loss=0.2584, pruned_loss=0.05998, over 954563.41 frames. ], batch size: 34, lr: 3.51e-03, grad_scale: 64.0 +2023-03-26 18:00:23,242 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.122e+02 1.635e+02 2.084e+02 2.407e+02 3.907e+02, threshold=4.168e+02, percent-clipped=1.0 +2023-03-26 18:00:41,629 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81360.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:00:42,313 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-26 18:00:42,877 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81362.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:00:59,907 INFO [finetune.py:976] (5/7) Epoch 15, batch 1200, loss[loss=0.1766, simple_loss=0.2483, pruned_loss=0.05241, over 4810.00 frames. ], tot_loss[loss=0.1879, simple_loss=0.2568, pruned_loss=0.05951, over 955625.44 frames. ], batch size: 40, lr: 3.51e-03, grad_scale: 64.0 +2023-03-26 18:01:03,396 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81392.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:01:27,559 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81429.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:01:35,103 INFO [finetune.py:976] (5/7) Epoch 15, batch 1250, loss[loss=0.1596, simple_loss=0.2239, pruned_loss=0.04767, over 4853.00 frames. ], tot_loss[loss=0.1862, simple_loss=0.2544, pruned_loss=0.059, over 955334.38 frames. 
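The scaling.py:679 lines compare a per-module whitening metric against a fixed limit (metric=1.76 vs. limit=2.0, and so on). A metric with the right behavior, at least 1.0 and equal to 1.0 exactly when the channel covariance within each group is proportional to the identity, can be computed as mean(lambda^2) / mean(lambda)^2 over the covariance eigenvalues, with no eigendecomposition needed because mean(lambda^2) is recoverable from the squared covariance matrix. The sketch below is a plausible reconstruction of the logged quantity under that assumption, not the module's actual code.

import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    """x: (num_frames, num_channels). Returns a scalar >= 1.0 that equals 1.0
    when each group's channel covariance is a multiple of the identity."""
    num_frames, num_channels = x.shape
    c = num_channels // num_groups
    x = x.reshape(num_frames, num_groups, c).transpose(0, 1)  # (groups, frames, c)
    x = x - x.mean(dim=1, keepdim=True)
    covar = torch.matmul(x.transpose(1, 2), x) / num_frames   # (groups, c, c)
    mean_eig = covar.diagonal(dim1=1, dim2=2).mean()          # mean eigenvalue
    mean_eig_sq = (covar ** 2).sum() / (num_groups * c)       # mean squared eigenvalue
    return mean_eig_sq / (mean_eig ** 2 + 1e-20)

x = torch.randn(1000, 96)      # nearly white activations
print(whitening_metric(x, 8))  # close to 1.0, well under limit=2.0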
], batch size: 44, lr: 3.51e-03, grad_scale: 64.0 +2023-03-26 18:01:40,098 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=81440.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:01:42,323 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.103e+01 1.546e+02 1.830e+02 2.259e+02 3.665e+02, threshold=3.660e+02, percent-clipped=0.0 +2023-03-26 18:01:44,696 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4745, 1.5217, 1.7987, 1.7248, 1.7164, 3.5267, 1.4775, 1.5749], + device='cuda:5'), covar=tensor([0.1057, 0.1769, 0.1222, 0.1047, 0.1496, 0.0237, 0.1437, 0.1760], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0081, 0.0074, 0.0078, 0.0092, 0.0081, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 18:01:53,446 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.9562, 3.5213, 3.6759, 3.6444, 3.5682, 3.3256, 4.0172, 1.3492], + device='cuda:5'), covar=tensor([0.1313, 0.1828, 0.1757, 0.2088, 0.2035, 0.2390, 0.1377, 0.7907], + device='cuda:5'), in_proj_covar=tensor([0.0346, 0.0242, 0.0271, 0.0289, 0.0328, 0.0280, 0.0295, 0.0294], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:01:58,445 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-03-26 18:02:05,461 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81472.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:02:08,459 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=81477.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:02:15,566 INFO [finetune.py:976] (5/7) Epoch 15, batch 1300, loss[loss=0.158, simple_loss=0.2296, pruned_loss=0.04314, over 4911.00 frames. ], tot_loss[loss=0.1828, simple_loss=0.2509, pruned_loss=0.05735, over 957367.10 frames. ], batch size: 36, lr: 3.50e-03, grad_scale: 64.0 +2023-03-26 18:02:49,394 INFO [finetune.py:976] (5/7) Epoch 15, batch 1350, loss[loss=0.1579, simple_loss=0.2325, pruned_loss=0.04161, over 4832.00 frames. ], tot_loss[loss=0.1827, simple_loss=0.2505, pruned_loss=0.05744, over 956221.33 frames. ], batch size: 30, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:02:53,476 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.113e+02 1.608e+02 1.859e+02 2.257e+02 3.880e+02, threshold=3.719e+02, percent-clipped=1.0 +2023-03-26 18:03:03,002 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.4031, 1.1995, 1.2549, 0.6766, 1.3174, 1.5445, 1.5001, 1.1964], + device='cuda:5'), covar=tensor([0.1128, 0.0911, 0.0602, 0.0711, 0.0554, 0.0625, 0.0424, 0.0852], + device='cuda:5'), in_proj_covar=tensor([0.0126, 0.0152, 0.0123, 0.0129, 0.0130, 0.0127, 0.0142, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.3143e-05, 1.1053e-04, 8.8604e-05, 9.2755e-05, 9.2377e-05, 9.2004e-05, + 1.0281e-04, 1.0553e-04], device='cuda:5') +2023-03-26 18:03:22,723 INFO [finetune.py:976] (5/7) Epoch 15, batch 1400, loss[loss=0.2254, simple_loss=0.2874, pruned_loss=0.08172, over 4812.00 frames. ], tot_loss[loss=0.1857, simple_loss=0.2538, pruned_loss=0.05882, over 956044.93 frames. 
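The grad_scale field doubles from 32.0 to 64.0 at batch 900 and drops back to 32.0 by batch 1350, the signature of dynamic loss scaling under fp16 training: the scale grows after a long run of overflow-free steps and is halved whenever an overflow is detected. Below is a minimal sketch using torch.cuda.amp.GradScaler; the constructor arguments shown are that class's usual knobs, and the specific values are assumptions about this run rather than settings read from the script.

import torch

# Dynamic fp16 loss scaling: grow the scale 2x after `growth_interval`
# overflow-free steps, halve it on overflow. That produces exactly the
# 32.0 -> 64.0 -> 32.0 pattern in the grad_scale field above.
scaler = torch.cuda.amp.GradScaler(
    init_scale=32.0,       # assumption: chosen to match the logged scale
    growth_factor=2.0,
    backoff_factor=0.5,
    growth_interval=2000,  # assumption: the class default
)

# Typical use inside the train loop:
#   scaler.scale(loss).backward()
#   scaler.step(optimizer)
#   scaler.update()   # grows or backs off the scale here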
], batch size: 41, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:03:44,079 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9084, 2.5981, 2.1583, 1.1759, 2.2750, 2.2598, 2.0215, 2.2570], + device='cuda:5'), covar=tensor([0.0782, 0.0750, 0.1490, 0.1975, 0.1543, 0.2116, 0.2010, 0.0938], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0194, 0.0199, 0.0184, 0.0213, 0.0207, 0.0225, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:03:45,296 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2023, 2.1315, 1.7806, 0.8717, 1.8468, 1.7742, 1.5773, 1.8730], + device='cuda:5'), covar=tensor([0.0967, 0.0712, 0.1360, 0.1942, 0.1402, 0.2187, 0.2272, 0.0900], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0194, 0.0199, 0.0184, 0.0213, 0.0207, 0.0225, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:03:48,207 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3666, 1.4995, 1.5778, 1.6330, 1.7152, 3.2148, 1.4373, 1.5576], + device='cuda:5'), covar=tensor([0.1022, 0.1732, 0.1122, 0.0943, 0.1430, 0.0237, 0.1341, 0.1684], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0074, 0.0078, 0.0092, 0.0081, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 18:03:56,016 INFO [finetune.py:976] (5/7) Epoch 15, batch 1450, loss[loss=0.2033, simple_loss=0.2671, pruned_loss=0.06973, over 4804.00 frames. ], tot_loss[loss=0.187, simple_loss=0.2558, pruned_loss=0.05908, over 956436.89 frames. ], batch size: 45, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:04:00,101 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.106e+02 1.620e+02 1.887e+02 2.237e+02 3.719e+02, threshold=3.774e+02, percent-clipped=1.0 +2023-03-26 18:04:07,949 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81655.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:04:09,576 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81657.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:04:18,073 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81670.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:04:29,492 INFO [finetune.py:976] (5/7) Epoch 15, batch 1500, loss[loss=0.19, simple_loss=0.2686, pruned_loss=0.05574, over 4908.00 frames. ], tot_loss[loss=0.1893, simple_loss=0.2581, pruned_loss=0.06026, over 953689.75 frames. 
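The zipformer.py:2441 dumps print attn_weights_entropy, eight values per tensor, plausibly one per attention head: the entropy of each head's attention distribution, near 0 when a head focuses on a single frame and larger when attention is diffuse. A minimal sketch of such a diagnostic follows; the tensor layout is an assumption for illustration.

import torch

def attention_entropy(attn_weights: torch.Tensor) -> torch.Tensor:
    """attn_weights: (num_heads, batch, tgt_len, src_len), each row a
    distribution over src_len. Returns mean entropy per head: (num_heads,)."""
    ent = -(attn_weights * (attn_weights + 1.0e-20).log()).sum(dim=-1)
    return ent.mean(dim=(1, 2))  # average over batch and target positions

w = torch.softmax(torch.randn(8, 4, 10, 10), dim=-1)
print(attention_entropy(w))  # eight values, like one logged row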
], batch size: 36, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:04:33,112 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.5994, 1.5469, 1.5248, 0.9346, 1.6561, 1.8458, 1.8440, 1.3833], + device='cuda:5'), covar=tensor([0.0883, 0.0646, 0.0539, 0.0590, 0.0442, 0.0546, 0.0298, 0.0707], + device='cuda:5'), in_proj_covar=tensor([0.0125, 0.0151, 0.0123, 0.0128, 0.0129, 0.0127, 0.0141, 0.0145], + device='cuda:5'), out_proj_covar=tensor([9.2303e-05, 1.0984e-04, 8.8160e-05, 9.2050e-05, 9.1679e-05, 9.1554e-05, + 1.0228e-04, 1.0462e-04], device='cuda:5') +2023-03-26 18:05:16,707 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81731.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 18:05:20,804 INFO [finetune.py:976] (5/7) Epoch 15, batch 1550, loss[loss=0.1503, simple_loss=0.2134, pruned_loss=0.04361, over 4879.00 frames. ], tot_loss[loss=0.1891, simple_loss=0.2576, pruned_loss=0.06028, over 953816.36 frames. ], batch size: 43, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:05:24,955 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.055e+02 1.530e+02 1.898e+02 2.293e+02 4.636e+02, threshold=3.795e+02, percent-clipped=1.0 +2023-03-26 18:05:50,072 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81772.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:06:03,757 INFO [finetune.py:976] (5/7) Epoch 15, batch 1600, loss[loss=0.1941, simple_loss=0.2499, pruned_loss=0.06911, over 4736.00 frames. ], tot_loss[loss=0.1881, simple_loss=0.2562, pruned_loss=0.06004, over 954595.54 frames. ], batch size: 59, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:06:10,236 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-03-26 18:06:25,253 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8388, 1.1678, 1.9359, 1.7244, 1.5907, 1.5196, 1.6334, 1.7242], + device='cuda:5'), covar=tensor([0.3521, 0.3893, 0.3016, 0.3643, 0.4654, 0.3589, 0.4127, 0.2993], + device='cuda:5'), in_proj_covar=tensor([0.0243, 0.0238, 0.0256, 0.0266, 0.0265, 0.0238, 0.0280, 0.0235], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:06:25,748 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=81820.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:06:35,460 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7606, 2.4559, 2.0241, 2.7841, 2.6076, 2.3474, 3.2099, 2.6783], + device='cuda:5'), covar=tensor([0.1242, 0.2423, 0.3129, 0.2633, 0.2696, 0.1648, 0.2567, 0.1844], + device='cuda:5'), in_proj_covar=tensor([0.0182, 0.0188, 0.0236, 0.0256, 0.0247, 0.0201, 0.0215, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:06:37,142 INFO [finetune.py:976] (5/7) Epoch 15, batch 1650, loss[loss=0.1554, simple_loss=0.2248, pruned_loss=0.04297, over 4904.00 frames. ], tot_loss[loss=0.1858, simple_loss=0.2533, pruned_loss=0.05911, over 955298.25 frames. ], batch size: 35, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:06:40,762 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.362e+01 1.564e+02 1.826e+02 2.251e+02 4.924e+02, threshold=3.651e+02, percent-clipped=3.0 +2023-03-26 18:07:18,076 INFO [finetune.py:976] (5/7) Epoch 15, batch 1700, loss[loss=0.2196, simple_loss=0.2835, pruned_loss=0.07787, over 4818.00 frames. 
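The zipformer.py:1188 lines record a stochastic layer-drop decision per encoder stack. The staggered warmup windows they print (666.7-1333.3 up through 3333.3-4000.0) all ended long before the batch_count of roughly 80-82k seen here, so num_to_drop is almost always 0, with a rare residual drop such as layers_to_drop={1} at batch_count=81731 just above. Below is a minimal sketch of such a schedule; the two probability constants and the linear decay are illustrative assumptions, not values read from the model code.

import random

# Assumed constants: heavy layer dropping inside the warmup window, decaying
# linearly to a small residual rate afterwards (values are illustrative).
WARMUP_DROP_PROB, RESIDUAL_DROP_PROB = 0.5, 0.02

def pick_layers_to_drop(batch_count, warmup_begin, warmup_end, num_layers):
    if batch_count < warmup_begin:
        p = WARMUP_DROP_PROB
    elif batch_count < warmup_end:
        frac = (batch_count - warmup_begin) / (warmup_end - warmup_begin)
        p = WARMUP_DROP_PROB + frac * (RESIDUAL_DROP_PROB - WARMUP_DROP_PROB)
    else:
        p = RESIDUAL_DROP_PROB
    return {i for i in range(num_layers) if random.random() < p}

drop = pick_layers_to_drop(81731.0, 3333.3, 4000.0, num_layers=4)
print(f"num_to_drop={len(drop)}, layers_to_drop={drop}")  # usually 0 this late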
], tot_loss[loss=0.1836, simple_loss=0.2511, pruned_loss=0.05805, over 956305.99 frames. ], batch size: 39, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:07:30,611 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0807, 1.9205, 1.8051, 1.9152, 1.5284, 4.6188, 1.8162, 2.3004], + device='cuda:5'), covar=tensor([0.3123, 0.2420, 0.2024, 0.2316, 0.1659, 0.0113, 0.2416, 0.1247], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0120, 0.0124, 0.0115, 0.0098, 0.0097, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 18:07:32,820 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.79 vs. limit=5.0 +2023-03-26 18:07:38,831 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9526, 4.6327, 4.3735, 2.5802, 4.7031, 3.7031, 0.7396, 3.2148], + device='cuda:5'), covar=tensor([0.2459, 0.2189, 0.1353, 0.3162, 0.0783, 0.0855, 0.5098, 0.1514], + device='cuda:5'), in_proj_covar=tensor([0.0149, 0.0173, 0.0157, 0.0127, 0.0156, 0.0121, 0.0144, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 18:07:51,480 INFO [finetune.py:976] (5/7) Epoch 15, batch 1750, loss[loss=0.1568, simple_loss=0.2343, pruned_loss=0.03967, over 4740.00 frames. ], tot_loss[loss=0.1849, simple_loss=0.2527, pruned_loss=0.05851, over 956477.26 frames. ], batch size: 26, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:07:55,585 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.016e+02 1.510e+02 1.914e+02 2.293e+02 4.004e+02, threshold=3.828e+02, percent-clipped=1.0 +2023-03-26 18:08:02,955 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81955.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:08:03,608 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6349, 1.4510, 1.0386, 0.2825, 1.2038, 1.4293, 1.3267, 1.3311], + device='cuda:5'), covar=tensor([0.0917, 0.0852, 0.1401, 0.1961, 0.1400, 0.2275, 0.2302, 0.0957], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0194, 0.0200, 0.0184, 0.0213, 0.0207, 0.0224, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:08:04,188 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81957.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:08:24,932 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81987.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:08:25,413 INFO [finetune.py:976] (5/7) Epoch 15, batch 1800, loss[loss=0.1858, simple_loss=0.2621, pruned_loss=0.05469, over 4849.00 frames. ], tot_loss[loss=0.1877, simple_loss=0.2565, pruned_loss=0.05945, over 956776.67 frames. 
], batch size: 49, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:08:36,208 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=82003.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:08:37,855 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=82005.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:08:52,972 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82026.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 18:09:00,021 INFO [finetune.py:976] (5/7) Epoch 15, batch 1850, loss[loss=0.2274, simple_loss=0.2865, pruned_loss=0.08415, over 4832.00 frames. ], tot_loss[loss=0.189, simple_loss=0.258, pruned_loss=0.06, over 956708.96 frames. ], batch size: 47, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:09:03,673 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.098e+02 1.664e+02 1.894e+02 2.440e+02 3.763e+02, threshold=3.787e+02, percent-clipped=0.0 +2023-03-26 18:09:06,698 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82048.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:09:11,037 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82055.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:09:33,283 INFO [finetune.py:976] (5/7) Epoch 15, batch 1900, loss[loss=0.2439, simple_loss=0.302, pruned_loss=0.0929, over 4891.00 frames. ], tot_loss[loss=0.1895, simple_loss=0.2589, pruned_loss=0.06003, over 957810.45 frames. ], batch size: 35, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:09:51,826 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82116.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:10:04,454 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.67 vs. limit=2.0 +2023-03-26 18:10:16,117 INFO [finetune.py:976] (5/7) Epoch 15, batch 1950, loss[loss=0.1864, simple_loss=0.2627, pruned_loss=0.05504, over 4816.00 frames. ], tot_loss[loss=0.1875, simple_loss=0.2567, pruned_loss=0.05916, over 958061.87 frames. ], batch size: 41, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:10:24,222 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.964e+01 1.525e+02 1.906e+02 2.226e+02 4.434e+02, threshold=3.812e+02, percent-clipped=2.0 +2023-03-26 18:10:34,844 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-03-26 18:10:50,654 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.22 vs. limit=5.0 +2023-03-26 18:11:01,528 INFO [finetune.py:976] (5/7) Epoch 15, batch 2000, loss[loss=0.2362, simple_loss=0.2925, pruned_loss=0.08993, over 4824.00 frames. ], tot_loss[loss=0.1875, simple_loss=0.2554, pruned_loss=0.05981, over 958398.06 frames. ], batch size: 33, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:11:01,873 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-03-26 18:11:33,610 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-03-26 18:11:38,381 INFO [finetune.py:976] (5/7) Epoch 15, batch 2050, loss[loss=0.2053, simple_loss=0.2617, pruned_loss=0.07448, over 4127.00 frames. ], tot_loss[loss=0.1832, simple_loss=0.2508, pruned_loss=0.05775, over 958401.54 frames. 
], batch size: 65, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:11:42,513 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.084e+02 1.417e+02 1.727e+02 2.274e+02 4.171e+02, threshold=3.454e+02, percent-clipped=1.0 +2023-03-26 18:12:24,921 INFO [finetune.py:976] (5/7) Epoch 15, batch 2100, loss[loss=0.202, simple_loss=0.2744, pruned_loss=0.06487, over 4822.00 frames. ], tot_loss[loss=0.1829, simple_loss=0.2501, pruned_loss=0.05788, over 957685.10 frames. ], batch size: 39, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:12:54,205 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82326.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:12:57,374 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3657, 1.9560, 2.2451, 2.2574, 1.9930, 2.0225, 2.2043, 2.0757], + device='cuda:5'), covar=tensor([0.3798, 0.4279, 0.3441, 0.4084, 0.5352, 0.3958, 0.4773, 0.3263], + device='cuda:5'), in_proj_covar=tensor([0.0243, 0.0239, 0.0256, 0.0266, 0.0266, 0.0238, 0.0279, 0.0235], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:13:02,492 INFO [finetune.py:976] (5/7) Epoch 15, batch 2150, loss[loss=0.1848, simple_loss=0.2628, pruned_loss=0.05334, over 4876.00 frames. ], tot_loss[loss=0.1871, simple_loss=0.2546, pruned_loss=0.05979, over 956878.40 frames. ], batch size: 34, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:13:06,117 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82343.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:13:06,660 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.137e+02 1.626e+02 1.861e+02 2.291e+02 4.001e+02, threshold=3.721e+02, percent-clipped=2.0 +2023-03-26 18:13:07,985 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82346.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:13:10,200 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-03-26 18:13:12,913 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6235, 1.4651, 1.9810, 3.3348, 2.2348, 2.2657, 0.8957, 2.7194], + device='cuda:5'), covar=tensor([0.1892, 0.1589, 0.1454, 0.0596, 0.0863, 0.1719, 0.2037, 0.0526], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0116, 0.0132, 0.0163, 0.0099, 0.0137, 0.0123, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 18:13:25,989 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=82374.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:13:35,321 INFO [finetune.py:976] (5/7) Epoch 15, batch 2200, loss[loss=0.2071, simple_loss=0.2749, pruned_loss=0.06966, over 4100.00 frames. ], tot_loss[loss=0.1874, simple_loss=0.2558, pruned_loss=0.05948, over 955414.69 frames. 
], batch size: 65, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:13:39,021 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0680, 1.8676, 1.7418, 1.8558, 1.7771, 1.8346, 1.8732, 2.5316], + device='cuda:5'), covar=tensor([0.3759, 0.4785, 0.3344, 0.4544, 0.4409, 0.2394, 0.4145, 0.1832], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0260, 0.0227, 0.0276, 0.0248, 0.0215, 0.0250, 0.0226], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:13:48,067 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82407.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:13:50,424 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82411.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:13:54,668 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2566, 2.2282, 1.7115, 2.4569, 2.2443, 1.8941, 2.8252, 2.3407], + device='cuda:5'), covar=tensor([0.1457, 0.2768, 0.3474, 0.3134, 0.2827, 0.1875, 0.3679, 0.1968], + device='cuda:5'), in_proj_covar=tensor([0.0181, 0.0188, 0.0235, 0.0254, 0.0247, 0.0201, 0.0214, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:14:08,112 INFO [finetune.py:976] (5/7) Epoch 15, batch 2250, loss[loss=0.1417, simple_loss=0.2167, pruned_loss=0.03339, over 4748.00 frames. ], tot_loss[loss=0.1874, simple_loss=0.2567, pruned_loss=0.05905, over 957088.33 frames. ], batch size: 23, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:14:12,182 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.061e+02 1.505e+02 1.711e+02 2.071e+02 3.892e+02, threshold=3.421e+02, percent-clipped=2.0 +2023-03-26 18:14:23,549 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-03-26 18:14:41,717 INFO [finetune.py:976] (5/7) Epoch 15, batch 2300, loss[loss=0.2014, simple_loss=0.2738, pruned_loss=0.06445, over 4774.00 frames. ], tot_loss[loss=0.1866, simple_loss=0.2565, pruned_loss=0.05839, over 957165.86 frames. ], batch size: 29, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:14:45,300 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82493.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 18:15:17,445 INFO [finetune.py:976] (5/7) Epoch 15, batch 2350, loss[loss=0.1989, simple_loss=0.248, pruned_loss=0.07487, over 4785.00 frames. ], tot_loss[loss=0.1843, simple_loss=0.2532, pruned_loss=0.05765, over 955371.32 frames. ], batch size: 29, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:15:21,098 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.644e+02 1.984e+02 2.390e+02 4.799e+02, threshold=3.967e+02, percent-clipped=3.0 +2023-03-26 18:15:28,736 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82554.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 18:15:44,886 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82569.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:16:00,853 INFO [finetune.py:976] (5/7) Epoch 15, batch 2400, loss[loss=0.1631, simple_loss=0.2317, pruned_loss=0.04724, over 4820.00 frames. ], tot_loss[loss=0.182, simple_loss=0.2505, pruned_loss=0.0568, over 955230.59 frames. 
], batch size: 38, lr: 3.50e-03, grad_scale: 32.0 +2023-03-26 18:16:15,937 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([5.3040, 4.7066, 4.8856, 5.1467, 4.9989, 4.6662, 5.4443, 1.6671], + device='cuda:5'), covar=tensor([0.0725, 0.0710, 0.0768, 0.1010, 0.1223, 0.1625, 0.0476, 0.5791], + device='cuda:5'), in_proj_covar=tensor([0.0348, 0.0244, 0.0276, 0.0291, 0.0331, 0.0284, 0.0298, 0.0296], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:16:35,308 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-03-26 18:16:37,309 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82630.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:16:42,547 INFO [finetune.py:976] (5/7) Epoch 15, batch 2450, loss[loss=0.175, simple_loss=0.242, pruned_loss=0.054, over 4829.00 frames. ], tot_loss[loss=0.1792, simple_loss=0.2472, pruned_loss=0.05563, over 956224.98 frames. ], batch size: 30, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:16:45,690 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82643.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:16:45,758 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-03-26 18:16:46,165 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.277e+01 1.596e+02 1.937e+02 2.264e+02 4.235e+02, threshold=3.875e+02, percent-clipped=1.0 +2023-03-26 18:17:07,454 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6848, 4.1225, 3.9577, 2.1117, 4.1783, 3.2006, 0.8183, 2.9177], + device='cuda:5'), covar=tensor([0.2447, 0.2117, 0.1299, 0.2928, 0.0824, 0.0798, 0.4313, 0.1350], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0173, 0.0158, 0.0127, 0.0156, 0.0121, 0.0144, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 18:17:13,253 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-03-26 18:17:13,490 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.5975, 3.2642, 2.8642, 1.6162, 3.0626, 2.6197, 2.4847, 2.7251], + device='cuda:5'), covar=tensor([0.0879, 0.0710, 0.1466, 0.1949, 0.1455, 0.1919, 0.1658, 0.0959], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0194, 0.0200, 0.0184, 0.0213, 0.0207, 0.0224, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:17:18,143 INFO [finetune.py:976] (5/7) Epoch 15, batch 2500, loss[loss=0.2107, simple_loss=0.2844, pruned_loss=0.06852, over 4838.00 frames. ], tot_loss[loss=0.1839, simple_loss=0.2512, pruned_loss=0.05824, over 953608.45 frames. 
], batch size: 47, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:17:20,059 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=82691.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:17:35,740 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82702.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:17:46,369 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82711.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:18:03,676 INFO [finetune.py:976] (5/7) Epoch 15, batch 2550, loss[loss=0.2192, simple_loss=0.2893, pruned_loss=0.07453, over 4273.00 frames. ], tot_loss[loss=0.1868, simple_loss=0.2552, pruned_loss=0.0592, over 950729.52 frames. ], batch size: 65, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:18:07,765 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.099e+02 1.574e+02 1.850e+02 2.269e+02 4.152e+02, threshold=3.700e+02, percent-clipped=3.0 +2023-03-26 18:18:17,863 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=82759.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:18:36,860 INFO [finetune.py:976] (5/7) Epoch 15, batch 2600, loss[loss=0.1625, simple_loss=0.2317, pruned_loss=0.04666, over 4758.00 frames. ], tot_loss[loss=0.1869, simple_loss=0.2555, pruned_loss=0.05917, over 950456.55 frames. ], batch size: 26, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:18:53,103 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-03-26 18:19:00,544 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0424, 1.9294, 2.0988, 1.6536, 1.9821, 2.1960, 2.2295, 1.7942], + device='cuda:5'), covar=tensor([0.0475, 0.0497, 0.0551, 0.0731, 0.0856, 0.0523, 0.0418, 0.0862], + device='cuda:5'), in_proj_covar=tensor([0.0136, 0.0135, 0.0143, 0.0124, 0.0125, 0.0142, 0.0142, 0.0164], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:19:07,224 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4402, 1.4134, 1.9001, 2.8794, 1.9165, 2.1065, 0.9136, 2.4816], + device='cuda:5'), covar=tensor([0.1854, 0.1490, 0.1256, 0.0639, 0.0858, 0.1357, 0.1885, 0.0536], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0117, 0.0133, 0.0164, 0.0100, 0.0138, 0.0125, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 18:19:10,656 INFO [finetune.py:976] (5/7) Epoch 15, batch 2650, loss[loss=0.2047, simple_loss=0.2748, pruned_loss=0.06731, over 4907.00 frames. ], tot_loss[loss=0.1888, simple_loss=0.2576, pruned_loss=0.06001, over 952168.52 frames. ], batch size: 38, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:19:14,266 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.056e+02 1.579e+02 1.879e+02 2.251e+02 6.929e+02, threshold=3.759e+02, percent-clipped=2.0 +2023-03-26 18:19:15,045 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0 +2023-03-26 18:19:17,845 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82849.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 18:19:26,917 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-03-26 18:19:43,181 INFO [finetune.py:976] (5/7) Epoch 15, batch 2700, loss[loss=0.1221, simple_loss=0.1912, pruned_loss=0.02646, over 4709.00 frames. 
], tot_loss[loss=0.1877, simple_loss=0.2565, pruned_loss=0.05941, over 952668.77 frames. ], batch size: 23, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:20:08,049 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82925.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:20:16,131 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.84 vs. limit=5.0 +2023-03-26 18:20:16,388 INFO [finetune.py:976] (5/7) Epoch 15, batch 2750, loss[loss=0.1788, simple_loss=0.2453, pruned_loss=0.05617, over 4892.00 frames. ], tot_loss[loss=0.1849, simple_loss=0.2531, pruned_loss=0.05839, over 951559.97 frames. ], batch size: 32, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:20:20,502 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.092e+02 1.574e+02 1.758e+02 2.107e+02 4.076e+02, threshold=3.515e+02, percent-clipped=2.0 +2023-03-26 18:20:33,239 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82963.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:20:36,353 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-03-26 18:20:49,690 INFO [finetune.py:976] (5/7) Epoch 15, batch 2800, loss[loss=0.1702, simple_loss=0.2483, pruned_loss=0.04608, over 4776.00 frames. ], tot_loss[loss=0.1818, simple_loss=0.2497, pruned_loss=0.05693, over 950853.64 frames. ], batch size: 26, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:21:00,567 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7328, 1.6190, 1.5811, 1.6864, 1.3767, 3.7757, 1.5091, 2.1201], + device='cuda:5'), covar=tensor([0.3341, 0.2556, 0.2143, 0.2361, 0.1738, 0.0169, 0.2452, 0.1208], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0115, 0.0120, 0.0123, 0.0114, 0.0097, 0.0097, 0.0096], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 18:21:07,075 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83002.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:21:19,012 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0315, 1.8628, 2.5140, 3.5698, 2.5294, 2.7402, 1.5398, 2.9681], + device='cuda:5'), covar=tensor([0.1612, 0.1435, 0.1200, 0.0596, 0.0758, 0.1292, 0.1665, 0.0496], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0116, 0.0132, 0.0163, 0.0099, 0.0137, 0.0124, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 18:21:21,956 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83024.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:21:37,765 INFO [finetune.py:976] (5/7) Epoch 15, batch 2850, loss[loss=0.1836, simple_loss=0.2538, pruned_loss=0.05671, over 4796.00 frames. ], tot_loss[loss=0.1814, simple_loss=0.2484, pruned_loss=0.05716, over 949104.73 frames. ], batch size: 51, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:21:41,399 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.061e+02 1.604e+02 1.866e+02 2.227e+02 4.125e+02, threshold=3.733e+02, percent-clipped=3.0 +2023-03-26 18:21:49,028 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=83050.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:22:15,029 INFO [finetune.py:976] (5/7) Epoch 15, batch 2900, loss[loss=0.1768, simple_loss=0.2424, pruned_loss=0.05563, over 4884.00 frames. 
], tot_loss[loss=0.1845, simple_loss=0.2517, pruned_loss=0.0587, over 948843.20 frames. ], batch size: 32, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:22:50,282 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-03-26 18:22:57,703 INFO [finetune.py:976] (5/7) Epoch 15, batch 2950, loss[loss=0.2007, simple_loss=0.2663, pruned_loss=0.06758, over 4808.00 frames. ], tot_loss[loss=0.1878, simple_loss=0.2558, pruned_loss=0.05995, over 949800.49 frames. ], batch size: 38, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:22:59,676 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.3592, 2.9258, 2.5857, 1.2902, 2.7523, 2.3081, 2.2218, 2.5304], + device='cuda:5'), covar=tensor([0.0731, 0.0886, 0.1759, 0.2209, 0.1765, 0.1999, 0.2224, 0.1228], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0194, 0.0199, 0.0183, 0.0212, 0.0206, 0.0224, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:23:01,329 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.293e+02 1.748e+02 2.030e+02 2.368e+02 3.585e+02, threshold=4.059e+02, percent-clipped=0.0 +2023-03-26 18:23:08,899 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83149.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 18:23:21,870 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8598, 1.2155, 1.8882, 1.7687, 1.5919, 1.5422, 1.7040, 1.6855], + device='cuda:5'), covar=tensor([0.2746, 0.3212, 0.2527, 0.2913, 0.3907, 0.3132, 0.3543, 0.2608], + device='cuda:5'), in_proj_covar=tensor([0.0242, 0.0237, 0.0255, 0.0267, 0.0265, 0.0238, 0.0278, 0.0234], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:23:37,596 INFO [finetune.py:976] (5/7) Epoch 15, batch 3000, loss[loss=0.2219, simple_loss=0.2948, pruned_loss=0.07447, over 4816.00 frames. ], tot_loss[loss=0.1896, simple_loss=0.2579, pruned_loss=0.06067, over 950613.23 frames. 
], batch size: 39, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:23:37,597 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 18:23:45,211 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.8399, 3.4359, 3.5103, 3.7206, 3.6147, 3.4717, 3.9331, 1.2830], + device='cuda:5'), covar=tensor([0.0862, 0.0896, 0.0940, 0.1041, 0.1311, 0.1517, 0.0747, 0.5302], + device='cuda:5'), in_proj_covar=tensor([0.0354, 0.0247, 0.0279, 0.0294, 0.0336, 0.0286, 0.0303, 0.0300], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:23:47,216 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.8829, 3.4902, 3.5592, 3.7666, 3.6355, 3.5014, 3.9717, 1.2857], + device='cuda:5'), covar=tensor([0.0933, 0.0968, 0.0962, 0.1042, 0.1509, 0.1621, 0.0838, 0.5293], + device='cuda:5'), in_proj_covar=tensor([0.0354, 0.0247, 0.0279, 0.0294, 0.0336, 0.0286, 0.0303, 0.0300], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:23:47,461 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4839, 1.4429, 1.5548, 1.5482, 1.6024, 2.9811, 1.3346, 1.5308], + device='cuda:5'), covar=tensor([0.0959, 0.1840, 0.0948, 0.0943, 0.1497, 0.0292, 0.1459, 0.1685], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0081, 0.0073, 0.0077, 0.0092, 0.0080, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 18:23:48,368 INFO [finetune.py:1010] (5/7) Epoch 15, validation: loss=0.1564, simple_loss=0.2269, pruned_loss=0.04296, over 2265189.00 frames. +2023-03-26 18:23:48,369 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 18:23:50,863 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.76 vs. limit=5.0 +2023-03-26 18:23:59,725 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83196.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:24:00,272 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=83197.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 18:24:21,534 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83225.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:24:27,000 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.66 vs. limit=2.0 +2023-03-26 18:24:30,321 INFO [finetune.py:976] (5/7) Epoch 15, batch 3050, loss[loss=0.1918, simple_loss=0.2569, pruned_loss=0.06333, over 4815.00 frames. ], tot_loss[loss=0.189, simple_loss=0.2577, pruned_loss=0.06019, over 950967.69 frames. 
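Validation fires at batch 0 of the epoch and again at batch 3000 just above, each time over the same 2265189 frames, i.e. the full dev set rather than a sample, which is why successive validation losses (0.1586 at the epoch start, 0.1564 here) are directly comparable. A minimal sketch of the hook; the interval of 3000 is inferred from the two firing points and the names are illustrative.

VALID_INTERVAL = 3000  # inferred: validation runs at batch 0 and batch 3000

def maybe_validate(batch_idx: int, compute_validation_loss) -> None:
    # Same full dev set every time, hence the constant "over 2265189.00
    # frames" and directly comparable validation losses.
    if batch_idx % VALID_INTERVAL == 0:
        compute_validation_loss()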
], batch size: 33, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:24:34,915 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.082e+02 1.542e+02 1.763e+02 2.135e+02 3.801e+02, threshold=3.526e+02, percent-clipped=0.0 +2023-03-26 18:24:44,058 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83257.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:24:54,193 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=83273.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:25:01,277 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83284.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:25:04,099 INFO [finetune.py:976] (5/7) Epoch 15, batch 3100, loss[loss=0.1486, simple_loss=0.2109, pruned_loss=0.04317, over 4932.00 frames. ], tot_loss[loss=0.1878, simple_loss=0.2566, pruned_loss=0.05947, over 953914.04 frames. ], batch size: 33, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:25:24,786 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83319.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:25:31,742 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-03-26 18:25:34,165 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0 +2023-03-26 18:25:37,274 INFO [finetune.py:976] (5/7) Epoch 15, batch 3150, loss[loss=0.1572, simple_loss=0.2395, pruned_loss=0.03745, over 4905.00 frames. ], tot_loss[loss=0.1855, simple_loss=0.2535, pruned_loss=0.05873, over 956300.17 frames. ], batch size: 35, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:25:41,385 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.111e+02 1.524e+02 1.821e+02 2.258e+02 3.585e+02, threshold=3.643e+02, percent-clipped=2.0 +2023-03-26 18:25:42,592 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83345.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:26:12,520 INFO [finetune.py:976] (5/7) Epoch 15, batch 3200, loss[loss=0.2033, simple_loss=0.2586, pruned_loss=0.07402, over 4905.00 frames. ], tot_loss[loss=0.1819, simple_loss=0.2498, pruned_loss=0.05699, over 956802.92 frames. ], batch size: 32, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:26:25,120 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.1933, 1.3551, 1.4226, 0.7883, 1.3339, 1.5313, 1.5780, 1.3284], + device='cuda:5'), covar=tensor([0.0811, 0.0584, 0.0432, 0.0487, 0.0454, 0.0598, 0.0332, 0.0578], + device='cuda:5'), in_proj_covar=tensor([0.0125, 0.0151, 0.0123, 0.0128, 0.0129, 0.0126, 0.0140, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.2010e-05, 1.1003e-04, 8.8670e-05, 9.1941e-05, 9.1579e-05, 9.1222e-05, + 1.0130e-04, 1.0574e-04], device='cuda:5') +2023-03-26 18:26:41,146 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-03-26 18:26:43,397 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-03-26 18:26:55,876 INFO [finetune.py:976] (5/7) Epoch 15, batch 3250, loss[loss=0.2167, simple_loss=0.2681, pruned_loss=0.08261, over 4103.00 frames. ], tot_loss[loss=0.1832, simple_loss=0.2508, pruned_loss=0.05783, over 955523.70 frames. 
], batch size: 65, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:27:00,086 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.648e+01 1.539e+02 1.854e+02 2.232e+02 3.646e+02, threshold=3.708e+02, percent-clipped=1.0 +2023-03-26 18:27:07,597 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7028, 2.9710, 2.9382, 2.0698, 2.9068, 3.2512, 3.0893, 2.5918], + device='cuda:5'), covar=tensor([0.0581, 0.0516, 0.0603, 0.0871, 0.0531, 0.0653, 0.0588, 0.0954], + device='cuda:5'), in_proj_covar=tensor([0.0137, 0.0137, 0.0145, 0.0126, 0.0126, 0.0144, 0.0144, 0.0167], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:27:18,465 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.58 vs. limit=5.0 +2023-03-26 18:27:26,004 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6154, 1.5442, 2.1213, 1.8796, 1.8944, 4.3384, 1.4877, 1.8648], + device='cuda:5'), covar=tensor([0.0903, 0.1705, 0.1184, 0.0953, 0.1461, 0.0156, 0.1486, 0.1620], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0074, 0.0077, 0.0092, 0.0081, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 18:27:29,590 INFO [finetune.py:976] (5/7) Epoch 15, batch 3300, loss[loss=0.1773, simple_loss=0.2508, pruned_loss=0.05191, over 4914.00 frames. ], tot_loss[loss=0.1869, simple_loss=0.2551, pruned_loss=0.05939, over 956011.56 frames. ], batch size: 36, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:28:07,470 INFO [finetune.py:976] (5/7) Epoch 15, batch 3350, loss[loss=0.2116, simple_loss=0.278, pruned_loss=0.07262, over 4931.00 frames. ], tot_loss[loss=0.1891, simple_loss=0.2571, pruned_loss=0.0606, over 954254.60 frames. ], batch size: 33, lr: 3.49e-03, grad_scale: 64.0 +2023-03-26 18:28:14,630 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.071e+02 1.792e+02 2.041e+02 2.510e+02 5.102e+02, threshold=4.082e+02, percent-clipped=3.0 +2023-03-26 18:28:20,780 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.11 vs. limit=5.0 +2023-03-26 18:28:21,870 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83552.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:28:33,757 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5547, 1.2945, 1.9043, 3.0541, 1.9389, 2.3119, 0.8964, 2.5324], + device='cuda:5'), covar=tensor([0.2013, 0.2000, 0.1653, 0.0855, 0.1064, 0.1852, 0.2206, 0.0715], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0117, 0.0133, 0.0164, 0.0101, 0.0138, 0.0124, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 18:28:54,248 INFO [finetune.py:976] (5/7) Epoch 15, batch 3400, loss[loss=0.1977, simple_loss=0.2706, pruned_loss=0.06241, over 4842.00 frames. ], tot_loss[loss=0.1895, simple_loss=0.2578, pruned_loss=0.06056, over 954724.78 frames. ], batch size: 44, lr: 3.49e-03, grad_scale: 64.0 +2023-03-26 18:29:24,888 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83619.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:29:37,312 INFO [finetune.py:976] (5/7) Epoch 15, batch 3450, loss[loss=0.1887, simple_loss=0.242, pruned_loss=0.06768, over 4703.00 frames. ], tot_loss[loss=0.1886, simple_loss=0.2572, pruned_loss=0.06003, over 956735.93 frames. 
], batch size: 23, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:29:39,048 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83640.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:29:41,997 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.066e+02 1.651e+02 1.928e+02 2.236e+02 3.717e+02, threshold=3.855e+02, percent-clipped=0.0 +2023-03-26 18:29:45,896 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.09 vs. limit=5.0 +2023-03-26 18:29:51,114 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.14 vs. limit=5.0 +2023-03-26 18:29:57,378 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=83667.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:30:11,004 INFO [finetune.py:976] (5/7) Epoch 15, batch 3500, loss[loss=0.1672, simple_loss=0.2392, pruned_loss=0.04756, over 4894.00 frames. ], tot_loss[loss=0.186, simple_loss=0.2543, pruned_loss=0.05888, over 955479.76 frames. ], batch size: 43, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:30:44,677 INFO [finetune.py:976] (5/7) Epoch 15, batch 3550, loss[loss=0.2018, simple_loss=0.2644, pruned_loss=0.06961, over 4911.00 frames. ], tot_loss[loss=0.1834, simple_loss=0.2515, pruned_loss=0.05764, over 955784.03 frames. ], batch size: 36, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:30:49,417 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.222e+01 1.573e+02 1.880e+02 2.102e+02 4.250e+02, threshold=3.760e+02, percent-clipped=2.0 +2023-03-26 18:30:49,827 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.36 vs. limit=5.0 +2023-03-26 18:30:59,102 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83760.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:31:18,473 INFO [finetune.py:976] (5/7) Epoch 15, batch 3600, loss[loss=0.1854, simple_loss=0.2549, pruned_loss=0.05796, over 4891.00 frames. ], tot_loss[loss=0.1818, simple_loss=0.2493, pruned_loss=0.05716, over 956470.81 frames. ], batch size: 35, lr: 3.49e-03, grad_scale: 32.0 +2023-03-26 18:31:47,129 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-03-26 18:31:48,183 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83821.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:31:59,980 INFO [finetune.py:976] (5/7) Epoch 15, batch 3650, loss[loss=0.2276, simple_loss=0.2912, pruned_loss=0.08205, over 4824.00 frames. ], tot_loss[loss=0.1839, simple_loss=0.2516, pruned_loss=0.05811, over 955811.72 frames. 
], batch size: 39, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:32:00,087 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8208, 2.6702, 3.3583, 4.6185, 3.3672, 3.4390, 1.7121, 3.7811], + device='cuda:5'), covar=tensor([0.1410, 0.1133, 0.1119, 0.0415, 0.0579, 0.1044, 0.1649, 0.0359], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0117, 0.0132, 0.0164, 0.0100, 0.0138, 0.0124, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 18:32:04,776 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.180e+02 1.603e+02 1.941e+02 2.306e+02 4.863e+02, threshold=3.882e+02, percent-clipped=1.0 +2023-03-26 18:32:09,569 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83852.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:32:11,769 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-03-26 18:32:12,694 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9902, 1.9597, 1.6256, 1.9194, 1.7964, 1.8090, 1.8623, 2.5065], + device='cuda:5'), covar=tensor([0.3998, 0.4080, 0.3472, 0.4020, 0.4230, 0.2487, 0.3925, 0.1749], + device='cuda:5'), in_proj_covar=tensor([0.0285, 0.0258, 0.0225, 0.0274, 0.0245, 0.0214, 0.0248, 0.0225], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:32:33,847 INFO [finetune.py:976] (5/7) Epoch 15, batch 3700, loss[loss=0.1822, simple_loss=0.2409, pruned_loss=0.06179, over 4152.00 frames. ], tot_loss[loss=0.1859, simple_loss=0.2541, pruned_loss=0.05886, over 951952.81 frames. ], batch size: 65, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:32:42,153 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=83900.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:33:01,277 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9481, 1.8020, 1.5763, 1.6450, 1.6558, 1.6676, 1.7344, 2.3367], + device='cuda:5'), covar=tensor([0.3498, 0.3787, 0.3052, 0.3522, 0.3713, 0.2192, 0.3573, 0.1742], + device='cuda:5'), in_proj_covar=tensor([0.0284, 0.0258, 0.0225, 0.0274, 0.0245, 0.0214, 0.0248, 0.0225], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:33:07,102 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0785, 1.0114, 1.0778, 0.3267, 0.9195, 1.2078, 1.2224, 1.0490], + device='cuda:5'), covar=tensor([0.0856, 0.0538, 0.0487, 0.0561, 0.0505, 0.0567, 0.0384, 0.0643], + device='cuda:5'), in_proj_covar=tensor([0.0125, 0.0151, 0.0124, 0.0128, 0.0130, 0.0126, 0.0141, 0.0147], + device='cuda:5'), out_proj_covar=tensor([9.1988e-05, 1.0979e-04, 8.8955e-05, 9.1836e-05, 9.2414e-05, 9.1130e-05, + 1.0172e-04, 1.0631e-04], device='cuda:5') +2023-03-26 18:33:07,590 INFO [finetune.py:976] (5/7) Epoch 15, batch 3750, loss[loss=0.1899, simple_loss=0.2584, pruned_loss=0.06073, over 4775.00 frames. ], tot_loss[loss=0.1878, simple_loss=0.2562, pruned_loss=0.05968, over 951953.08 frames. 
], batch size: 28, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:33:08,906 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83940.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:33:09,573 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2289, 2.0968, 1.5915, 2.2620, 2.0546, 1.8262, 2.5106, 2.1633], + device='cuda:5'), covar=tensor([0.1275, 0.2198, 0.3174, 0.2649, 0.2674, 0.1716, 0.2980, 0.1931], + device='cuda:5'), in_proj_covar=tensor([0.0182, 0.0188, 0.0235, 0.0255, 0.0246, 0.0201, 0.0212, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:33:11,800 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.005e+02 1.596e+02 1.977e+02 2.275e+02 5.079e+02, threshold=3.955e+02, percent-clipped=1.0 +2023-03-26 18:33:14,855 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83949.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:33:30,655 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-03-26 18:33:42,955 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5440, 1.4663, 1.7932, 1.7752, 1.6729, 3.4420, 1.4345, 1.5519], + device='cuda:5'), covar=tensor([0.0957, 0.1725, 0.1074, 0.0954, 0.1501, 0.0216, 0.1419, 0.1727], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0073, 0.0077, 0.0091, 0.0080, 0.0084, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 18:33:53,409 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83985.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:33:55,620 INFO [finetune.py:976] (5/7) Epoch 15, batch 3800, loss[loss=0.1918, simple_loss=0.2609, pruned_loss=0.0613, over 4827.00 frames. ], tot_loss[loss=0.1895, simple_loss=0.2579, pruned_loss=0.06052, over 950680.14 frames. ], batch size: 30, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:33:55,683 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=83988.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:34:10,619 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7441, 1.6371, 1.4827, 1.8368, 1.9148, 1.8334, 1.2808, 1.4939], + device='cuda:5'), covar=tensor([0.2240, 0.2024, 0.1929, 0.1630, 0.1694, 0.1173, 0.2611, 0.1878], + device='cuda:5'), in_proj_covar=tensor([0.0240, 0.0207, 0.0209, 0.0191, 0.0242, 0.0184, 0.0215, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:34:11,201 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84010.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 18:34:14,207 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0931, 2.0330, 1.7157, 2.0054, 1.8873, 1.9096, 1.9807, 2.6383], + device='cuda:5'), covar=tensor([0.3700, 0.4150, 0.3239, 0.3960, 0.3816, 0.2307, 0.3586, 0.1623], + device='cuda:5'), in_proj_covar=tensor([0.0285, 0.0258, 0.0225, 0.0274, 0.0245, 0.0214, 0.0248, 0.0225], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:34:36,933 INFO [finetune.py:976] (5/7) Epoch 15, batch 3850, loss[loss=0.211, simple_loss=0.2735, pruned_loss=0.07424, over 4803.00 frames. 
], tot_loss[loss=0.1883, simple_loss=0.2571, pruned_loss=0.05969, over 953347.32 frames. ], batch size: 51, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:34:41,717 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.557e+01 1.496e+02 1.862e+02 2.338e+02 3.560e+02, threshold=3.724e+02, percent-clipped=0.0 +2023-03-26 18:34:42,465 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84046.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:35:06,778 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.74 vs. limit=5.0 +2023-03-26 18:35:10,037 INFO [finetune.py:976] (5/7) Epoch 15, batch 3900, loss[loss=0.1883, simple_loss=0.2462, pruned_loss=0.06521, over 4918.00 frames. ], tot_loss[loss=0.1853, simple_loss=0.2535, pruned_loss=0.05849, over 955920.34 frames. ], batch size: 32, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:35:16,534 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84097.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:35:21,302 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.96 vs. limit=5.0 +2023-03-26 18:35:28,932 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84116.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:35:43,582 INFO [finetune.py:976] (5/7) Epoch 15, batch 3950, loss[loss=0.2039, simple_loss=0.2657, pruned_loss=0.07103, over 4758.00 frames. ], tot_loss[loss=0.1833, simple_loss=0.2508, pruned_loss=0.05789, over 955633.93 frames. ], batch size: 54, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:35:47,769 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.097e+02 1.469e+02 1.885e+02 2.278e+02 4.120e+02, threshold=3.770e+02, percent-clipped=1.0 +2023-03-26 18:35:57,220 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84158.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:36:16,805 INFO [finetune.py:976] (5/7) Epoch 15, batch 4000, loss[loss=0.1771, simple_loss=0.2391, pruned_loss=0.05753, over 4770.00 frames. ], tot_loss[loss=0.1841, simple_loss=0.2509, pruned_loss=0.05867, over 952857.83 frames. ], batch size: 28, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:36:24,523 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84199.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:36:57,632 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84236.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:36:59,240 INFO [finetune.py:976] (5/7) Epoch 15, batch 4050, loss[loss=0.2298, simple_loss=0.2908, pruned_loss=0.0844, over 4832.00 frames. ], tot_loss[loss=0.1869, simple_loss=0.2541, pruned_loss=0.05985, over 952336.52 frames. 
], batch size: 33, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:37:07,827 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.087e+02 1.586e+02 1.909e+02 2.268e+02 5.729e+02, threshold=3.818e+02, percent-clipped=2.0 +2023-03-26 18:37:21,559 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84260.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:37:24,442 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5086, 1.3473, 1.8738, 1.2004, 1.5159, 1.6668, 1.2562, 1.8883], + device='cuda:5'), covar=tensor([0.1393, 0.2313, 0.1275, 0.1789, 0.1167, 0.1589, 0.3088, 0.1077], + device='cuda:5'), in_proj_covar=tensor([0.0194, 0.0205, 0.0194, 0.0191, 0.0178, 0.0214, 0.0219, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:37:27,931 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-03-26 18:37:39,954 INFO [finetune.py:976] (5/7) Epoch 15, batch 4100, loss[loss=0.1817, simple_loss=0.2479, pruned_loss=0.05771, over 4744.00 frames. ], tot_loss[loss=0.1874, simple_loss=0.2553, pruned_loss=0.05976, over 952662.55 frames. ], batch size: 27, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:37:46,452 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84297.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:37:52,177 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84305.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 18:37:53,494 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6109, 2.3499, 2.2895, 2.5078, 2.3528, 2.4279, 2.2891, 2.9822], + device='cuda:5'), covar=tensor([0.3487, 0.4374, 0.3167, 0.3715, 0.3615, 0.2482, 0.3848, 0.1729], + device='cuda:5'), in_proj_covar=tensor([0.0285, 0.0259, 0.0225, 0.0274, 0.0247, 0.0214, 0.0248, 0.0225], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:38:11,874 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84336.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:38:13,430 INFO [finetune.py:976] (5/7) Epoch 15, batch 4150, loss[loss=0.1317, simple_loss=0.2045, pruned_loss=0.02948, over 4773.00 frames. ], tot_loss[loss=0.1874, simple_loss=0.2558, pruned_loss=0.05946, over 950750.58 frames. 
], batch size: 26, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:38:15,349 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84341.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:38:16,422 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5155, 1.2987, 1.8217, 3.1452, 1.9756, 2.4097, 0.8647, 2.7053], + device='cuda:5'), covar=tensor([0.2047, 0.2131, 0.1714, 0.0851, 0.1083, 0.1495, 0.2196, 0.0643], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0117, 0.0133, 0.0164, 0.0100, 0.0138, 0.0125, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 18:38:18,113 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.082e+02 1.618e+02 1.997e+02 2.307e+02 7.274e+02, threshold=3.993e+02, percent-clipped=3.0 +2023-03-26 18:38:21,706 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9929, 1.6778, 2.3654, 3.7680, 2.5288, 2.6629, 1.0297, 3.1197], + device='cuda:5'), covar=tensor([0.1791, 0.1452, 0.1303, 0.0526, 0.0807, 0.1691, 0.1835, 0.0452], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0117, 0.0133, 0.0165, 0.0100, 0.0138, 0.0125, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 18:38:50,309 INFO [finetune.py:976] (5/7) Epoch 15, batch 4200, loss[loss=0.1629, simple_loss=0.2313, pruned_loss=0.04726, over 4726.00 frames. ], tot_loss[loss=0.1877, simple_loss=0.2564, pruned_loss=0.05948, over 948529.07 frames. ], batch size: 26, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:38:55,691 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0 +2023-03-26 18:39:04,824 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84397.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:39:17,866 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84416.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:39:31,952 INFO [finetune.py:976] (5/7) Epoch 15, batch 4250, loss[loss=0.2307, simple_loss=0.2877, pruned_loss=0.08685, over 4780.00 frames. ], tot_loss[loss=0.1864, simple_loss=0.2548, pruned_loss=0.059, over 950002.95 frames. ], batch size: 51, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:39:34,030 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.03 vs. limit=5.0 +2023-03-26 18:39:36,667 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.324e+01 1.559e+02 1.825e+02 2.300e+02 4.289e+02, threshold=3.650e+02, percent-clipped=1.0 +2023-03-26 18:39:47,483 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84453.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:39:58,791 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=84464.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:40:14,296 INFO [finetune.py:976] (5/7) Epoch 15, batch 4300, loss[loss=0.1794, simple_loss=0.2612, pruned_loss=0.04878, over 4899.00 frames. ], tot_loss[loss=0.1841, simple_loss=0.2517, pruned_loss=0.05822, over 950510.73 frames. 
], batch size: 32, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:40:15,614 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0760, 2.0082, 1.7810, 2.1810, 2.6505, 2.1295, 1.9571, 1.6371], + device='cuda:5'), covar=tensor([0.2179, 0.2132, 0.1923, 0.1586, 0.1858, 0.1151, 0.2274, 0.1901], + device='cuda:5'), in_proj_covar=tensor([0.0237, 0.0205, 0.0207, 0.0189, 0.0239, 0.0182, 0.0213, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:40:22,755 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2725, 1.3693, 1.5448, 1.0752, 1.3009, 1.4930, 1.3426, 1.6955], + device='cuda:5'), covar=tensor([0.1143, 0.1887, 0.1192, 0.1286, 0.0887, 0.1124, 0.2680, 0.0768], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0203, 0.0193, 0.0189, 0.0176, 0.0212, 0.0216, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:40:39,111 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84525.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:40:47,842 INFO [finetune.py:976] (5/7) Epoch 15, batch 4350, loss[loss=0.164, simple_loss=0.2274, pruned_loss=0.05025, over 4151.00 frames. ], tot_loss[loss=0.1814, simple_loss=0.2486, pruned_loss=0.05715, over 950948.05 frames. ], batch size: 18, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:40:52,222 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.502e+02 1.820e+02 2.196e+02 3.984e+02, threshold=3.641e+02, percent-clipped=2.0 +2023-03-26 18:40:58,858 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84555.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:41:15,770 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.92 vs. limit=5.0 +2023-03-26 18:41:19,652 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84586.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:41:21,103 INFO [finetune.py:976] (5/7) Epoch 15, batch 4400, loss[loss=0.2153, simple_loss=0.2711, pruned_loss=0.07972, over 4834.00 frames. ], tot_loss[loss=0.1831, simple_loss=0.25, pruned_loss=0.05809, over 950442.88 frames. ], batch size: 30, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:41:24,139 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84592.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:41:28,629 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.50 vs. limit=5.0 +2023-03-26 18:41:32,787 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84605.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:41:51,829 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84633.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:41:54,827 INFO [finetune.py:976] (5/7) Epoch 15, batch 4450, loss[loss=0.1691, simple_loss=0.2501, pruned_loss=0.04405, over 4728.00 frames. ], tot_loss[loss=0.1865, simple_loss=0.2538, pruned_loss=0.05962, over 947441.41 frames. 
], batch size: 59, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:41:57,734 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84641.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:42:01,981 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.076e+02 1.595e+02 1.952e+02 2.292e+02 4.719e+02, threshold=3.904e+02, percent-clipped=1.0 +2023-03-26 18:42:03,394 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8680, 2.6575, 2.4845, 2.9644, 2.6041, 2.6470, 2.6413, 3.5692], + device='cuda:5'), covar=tensor([0.3191, 0.4272, 0.2943, 0.3948, 0.3713, 0.2201, 0.3863, 0.1525], + device='cuda:5'), in_proj_covar=tensor([0.0284, 0.0259, 0.0225, 0.0274, 0.0247, 0.0214, 0.0248, 0.0225], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:42:11,296 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=84653.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:42:40,878 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-03-26 18:42:46,431 INFO [finetune.py:976] (5/7) Epoch 15, batch 4500, loss[loss=0.2227, simple_loss=0.2832, pruned_loss=0.08114, over 4839.00 frames. ], tot_loss[loss=0.1884, simple_loss=0.2561, pruned_loss=0.06037, over 946595.95 frames. ], batch size: 49, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:42:47,111 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=84689.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:42:49,430 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84692.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:42:51,217 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84694.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:43:20,119 INFO [finetune.py:976] (5/7) Epoch 15, batch 4550, loss[loss=0.1348, simple_loss=0.211, pruned_loss=0.02925, over 4779.00 frames. ], tot_loss[loss=0.1901, simple_loss=0.258, pruned_loss=0.06104, over 949684.63 frames. ], batch size: 25, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:43:25,283 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.068e+02 1.592e+02 2.005e+02 2.406e+02 4.528e+02, threshold=4.009e+02, percent-clipped=3.0 +2023-03-26 18:43:30,232 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84753.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:43:44,079 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.58 vs. limit=2.0 +2023-03-26 18:43:51,684 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-03-26 18:43:53,703 INFO [finetune.py:976] (5/7) Epoch 15, batch 4600, loss[loss=0.166, simple_loss=0.2423, pruned_loss=0.04483, over 4773.00 frames. ], tot_loss[loss=0.1875, simple_loss=0.2559, pruned_loss=0.05953, over 948561.29 frames. ], batch size: 54, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:43:56,620 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. 
limit=2.0 +2023-03-26 18:44:06,845 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84800.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:44:07,397 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=84801.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:44:19,392 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-03-26 18:44:36,214 INFO [finetune.py:976] (5/7) Epoch 15, batch 4650, loss[loss=0.1433, simple_loss=0.2151, pruned_loss=0.03576, over 4851.00 frames. ], tot_loss[loss=0.1853, simple_loss=0.2534, pruned_loss=0.05861, over 949838.07 frames. ], batch size: 44, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:44:40,391 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.176e+02 1.584e+02 1.933e+02 2.372e+02 3.946e+02, threshold=3.865e+02, percent-clipped=0.0 +2023-03-26 18:44:46,432 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9213, 1.3920, 1.9890, 1.9361, 1.6985, 1.6494, 1.8662, 1.8009], + device='cuda:5'), covar=tensor([0.3226, 0.3553, 0.2955, 0.3171, 0.4242, 0.3393, 0.3789, 0.2818], + device='cuda:5'), in_proj_covar=tensor([0.0245, 0.0239, 0.0258, 0.0269, 0.0268, 0.0241, 0.0280, 0.0237], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:44:47,568 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84855.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:44:56,253 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84861.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:45:17,982 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84881.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:45:26,375 INFO [finetune.py:976] (5/7) Epoch 15, batch 4700, loss[loss=0.2062, simple_loss=0.2591, pruned_loss=0.07668, over 4742.00 frames. ], tot_loss[loss=0.1837, simple_loss=0.251, pruned_loss=0.05821, over 950982.85 frames. ], batch size: 59, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:45:29,358 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84892.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:45:36,524 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=84903.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:45:59,761 INFO [finetune.py:976] (5/7) Epoch 15, batch 4750, loss[loss=0.1696, simple_loss=0.2316, pruned_loss=0.05381, over 4827.00 frames. ], tot_loss[loss=0.1811, simple_loss=0.2482, pruned_loss=0.05696, over 952032.64 frames. ], batch size: 30, lr: 3.48e-03, grad_scale: 32.0 +2023-03-26 18:46:01,518 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=84940.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:46:04,957 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.000e+02 1.651e+02 1.892e+02 2.436e+02 4.596e+02, threshold=3.784e+02, percent-clipped=1.0 +2023-03-26 18:46:08,781 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84951.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:46:26,161 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-03-26 18:46:33,701 INFO [finetune.py:976] (5/7) Epoch 15, batch 4800, loss[loss=0.2352, simple_loss=0.294, pruned_loss=0.0882, over 4908.00 frames. 
], tot_loss[loss=0.1818, simple_loss=0.249, pruned_loss=0.05725, over 952263.48 frames. ], batch size: 36, lr: 3.47e-03, grad_scale: 32.0 +2023-03-26 18:46:34,853 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84989.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:46:36,726 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84992.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:46:50,492 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85012.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:47:02,305 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7677, 1.6326, 1.5972, 1.6640, 1.2307, 4.3061, 1.6679, 2.0507], + device='cuda:5'), covar=tensor([0.3212, 0.2439, 0.2087, 0.2260, 0.1785, 0.0131, 0.2286, 0.1227], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0115, 0.0120, 0.0124, 0.0115, 0.0097, 0.0097, 0.0097], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 18:47:04,873 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-03-26 18:47:07,604 INFO [finetune.py:976] (5/7) Epoch 15, batch 4850, loss[loss=0.1626, simple_loss=0.232, pruned_loss=0.04661, over 4817.00 frames. ], tot_loss[loss=0.1852, simple_loss=0.2529, pruned_loss=0.05874, over 950640.37 frames. ], batch size: 30, lr: 3.47e-03, grad_scale: 32.0 +2023-03-26 18:47:09,347 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=85040.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:47:12,314 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.155e+02 1.585e+02 1.858e+02 2.141e+02 6.123e+02, threshold=3.716e+02, percent-clipped=1.0 +2023-03-26 18:47:39,317 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-26 18:47:42,704 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7806, 1.7445, 2.1656, 3.1122, 2.1566, 2.3081, 1.3484, 2.4459], + device='cuda:5'), covar=tensor([0.1389, 0.1140, 0.1034, 0.0492, 0.0685, 0.1738, 0.1407, 0.0520], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0131, 0.0162, 0.0098, 0.0136, 0.0123, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 18:47:50,156 INFO [finetune.py:976] (5/7) Epoch 15, batch 4900, loss[loss=0.1847, simple_loss=0.2641, pruned_loss=0.05267, over 4899.00 frames. ], tot_loss[loss=0.1868, simple_loss=0.2551, pruned_loss=0.05931, over 952068.14 frames. ], batch size: 43, lr: 3.47e-03, grad_scale: 32.0 +2023-03-26 18:48:06,030 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85107.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:48:11,353 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85115.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:48:26,708 INFO [finetune.py:976] (5/7) Epoch 15, batch 4950, loss[loss=0.2007, simple_loss=0.2699, pruned_loss=0.06576, over 4723.00 frames. ], tot_loss[loss=0.1872, simple_loss=0.2558, pruned_loss=0.05925, over 951883.25 frames. 
], batch size: 59, lr: 3.47e-03, grad_scale: 32.0 +2023-03-26 18:48:31,438 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.158e+02 1.624e+02 1.884e+02 2.194e+02 3.725e+02, threshold=3.769e+02, percent-clipped=1.0 +2023-03-26 18:48:39,186 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85156.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:48:47,030 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85168.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 18:48:50,639 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-03-26 18:48:52,401 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85176.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:48:55,805 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85181.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:49:00,443 INFO [finetune.py:976] (5/7) Epoch 15, batch 5000, loss[loss=0.2094, simple_loss=0.2637, pruned_loss=0.07753, over 4881.00 frames. ], tot_loss[loss=0.1867, simple_loss=0.2553, pruned_loss=0.0591, over 951809.28 frames. ], batch size: 43, lr: 3.47e-03, grad_scale: 32.0 +2023-03-26 18:49:13,617 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2379, 1.7704, 2.1605, 2.1370, 1.8775, 1.8416, 2.0914, 2.0476], + device='cuda:5'), covar=tensor([0.3731, 0.4205, 0.3349, 0.3912, 0.4995, 0.4018, 0.4920, 0.3209], + device='cuda:5'), in_proj_covar=tensor([0.0245, 0.0239, 0.0258, 0.0269, 0.0268, 0.0241, 0.0280, 0.0237], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:49:36,185 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=85229.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:49:42,107 INFO [finetune.py:976] (5/7) Epoch 15, batch 5050, loss[loss=0.13, simple_loss=0.1996, pruned_loss=0.03014, over 4241.00 frames. ], tot_loss[loss=0.184, simple_loss=0.2521, pruned_loss=0.05792, over 953033.75 frames. ], batch size: 18, lr: 3.47e-03, grad_scale: 32.0 +2023-03-26 18:49:46,817 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.063e+02 1.593e+02 1.872e+02 2.269e+02 5.264e+02, threshold=3.745e+02, percent-clipped=1.0 +2023-03-26 18:49:53,368 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6414, 1.2564, 0.8350, 1.5252, 1.9759, 1.0754, 1.4643, 1.5701], + device='cuda:5'), covar=tensor([0.1427, 0.2000, 0.1895, 0.1202, 0.1889, 0.1932, 0.1345, 0.1854], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0094, 0.0110, 0.0092, 0.0118, 0.0094, 0.0099, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 18:50:22,631 INFO [finetune.py:976] (5/7) Epoch 15, batch 5100, loss[loss=0.1738, simple_loss=0.2465, pruned_loss=0.05057, over 4821.00 frames. ], tot_loss[loss=0.1807, simple_loss=0.2486, pruned_loss=0.05643, over 954756.85 frames. 
], batch size: 40, lr: 3.47e-03, grad_scale: 32.0 +2023-03-26 18:50:23,340 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85289.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:50:23,982 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5120, 2.3910, 1.9131, 2.5343, 2.3050, 2.0344, 2.9793, 2.4511], + device='cuda:5'), covar=tensor([0.1259, 0.2375, 0.3074, 0.2668, 0.2667, 0.1686, 0.3145, 0.1732], + device='cuda:5'), in_proj_covar=tensor([0.0182, 0.0189, 0.0235, 0.0255, 0.0246, 0.0202, 0.0213, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:50:32,906 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.55 vs. limit=5.0 +2023-03-26 18:50:42,799 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85307.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:51:02,802 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=85337.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:51:03,346 INFO [finetune.py:976] (5/7) Epoch 15, batch 5150, loss[loss=0.2364, simple_loss=0.2941, pruned_loss=0.08933, over 4742.00 frames. ], tot_loss[loss=0.1814, simple_loss=0.2487, pruned_loss=0.05707, over 954304.48 frames. ], batch size: 54, lr: 3.47e-03, grad_scale: 32.0 +2023-03-26 18:51:08,104 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.937e+01 1.526e+02 1.888e+02 2.256e+02 3.382e+02, threshold=3.776e+02, percent-clipped=0.0 +2023-03-26 18:51:19,426 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-03-26 18:51:24,732 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9416, 1.7984, 1.5854, 1.6582, 1.6739, 1.6726, 1.7500, 2.3503], + device='cuda:5'), covar=tensor([0.3284, 0.4104, 0.2961, 0.3334, 0.3696, 0.2151, 0.3341, 0.1520], + device='cuda:5'), in_proj_covar=tensor([0.0285, 0.0259, 0.0226, 0.0275, 0.0247, 0.0214, 0.0248, 0.0226], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 18:51:37,045 INFO [finetune.py:976] (5/7) Epoch 15, batch 5200, loss[loss=0.2124, simple_loss=0.2916, pruned_loss=0.06664, over 4907.00 frames. ], tot_loss[loss=0.1844, simple_loss=0.2521, pruned_loss=0.05833, over 951823.25 frames. ], batch size: 37, lr: 3.47e-03, grad_scale: 32.0 +2023-03-26 18:52:04,201 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85428.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:52:10,614 INFO [finetune.py:976] (5/7) Epoch 15, batch 5250, loss[loss=0.1693, simple_loss=0.2364, pruned_loss=0.05112, over 4854.00 frames. ], tot_loss[loss=0.186, simple_loss=0.2541, pruned_loss=0.05891, over 951376.00 frames. 
], batch size: 44, lr: 3.47e-03, grad_scale: 32.0 +2023-03-26 18:52:15,820 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.083e+02 1.657e+02 1.928e+02 2.523e+02 8.274e+02, threshold=3.856e+02, percent-clipped=2.0 +2023-03-26 18:52:23,073 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85456.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:52:27,678 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85463.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 18:52:33,031 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85471.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:52:40,803 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85483.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:52:44,238 INFO [finetune.py:976] (5/7) Epoch 15, batch 5300, loss[loss=0.2371, simple_loss=0.2866, pruned_loss=0.09376, over 4797.00 frames. ], tot_loss[loss=0.1869, simple_loss=0.2552, pruned_loss=0.0593, over 951823.52 frames. ], batch size: 25, lr: 3.47e-03, grad_scale: 32.0 +2023-03-26 18:52:44,966 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85489.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:52:54,981 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=85504.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:53:19,697 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85530.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:53:24,909 INFO [finetune.py:976] (5/7) Epoch 15, batch 5350, loss[loss=0.1822, simple_loss=0.2373, pruned_loss=0.06356, over 4852.00 frames. ], tot_loss[loss=0.1871, simple_loss=0.2559, pruned_loss=0.05915, over 953073.86 frames. ], batch size: 31, lr: 3.47e-03, grad_scale: 32.0 +2023-03-26 18:53:28,696 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85544.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:53:29,178 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.094e+02 1.542e+02 1.806e+02 2.197e+02 4.190e+02, threshold=3.613e+02, percent-clipped=2.0 +2023-03-26 18:53:58,026 INFO [finetune.py:976] (5/7) Epoch 15, batch 5400, loss[loss=0.1476, simple_loss=0.2165, pruned_loss=0.03936, over 4764.00 frames. ], tot_loss[loss=0.1859, simple_loss=0.2539, pruned_loss=0.05895, over 952159.95 frames. 
], batch size: 26, lr: 3.47e-03, grad_scale: 32.0 +2023-03-26 18:54:00,456 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85591.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:54:05,834 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5570, 1.0820, 0.7891, 1.3772, 1.9996, 0.6689, 1.2531, 1.4324], + device='cuda:5'), covar=tensor([0.1401, 0.2090, 0.1657, 0.1224, 0.1752, 0.1933, 0.1465, 0.1879], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0095, 0.0111, 0.0093, 0.0119, 0.0094, 0.0099, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 18:54:11,214 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85607.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:54:23,551 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85625.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:54:31,765 INFO [finetune.py:976] (5/7) Epoch 15, batch 5450, loss[loss=0.1587, simple_loss=0.2273, pruned_loss=0.04507, over 4765.00 frames. ], tot_loss[loss=0.1829, simple_loss=0.2507, pruned_loss=0.05754, over 953337.27 frames. ], batch size: 27, lr: 3.47e-03, grad_scale: 64.0 +2023-03-26 18:54:37,630 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3143, 1.4609, 1.5076, 0.8357, 1.5268, 1.7122, 1.7656, 1.3689], + device='cuda:5'), covar=tensor([0.0937, 0.0584, 0.0538, 0.0504, 0.0415, 0.0565, 0.0314, 0.0691], + device='cuda:5'), in_proj_covar=tensor([0.0126, 0.0152, 0.0125, 0.0130, 0.0132, 0.0128, 0.0144, 0.0148], + device='cuda:5'), out_proj_covar=tensor([9.3142e-05, 1.1103e-04, 8.9790e-05, 9.3079e-05, 9.3410e-05, 9.2333e-05, + 1.0382e-04, 1.0700e-04], device='cuda:5') +2023-03-26 18:54:41,082 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.884e+01 1.516e+02 1.902e+02 2.390e+02 5.288e+02, threshold=3.804e+02, percent-clipped=4.0 +2023-03-26 18:54:52,066 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=85655.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:55:16,886 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85686.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 18:55:17,972 INFO [finetune.py:976] (5/7) Epoch 15, batch 5500, loss[loss=0.1422, simple_loss=0.2115, pruned_loss=0.03643, over 4844.00 frames. ], tot_loss[loss=0.1802, simple_loss=0.2478, pruned_loss=0.05635, over 955833.52 frames. ], batch size: 44, lr: 3.47e-03, grad_scale: 64.0 +2023-03-26 18:56:02,559 INFO [finetune.py:976] (5/7) Epoch 15, batch 5550, loss[loss=0.1629, simple_loss=0.2291, pruned_loss=0.04839, over 4868.00 frames. ], tot_loss[loss=0.1816, simple_loss=0.2488, pruned_loss=0.05722, over 954700.26 frames. 
], batch size: 31, lr: 3.47e-03, grad_scale: 64.0 +2023-03-26 18:56:06,722 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.623e+01 1.589e+02 1.875e+02 2.150e+02 4.153e+02, threshold=3.750e+02, percent-clipped=1.0 +2023-03-26 18:56:08,633 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6178, 1.5549, 1.9824, 3.0285, 2.0585, 2.3778, 1.0962, 2.4163], + device='cuda:5'), covar=tensor([0.1724, 0.1386, 0.1226, 0.0525, 0.0814, 0.1136, 0.1743, 0.0564], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0117, 0.0134, 0.0165, 0.0100, 0.0139, 0.0125, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 18:56:19,300 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85763.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:56:22,988 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7974, 1.3760, 0.8141, 1.5925, 2.0855, 1.5670, 1.6041, 1.6481], + device='cuda:5'), covar=tensor([0.1538, 0.2083, 0.2092, 0.1343, 0.1984, 0.2041, 0.1494, 0.1991], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0111, 0.0093, 0.0119, 0.0094, 0.0099, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 18:56:24,633 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85771.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:56:32,625 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85784.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:56:34,956 INFO [finetune.py:976] (5/7) Epoch 15, batch 5600, loss[loss=0.1502, simple_loss=0.2034, pruned_loss=0.04852, over 4327.00 frames. ], tot_loss[loss=0.186, simple_loss=0.2541, pruned_loss=0.05897, over 954453.38 frames. ], batch size: 18, lr: 3.47e-03, grad_scale: 64.0 +2023-03-26 18:56:48,431 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=85811.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:56:49,632 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6085, 1.4431, 1.9248, 1.7872, 1.5514, 3.4957, 1.3875, 1.6065], + device='cuda:5'), covar=tensor([0.0867, 0.1772, 0.1113, 0.0958, 0.1671, 0.0189, 0.1439, 0.1719], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0074, 0.0078, 0.0092, 0.0081, 0.0086, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 18:56:53,121 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=85819.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 18:57:04,148 INFO [finetune.py:976] (5/7) Epoch 15, batch 5650, loss[loss=0.2272, simple_loss=0.2852, pruned_loss=0.08457, over 4139.00 frames. ], tot_loss[loss=0.1868, simple_loss=0.2555, pruned_loss=0.05906, over 954697.50 frames. 
], batch size: 65, lr: 3.47e-03, grad_scale: 64.0
+2023-03-26 18:57:04,788 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85839.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 18:57:08,246 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.126e+02 1.563e+02 1.888e+02 2.328e+02 3.522e+02, threshold=3.776e+02, percent-clipped=0.0
+2023-03-26 18:57:26,029 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85875.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 18:57:32,561 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85886.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 18:57:33,722 INFO [finetune.py:976] (5/7) Epoch 15, batch 5700, loss[loss=0.1648, simple_loss=0.2268, pruned_loss=0.05137, over 4049.00 frames. ], tot_loss[loss=0.1839, simple_loss=0.2516, pruned_loss=0.0581, over 937002.00 frames. ], batch size: 17, lr: 3.47e-03, grad_scale: 64.0
+2023-03-26 18:58:02,782 INFO [finetune.py:976] (5/7) Epoch 16, batch 0, loss[loss=0.1694, simple_loss=0.2466, pruned_loss=0.04607, over 4842.00 frames. ], tot_loss[loss=0.1694, simple_loss=0.2466, pruned_loss=0.04607, over 4842.00 frames. ], batch size: 47, lr: 3.46e-03, grad_scale: 64.0
+2023-03-26 18:58:02,782 INFO [finetune.py:1001] (5/7) Computing validation loss
+2023-03-26 18:58:12,209 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7974, 1.3550, 0.9157, 1.5985, 2.1110, 1.1127, 1.6080, 1.6195],
+ device='cuda:5'), covar=tensor([0.1414, 0.1973, 0.1836, 0.1263, 0.1776, 0.1908, 0.1338, 0.1900],
+ device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0095, 0.0110, 0.0093, 0.0119, 0.0094, 0.0099, 0.0089],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-26 18:58:17,921 INFO [finetune.py:1010] (5/7) Epoch 16, validation: loss=0.1572, simple_loss=0.2278, pruned_loss=0.04329, over 2265189.00 frames.
+2023-03-26 18:58:17,921 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB
+2023-03-26 18:58:26,962 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85930.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 18:58:30,571 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85936.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 18:58:36,426 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.062e+02 1.566e+02 1.783e+02 2.274e+02 8.459e+02, threshold=3.567e+02, percent-clipped=4.0
+2023-03-26 18:58:44,329 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85957.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 18:58:49,654 INFO [finetune.py:976] (5/7) Epoch 16, batch 50, loss[loss=0.1959, simple_loss=0.2646, pruned_loss=0.06365, over 4897.00 frames. ], tot_loss[loss=0.1928, simple_loss=0.2614, pruned_loss=0.06206, over 217710.99 frames. ], batch size: 36, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 18:58:59,779 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85981.0, num_to_drop=1, layers_to_drop={3}
+2023-03-26 18:59:05,873 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85991.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 18:59:14,744 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86002.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 18:59:17,350 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0
+2023-03-26 18:59:23,677 INFO [finetune.py:976] (5/7) Epoch 16, batch 100, loss[loss=0.1975, simple_loss=0.2545, pruned_loss=0.07023, over 4818.00 frames. ], tot_loss[loss=0.1885, simple_loss=0.2551, pruned_loss=0.06095, over 380752.17 frames. ], batch size: 25, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 18:59:24,985 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86018.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 18:59:44,131 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.132e+02 1.611e+02 1.877e+02 2.147e+02 3.763e+02, threshold=3.754e+02, percent-clipped=3.0
+2023-03-26 18:59:47,931 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86052.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:00:00,126 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86063.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:00:06,620 INFO [finetune.py:976] (5/7) Epoch 16, batch 150, loss[loss=0.1797, simple_loss=0.2576, pruned_loss=0.05091, over 4895.00 frames. ], tot_loss[loss=0.1832, simple_loss=0.2498, pruned_loss=0.05824, over 510026.61 frames. ], batch size: 35, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:00:08,577 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86069.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:00:17,337 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.66 vs. limit=2.0
+2023-03-26 19:00:27,239 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86084.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:00:43,682 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2148, 2.9847, 2.7616, 1.3138, 3.0349, 2.2090, 0.8131, 1.9019],
+ device='cuda:5'), covar=tensor([0.2515, 0.2093, 0.1976, 0.3795, 0.1384, 0.1221, 0.4401, 0.1834],
+ device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0174, 0.0160, 0.0129, 0.0158, 0.0123, 0.0146, 0.0123],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-26 19:00:50,258 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86113.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:00:51,952 INFO [finetune.py:976] (5/7) Epoch 16, batch 200, loss[loss=0.2005, simple_loss=0.2607, pruned_loss=0.07014, over 4944.00 frames. ], tot_loss[loss=0.1834, simple_loss=0.2496, pruned_loss=0.05862, over 608149.16 frames. ], batch size: 33, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:01:04,038 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86130.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:01:05,146 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=86132.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:01:09,829 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86139.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:01:14,016 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.089e+02 1.565e+02 1.801e+02 2.285e+02 3.660e+02, threshold=3.601e+02, percent-clipped=0.0
+2023-03-26 19:01:27,191 INFO [finetune.py:976] (5/7) Epoch 16, batch 250, loss[loss=0.1939, simple_loss=0.266, pruned_loss=0.06093, over 4894.00 frames. ], tot_loss[loss=0.1852, simple_loss=0.2527, pruned_loss=0.05886, over 686554.73 frames. ], batch size: 35, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:01:40,817 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86186.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:01:41,401 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=86187.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:02:00,656 INFO [finetune.py:976] (5/7) Epoch 16, batch 300, loss[loss=0.1603, simple_loss=0.2354, pruned_loss=0.04255, over 4738.00 frames. ], tot_loss[loss=0.1854, simple_loss=0.2538, pruned_loss=0.05845, over 747576.37 frames. ], batch size: 59, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:02:04,364 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0251, 1.8974, 1.7846, 2.0285, 2.4618, 1.9862, 1.9090, 1.7182],
+ device='cuda:5'), covar=tensor([0.1586, 0.1608, 0.1453, 0.1310, 0.1512, 0.1015, 0.1983, 0.1515],
+ device='cuda:5'), in_proj_covar=tensor([0.0240, 0.0207, 0.0211, 0.0191, 0.0241, 0.0184, 0.0214, 0.0199],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:02:04,970 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1883, 2.0764, 1.6287, 2.1123, 2.0936, 1.8146, 2.4192, 2.2324],
+ device='cuda:5'), covar=tensor([0.1342, 0.2242, 0.3203, 0.2642, 0.2519, 0.1755, 0.3240, 0.1668],
+ device='cuda:5'), in_proj_covar=tensor([0.0182, 0.0189, 0.0235, 0.0255, 0.0245, 0.0203, 0.0213, 0.0200],
+ device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:02:11,170 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86231.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:02:12,980 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=86234.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:02:13,737 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.36 vs. limit=5.0
+2023-03-26 19:02:20,700 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.190e+02 1.625e+02 1.967e+02 2.251e+02 5.649e+02, threshold=3.935e+02, percent-clipped=3.0
+2023-03-26 19:02:20,826 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86246.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:02:23,813 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9782, 4.9690, 4.6459, 2.6310, 5.0229, 3.9165, 0.9550, 3.5534],
+ device='cuda:5'), covar=tensor([0.1932, 0.1809, 0.1223, 0.2684, 0.0686, 0.0742, 0.4179, 0.1087],
+ device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0173, 0.0159, 0.0127, 0.0157, 0.0122, 0.0145, 0.0122],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-26 19:02:34,279 INFO [finetune.py:976] (5/7) Epoch 16, batch 350, loss[loss=0.1816, simple_loss=0.2522, pruned_loss=0.05551, over 4821.00 frames. ], tot_loss[loss=0.1853, simple_loss=0.2546, pruned_loss=0.058, over 793845.36 frames. ], batch size: 47, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:02:39,873 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.1815, 1.3356, 1.3648, 0.7278, 1.2733, 1.5780, 1.6056, 1.2949],
+ device='cuda:5'), covar=tensor([0.0932, 0.0658, 0.0552, 0.0544, 0.0487, 0.0690, 0.0369, 0.0749],
+ device='cuda:5'), in_proj_covar=tensor([0.0125, 0.0151, 0.0124, 0.0128, 0.0129, 0.0127, 0.0142, 0.0146],
+ device='cuda:5'), out_proj_covar=tensor([9.1856e-05, 1.0970e-04, 8.8804e-05, 9.1725e-05, 9.1320e-05, 9.1600e-05,
+ 1.0220e-04, 1.0557e-04], device='cuda:5')
+2023-03-26 19:02:44,519 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86281.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:02:47,960 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86286.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 19:02:56,008 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0
+2023-03-26 19:03:01,127 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86307.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:03:05,211 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86313.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:03:07,487 INFO [finetune.py:976] (5/7) Epoch 16, batch 400, loss[loss=0.1423, simple_loss=0.2046, pruned_loss=0.03997, over 4706.00 frames. ], tot_loss[loss=0.1858, simple_loss=0.2553, pruned_loss=0.05816, over 829834.04 frames. ], batch size: 23, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:03:15,905 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=86329.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:03:34,375 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.307e+01 1.538e+02 1.774e+02 2.172e+02 4.200e+02, threshold=3.548e+02, percent-clipped=1.0
+2023-03-26 19:03:45,094 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86358.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:03:50,329 INFO [finetune.py:976] (5/7) Epoch 16, batch 450, loss[loss=0.2202, simple_loss=0.2691, pruned_loss=0.08565, over 4871.00 frames. ], tot_loss[loss=0.1851, simple_loss=0.2544, pruned_loss=0.05791, over 858501.44 frames. ], batch size: 34, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:04:18,722 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86408.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:04:24,032 INFO [finetune.py:976] (5/7) Epoch 16, batch 500, loss[loss=0.2001, simple_loss=0.2639, pruned_loss=0.06817, over 4915.00 frames. ], tot_loss[loss=0.1838, simple_loss=0.2522, pruned_loss=0.05771, over 881178.48 frames. ], batch size: 37, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:04:30,020 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86425.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:04:40,639 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86440.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:04:43,601 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86445.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:04:44,071 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.779e+01 1.556e+02 1.875e+02 2.226e+02 4.465e+02, threshold=3.750e+02, percent-clipped=2.0
+2023-03-26 19:04:52,203 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.53 vs. limit=5.0
+2023-03-26 19:04:57,163 INFO [finetune.py:976] (5/7) Epoch 16, batch 550, loss[loss=0.1643, simple_loss=0.2428, pruned_loss=0.0429, over 4762.00 frames. ], tot_loss[loss=0.1815, simple_loss=0.2494, pruned_loss=0.05683, over 897167.67 frames. ], batch size: 26, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:05:12,089 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0
+2023-03-26 19:05:13,280 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0
+2023-03-26 19:05:31,560 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86501.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:05:39,370 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0
+2023-03-26 19:05:39,627 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86506.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 19:05:50,206 INFO [finetune.py:976] (5/7) Epoch 16, batch 600, loss[loss=0.2334, simple_loss=0.3059, pruned_loss=0.08039, over 4847.00 frames. ], tot_loss[loss=0.1827, simple_loss=0.2509, pruned_loss=0.05728, over 909871.67 frames. ], batch size: 44, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:06:01,804 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.4508, 1.4054, 1.3877, 0.8720, 1.4444, 1.6302, 1.7173, 1.3138],
+ device='cuda:5'), covar=tensor([0.0876, 0.0561, 0.0458, 0.0470, 0.0447, 0.0553, 0.0273, 0.0575],
+ device='cuda:5'), in_proj_covar=tensor([0.0126, 0.0153, 0.0124, 0.0129, 0.0131, 0.0128, 0.0143, 0.0147],
+ device='cuda:5'), out_proj_covar=tensor([9.2917e-05, 1.1111e-04, 8.9260e-05, 9.2568e-05, 9.2690e-05, 9.2248e-05,
+ 1.0293e-04, 1.0658e-04], device='cuda:5')
+2023-03-26 19:06:04,182 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86531.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:06:09,395 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9001, 1.6725, 1.8578, 1.1310, 1.8234, 1.8958, 1.8868, 1.5100],
+ device='cuda:5'), covar=tensor([0.0550, 0.0748, 0.0706, 0.1007, 0.0700, 0.0753, 0.0610, 0.1216],
+ device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0134, 0.0140, 0.0122, 0.0123, 0.0140, 0.0141, 0.0162],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:06:14,603 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.101e+02 1.575e+02 1.922e+02 2.222e+02 3.111e+02, threshold=3.844e+02, percent-clipped=0.0
+2023-03-26 19:06:27,336 INFO [finetune.py:976] (5/7) Epoch 16, batch 650, loss[loss=0.2129, simple_loss=0.2818, pruned_loss=0.07202, over 4910.00 frames. ], tot_loss[loss=0.1863, simple_loss=0.2549, pruned_loss=0.05889, over 919960.50 frames. ], batch size: 37, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:06:36,233 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=86579.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:06:37,401 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86580.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:06:41,057 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86586.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 19:06:43,454 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0490, 1.7705, 2.3556, 1.5680, 2.1243, 2.3909, 1.7573, 2.4678],
+ device='cuda:5'), covar=tensor([0.1381, 0.2148, 0.1583, 0.2037, 0.0953, 0.1241, 0.2641, 0.0843],
+ device='cuda:5'), in_proj_covar=tensor([0.0195, 0.0207, 0.0194, 0.0192, 0.0179, 0.0216, 0.0220, 0.0203],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:06:52,268 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86602.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:06:59,430 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86613.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:07:01,142 INFO [finetune.py:976] (5/7) Epoch 16, batch 700, loss[loss=0.155, simple_loss=0.2307, pruned_loss=0.03962, over 4818.00 frames. ], tot_loss[loss=0.1889, simple_loss=0.2575, pruned_loss=0.06021, over 929785.46 frames. ], batch size: 38, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:07:13,497 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=86634.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 19:07:17,798 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86641.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:07:18,425 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1869, 2.1046, 1.8578, 2.1236, 2.0134, 2.0820, 2.0453, 2.9045],
+ device='cuda:5'), covar=tensor([0.4120, 0.5412, 0.3682, 0.5034, 0.5291, 0.2491, 0.4891, 0.1861],
+ device='cuda:5'), in_proj_covar=tensor([0.0285, 0.0260, 0.0225, 0.0274, 0.0248, 0.0214, 0.0249, 0.0226],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:07:21,644 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.125e+02 1.513e+02 1.866e+02 2.326e+02 3.823e+02, threshold=3.732e+02, percent-clipped=0.0
+2023-03-26 19:07:29,552 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86658.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:07:31,329 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=86661.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:07:34,798 INFO [finetune.py:976] (5/7) Epoch 16, batch 750, loss[loss=0.1754, simple_loss=0.2518, pruned_loss=0.04955, over 4854.00 frames. ], tot_loss[loss=0.1892, simple_loss=0.2584, pruned_loss=0.06004, over 936800.09 frames. ], batch size: 44, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:08:02,110 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=86706.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:08:03,364 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86708.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:08:07,728 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0
+2023-03-26 19:08:08,119 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7224, 2.6014, 2.1035, 1.1292, 2.3628, 2.0115, 1.9160, 2.2188],
+ device='cuda:5'), covar=tensor([0.0795, 0.0711, 0.1605, 0.1982, 0.1438, 0.2372, 0.2075, 0.1020],
+ device='cuda:5'), in_proj_covar=tensor([0.0167, 0.0195, 0.0199, 0.0182, 0.0211, 0.0206, 0.0222, 0.0195],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:08:08,619 INFO [finetune.py:976] (5/7) Epoch 16, batch 800, loss[loss=0.1882, simple_loss=0.254, pruned_loss=0.06122, over 4879.00 frames. ], tot_loss[loss=0.1874, simple_loss=0.2566, pruned_loss=0.05907, over 940906.08 frames. ], batch size: 32, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:08:14,168 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86725.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:08:26,157 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0
+2023-03-26 19:08:28,782 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.094e+02 1.501e+02 1.840e+02 2.208e+02 4.378e+02, threshold=3.681e+02, percent-clipped=4.0
+2023-03-26 19:08:40,242 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=86756.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:08:49,459 INFO [finetune.py:976] (5/7) Epoch 16, batch 850, loss[loss=0.1882, simple_loss=0.2657, pruned_loss=0.05535, over 4871.00 frames. ], tot_loss[loss=0.1854, simple_loss=0.2544, pruned_loss=0.05816, over 946497.92 frames. ], batch size: 34, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:08:54,219 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=86773.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:09:09,591 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86796.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:09:13,591 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86801.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 19:09:21,908 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86814.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:09:23,014 INFO [finetune.py:976] (5/7) Epoch 16, batch 900, loss[loss=0.1587, simple_loss=0.2221, pruned_loss=0.04767, over 4873.00 frames. ], tot_loss[loss=0.1836, simple_loss=0.252, pruned_loss=0.05757, over 946310.58 frames. ], batch size: 31, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:09:43,091 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.048e+02 1.502e+02 1.904e+02 2.205e+02 3.944e+02, threshold=3.808e+02, percent-clipped=2.0
+2023-03-26 19:09:56,621 INFO [finetune.py:976] (5/7) Epoch 16, batch 950, loss[loss=0.1885, simple_loss=0.255, pruned_loss=0.06099, over 4823.00 frames. ], tot_loss[loss=0.1833, simple_loss=0.2505, pruned_loss=0.05807, over 948667.07 frames. ], batch size: 40, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:10:02,698 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86875.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:10:04,480 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86878.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:10:13,931 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-03-26 19:10:14,471 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0464, 1.6398, 2.3041, 1.5686, 2.1401, 2.2603, 1.6789, 2.4119],
+ device='cuda:5'), covar=tensor([0.1264, 0.1945, 0.1465, 0.2009, 0.0817, 0.1421, 0.2720, 0.0752],
+ device='cuda:5'), in_proj_covar=tensor([0.0194, 0.0205, 0.0194, 0.0191, 0.0178, 0.0215, 0.0219, 0.0201],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:10:20,385 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86902.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:10:40,037 INFO [finetune.py:976] (5/7) Epoch 16, batch 1000, loss[loss=0.2097, simple_loss=0.28, pruned_loss=0.06975, over 4829.00 frames. ], tot_loss[loss=0.1853, simple_loss=0.2532, pruned_loss=0.05875, over 951833.56 frames. ], batch size: 33, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:11:01,989 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86936.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:11:08,315 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86939.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:11:12,980 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.150e+02 1.633e+02 1.920e+02 2.320e+02 4.350e+02, threshold=3.840e+02, percent-clipped=2.0
+2023-03-26 19:11:20,311 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=86950.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:11:34,824 INFO [finetune.py:976] (5/7) Epoch 16, batch 1050, loss[loss=0.2007, simple_loss=0.2693, pruned_loss=0.06603, over 4821.00 frames. ], tot_loss[loss=0.1869, simple_loss=0.2551, pruned_loss=0.05937, over 950845.40 frames. ], batch size: 45, lr: 3.46e-03, grad_scale: 32.0
+2023-03-26 19:11:39,836 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8626, 2.4134, 3.0687, 1.9250, 2.7868, 3.1091, 2.2451, 3.1906],
+ device='cuda:5'), covar=tensor([0.1377, 0.2028, 0.1581, 0.2367, 0.0957, 0.1504, 0.2670, 0.0881],
+ device='cuda:5'), in_proj_covar=tensor([0.0197, 0.0208, 0.0196, 0.0194, 0.0180, 0.0218, 0.0223, 0.0204],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:11:48,252 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4596, 1.3728, 1.3370, 1.4430, 0.9614, 2.8875, 1.0383, 1.4893],
+ device='cuda:5'), covar=tensor([0.3382, 0.2458, 0.2196, 0.2376, 0.1915, 0.0255, 0.2895, 0.1269],
+ device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0115, 0.0120, 0.0124, 0.0115, 0.0097, 0.0097, 0.0097],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 19:11:58,507 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87002.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 19:12:08,333 INFO [finetune.py:976] (5/7) Epoch 16, batch 1100, loss[loss=0.1776, simple_loss=0.2254, pruned_loss=0.06491, over 4422.00 frames. ], tot_loss[loss=0.1873, simple_loss=0.2557, pruned_loss=0.05941, over 949428.07 frames. ], batch size: 19, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:12:27,409 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.613e+02 1.835e+02 2.273e+02 4.124e+02, threshold=3.670e+02, percent-clipped=1.0
+2023-03-26 19:12:39,655 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87063.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 19:12:41,764 INFO [finetune.py:976] (5/7) Epoch 16, batch 1150, loss[loss=0.1657, simple_loss=0.234, pruned_loss=0.04869, over 4760.00 frames. ], tot_loss[loss=0.1886, simple_loss=0.2571, pruned_loss=0.06002, over 950557.70 frames. ], batch size: 26, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:13:01,408 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87096.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:13:04,425 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87101.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 19:13:15,213 INFO [finetune.py:976] (5/7) Epoch 16, batch 1200, loss[loss=0.1939, simple_loss=0.2565, pruned_loss=0.06558, over 4910.00 frames. ], tot_loss[loss=0.1872, simple_loss=0.2553, pruned_loss=0.05959, over 952274.72 frames. ], batch size: 37, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:13:15,282 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8593, 4.3550, 4.2222, 2.0215, 4.4962, 3.2749, 0.6822, 3.0296],
+ device='cuda:5'), covar=tensor([0.2578, 0.1958, 0.1359, 0.3565, 0.0777, 0.0916, 0.4622, 0.1512],
+ device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0175, 0.0160, 0.0129, 0.0158, 0.0123, 0.0146, 0.0123],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-26 19:13:28,130 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0
+2023-03-26 19:13:33,239 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=87144.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:13:34,373 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.093e+01 1.544e+02 1.890e+02 2.251e+02 4.242e+02, threshold=3.781e+02, percent-clipped=1.0
+2023-03-26 19:13:36,665 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=87149.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:13:50,131 INFO [finetune.py:976] (5/7) Epoch 16, batch 1250, loss[loss=0.137, simple_loss=0.2037, pruned_loss=0.03516, over 4802.00 frames. ], tot_loss[loss=0.1854, simple_loss=0.2531, pruned_loss=0.0588, over 955131.09 frames. ], batch size: 51, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:13:53,124 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87170.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:14:14,992 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.43 vs. limit=5.0
+2023-03-26 19:14:31,064 INFO [finetune.py:976] (5/7) Epoch 16, batch 1300, loss[loss=0.1858, simple_loss=0.2541, pruned_loss=0.0587, over 4897.00 frames. ], tot_loss[loss=0.1822, simple_loss=0.2496, pruned_loss=0.05738, over 955123.90 frames. ], batch size: 35, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:14:38,194 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9748, 1.9886, 1.6870, 1.8758, 2.0192, 1.7334, 2.1764, 2.0497],
+ device='cuda:5'), covar=tensor([0.1172, 0.1835, 0.2453, 0.2138, 0.1935, 0.1361, 0.3082, 0.1452],
+ device='cuda:5'), in_proj_covar=tensor([0.0182, 0.0188, 0.0234, 0.0253, 0.0245, 0.0202, 0.0211, 0.0199],
+ device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:14:40,589 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7095, 1.6333, 1.9736, 1.2510, 1.7593, 1.8748, 1.5818, 2.1850],
+ device='cuda:5'), covar=tensor([0.1286, 0.1972, 0.1181, 0.1697, 0.0862, 0.1349, 0.2647, 0.0806],
+ device='cuda:5'), in_proj_covar=tensor([0.0196, 0.0208, 0.0194, 0.0192, 0.0180, 0.0217, 0.0220, 0.0203],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:14:41,213 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87230.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:14:43,586 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87234.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:14:44,804 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87236.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:14:51,273 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.006e+02 1.578e+02 1.944e+02 2.250e+02 4.130e+02, threshold=3.887e+02, percent-clipped=1.0
+2023-03-26 19:15:04,397 INFO [finetune.py:976] (5/7) Epoch 16, batch 1350, loss[loss=0.2151, simple_loss=0.3006, pruned_loss=0.06479, over 4766.00 frames. ], tot_loss[loss=0.1834, simple_loss=0.251, pruned_loss=0.05794, over 954950.59 frames. ], batch size: 54, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:15:16,953 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=87284.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:15:21,846 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87291.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:15:41,385 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0
+2023-03-26 19:15:44,163 INFO [finetune.py:976] (5/7) Epoch 16, batch 1400, loss[loss=0.1852, simple_loss=0.2537, pruned_loss=0.05833, over 4805.00 frames. ], tot_loss[loss=0.1855, simple_loss=0.2538, pruned_loss=0.05865, over 953569.62 frames. ], batch size: 45, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:16:09,070 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8763, 1.9311, 1.5273, 2.0829, 2.4879, 2.0035, 1.7821, 1.5013],
+ device='cuda:5'), covar=tensor([0.2091, 0.1859, 0.1825, 0.1479, 0.1589, 0.1146, 0.2169, 0.1843],
+ device='cuda:5'), in_proj_covar=tensor([0.0239, 0.0208, 0.0210, 0.0191, 0.0241, 0.0184, 0.0214, 0.0198],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:16:19,255 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.049e+02 1.619e+02 1.873e+02 2.376e+02 3.982e+02, threshold=3.745e+02, percent-clipped=1.0
+2023-03-26 19:16:31,542 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87358.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 19:16:38,549 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87362.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:16:41,379 INFO [finetune.py:976] (5/7) Epoch 16, batch 1450, loss[loss=0.2005, simple_loss=0.2592, pruned_loss=0.07091, over 4819.00 frames. ], tot_loss[loss=0.1862, simple_loss=0.2546, pruned_loss=0.05885, over 953528.65 frames. ], batch size: 25, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:17:13,254 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1911, 1.3428, 1.2900, 1.3756, 1.4367, 2.3480, 1.2887, 1.4130],
+ device='cuda:5'), covar=tensor([0.0966, 0.1618, 0.1299, 0.0934, 0.1514, 0.0414, 0.1334, 0.1579],
+ device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0075, 0.0078, 0.0093, 0.0081, 0.0086, 0.0079],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 19:17:18,471 INFO [finetune.py:976] (5/7) Epoch 16, batch 1500, loss[loss=0.1905, simple_loss=0.2575, pruned_loss=0.06172, over 4741.00 frames. ], tot_loss[loss=0.187, simple_loss=0.2559, pruned_loss=0.05908, over 954680.62 frames. ], batch size: 27, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:17:23,385 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87423.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:17:24,179 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0
+2023-03-26 19:17:39,131 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.097e+02 1.635e+02 2.033e+02 2.421e+02 4.092e+02, threshold=4.066e+02, percent-clipped=1.0
+2023-03-26 19:17:48,792 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87461.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:17:52,198 INFO [finetune.py:976] (5/7) Epoch 16, batch 1550, loss[loss=0.1435, simple_loss=0.2072, pruned_loss=0.03995, over 4808.00 frames. ], tot_loss[loss=0.1858, simple_loss=0.255, pruned_loss=0.05832, over 953014.24 frames. ], batch size: 25, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:17:55,188 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87470.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:18:18,837 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87505.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:18:23,576 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0
+2023-03-26 19:18:25,477 INFO [finetune.py:976] (5/7) Epoch 16, batch 1600, loss[loss=0.2075, simple_loss=0.2654, pruned_loss=0.0748, over 4258.00 frames. ], tot_loss[loss=0.1844, simple_loss=0.2531, pruned_loss=0.05783, over 953766.45 frames. ], batch size: 65, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:18:27,238 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=87518.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:18:30,239 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87522.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:18:38,583 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87534.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:18:46,618 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.707e+01 1.405e+02 1.628e+02 1.991e+02 3.372e+02, threshold=3.256e+02, percent-clipped=0.0
+2023-03-26 19:18:59,345 INFO [finetune.py:976] (5/7) Epoch 16, batch 1650, loss[loss=0.1647, simple_loss=0.2415, pruned_loss=0.04391, over 4767.00 frames. ], tot_loss[loss=0.1828, simple_loss=0.251, pruned_loss=0.0573, over 955221.52 frames. ], batch size: 26, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:18:59,465 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87566.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 19:19:00,666 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8814, 1.3734, 0.8859, 1.7441, 2.2530, 1.4872, 1.6306, 1.6457],
+ device='cuda:5'), covar=tensor([0.1491, 0.2119, 0.1979, 0.1198, 0.1736, 0.1805, 0.1485, 0.2143],
+ device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0110, 0.0092, 0.0119, 0.0093, 0.0098, 0.0089],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-26 19:19:10,146 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=87582.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:19:11,440 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87584.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:19:13,569 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87586.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:19:17,439 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87589.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:19:36,483 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87605.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:19:39,598 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0
+2023-03-26 19:19:43,482 INFO [finetune.py:976] (5/7) Epoch 16, batch 1700, loss[loss=0.2273, simple_loss=0.2903, pruned_loss=0.08213, over 4828.00 frames. ], tot_loss[loss=0.1812, simple_loss=0.2492, pruned_loss=0.05659, over 954873.66 frames. ], batch size: 40, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:19:47,168 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9740, 4.8956, 4.6354, 2.5234, 4.9968, 3.8007, 0.9290, 3.5871],
+ device='cuda:5'), covar=tensor([0.2425, 0.1777, 0.1487, 0.3283, 0.0722, 0.0865, 0.4634, 0.1298],
+ device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0174, 0.0158, 0.0128, 0.0156, 0.0122, 0.0146, 0.0122],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-26 19:19:47,411 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.06 vs. limit=5.0
+2023-03-26 19:19:48,906 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8309, 4.0443, 3.8710, 2.0628, 4.1753, 3.1361, 0.7046, 2.9702],
+ device='cuda:5'), covar=tensor([0.2122, 0.2057, 0.1324, 0.3025, 0.0867, 0.0902, 0.4310, 0.1314],
+ device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0174, 0.0158, 0.0128, 0.0156, 0.0122, 0.0146, 0.0122],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-26 19:20:03,777 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87645.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 19:20:04,231 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.164e+02 1.606e+02 1.879e+02 2.372e+02 9.403e+02, threshold=3.758e+02, percent-clipped=6.0
+2023-03-26 19:20:06,857 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87650.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:20:11,628 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87658.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 19:20:15,091 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5206, 1.4426, 2.0228, 2.8328, 1.9329, 2.2072, 1.0892, 2.4026],
+ device='cuda:5'), covar=tensor([0.1798, 0.1488, 0.1179, 0.0623, 0.0895, 0.1356, 0.1769, 0.0580],
+ device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0117, 0.0134, 0.0165, 0.0101, 0.0139, 0.0125, 0.0102],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-26 19:20:16,842 INFO [finetune.py:976] (5/7) Epoch 16, batch 1750, loss[loss=0.2098, simple_loss=0.2867, pruned_loss=0.06649, over 4817.00 frames. ], tot_loss[loss=0.1821, simple_loss=0.2502, pruned_loss=0.05705, over 953989.78 frames. ], batch size: 38, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:20:16,970 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87666.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 19:20:18,214 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9186, 1.2900, 1.9410, 1.8905, 1.6840, 1.6096, 1.8153, 1.7127],
+ device='cuda:5'), covar=tensor([0.3763, 0.3946, 0.3305, 0.3587, 0.4569, 0.3736, 0.4423, 0.3276],
+ device='cuda:5'), in_proj_covar=tensor([0.0246, 0.0239, 0.0258, 0.0270, 0.0269, 0.0242, 0.0281, 0.0238],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:20:44,154 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=87706.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 19:20:50,639 INFO [finetune.py:976] (5/7) Epoch 16, batch 1800, loss[loss=0.2087, simple_loss=0.2761, pruned_loss=0.07062, over 4832.00 frames. ], tot_loss[loss=0.1836, simple_loss=0.2531, pruned_loss=0.05707, over 954995.46 frames. ], batch size: 33, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:20:51,923 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87718.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:21:02,315 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.12 vs. limit=5.0
+2023-03-26 19:21:13,298 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.635e+02 1.907e+02 2.399e+02 5.758e+02, threshold=3.813e+02, percent-clipped=1.0
+2023-03-26 19:21:36,192 INFO [finetune.py:976] (5/7) Epoch 16, batch 1850, loss[loss=0.2044, simple_loss=0.279, pruned_loss=0.06486, over 4722.00 frames. ], tot_loss[loss=0.185, simple_loss=0.2547, pruned_loss=0.05769, over 956467.31 frames. ], batch size: 59, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:22:15,098 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8119, 1.7631, 1.5876, 1.7383, 1.4086, 4.3653, 1.6439, 2.0205],
+ device='cuda:5'), covar=tensor([0.3208, 0.2408, 0.2130, 0.2245, 0.1561, 0.0121, 0.2434, 0.1211],
+ device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0122, 0.0125, 0.0115, 0.0097, 0.0097, 0.0097],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 19:22:22,285 INFO [finetune.py:976] (5/7) Epoch 16, batch 1900, loss[loss=0.1571, simple_loss=0.2332, pruned_loss=0.04052, over 4857.00 frames. ], tot_loss[loss=0.1857, simple_loss=0.2553, pruned_loss=0.05802, over 953472.75 frames. ], batch size: 31, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:22:23,004 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87817.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:22:41,851 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.250e+02 1.530e+02 1.792e+02 2.215e+02 4.706e+02, threshold=3.584e+02, percent-clipped=3.0
+2023-03-26 19:22:53,010 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87861.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 19:22:55,973 INFO [finetune.py:976] (5/7) Epoch 16, batch 1950, loss[loss=0.1783, simple_loss=0.238, pruned_loss=0.05934, over 4828.00 frames. ], tot_loss[loss=0.1848, simple_loss=0.2539, pruned_loss=0.0578, over 954028.58 frames. ], batch size: 33, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:23:09,193 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87886.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:23:11,756 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.84 vs. limit=5.0
+2023-03-26 19:23:27,464 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-03-26 19:23:29,597 INFO [finetune.py:976] (5/7) Epoch 16, batch 2000, loss[loss=0.2156, simple_loss=0.2685, pruned_loss=0.08136, over 4917.00 frames. ], tot_loss[loss=0.1836, simple_loss=0.252, pruned_loss=0.05763, over 954775.25 frames. ], batch size: 36, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:23:41,098 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=87934.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:23:41,150 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5556, 1.4896, 1.4666, 1.4787, 1.1306, 2.9019, 1.1008, 1.5235],
+ device='cuda:5'), covar=tensor([0.3302, 0.2450, 0.2065, 0.2328, 0.1707, 0.0244, 0.2683, 0.1236],
+ device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0115, 0.0120, 0.0124, 0.0114, 0.0097, 0.0097, 0.0096],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 19:23:45,287 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87940.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 19:23:48,824 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87945.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:23:49,302 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.076e+02 1.562e+02 1.812e+02 2.199e+02 5.123e+02, threshold=3.624e+02, percent-clipped=1.0
+2023-03-26 19:23:59,908 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87961.0, num_to_drop=1, layers_to_drop={3}
+2023-03-26 19:24:01,117 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6515, 1.5638, 1.9392, 1.9297, 1.7897, 3.7032, 1.5553, 1.7096],
+ device='cuda:5'), covar=tensor([0.0940, 0.1866, 0.1086, 0.0916, 0.1477, 0.0220, 0.1374, 0.1651],
+ device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0074, 0.0078, 0.0092, 0.0080, 0.0086, 0.0079],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 19:24:02,864 INFO [finetune.py:976] (5/7) Epoch 16, batch 2050, loss[loss=0.1758, simple_loss=0.2413, pruned_loss=0.05516, over 4904.00 frames. ], tot_loss[loss=0.1809, simple_loss=0.2494, pruned_loss=0.05619, over 956424.23 frames. ], batch size: 43, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:24:07,885 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0
+2023-03-26 19:24:37,647 INFO [finetune.py:976] (5/7) Epoch 16, batch 2100, loss[loss=0.2215, simple_loss=0.2946, pruned_loss=0.0742, over 4870.00 frames. ], tot_loss[loss=0.1818, simple_loss=0.2498, pruned_loss=0.05689, over 955174.41 frames. ], batch size: 44, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:24:41,372 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88018.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:24:59,877 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.339e+01 1.671e+02 1.963e+02 2.403e+02 4.597e+02, threshold=3.926e+02, percent-clipped=2.0
+2023-03-26 19:25:13,435 INFO [finetune.py:976] (5/7) Epoch 16, batch 2150, loss[loss=0.19, simple_loss=0.2616, pruned_loss=0.05922, over 4935.00 frames. ], tot_loss[loss=0.185, simple_loss=0.2533, pruned_loss=0.05836, over 953212.65 frames. ], batch size: 38, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:25:13,503 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=88066.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:25:35,448 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88100.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:25:46,510 INFO [finetune.py:976] (5/7) Epoch 16, batch 2200, loss[loss=0.1556, simple_loss=0.2262, pruned_loss=0.04251, over 4746.00 frames. ], tot_loss[loss=0.1874, simple_loss=0.2563, pruned_loss=0.05922, over 954652.80 frames. ], batch size: 26, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:25:47,224 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88117.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:25:52,687 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.4187, 3.8676, 4.0618, 4.1313, 4.2199, 4.0259, 4.4934, 1.9404],
+ device='cuda:5'), covar=tensor([0.0636, 0.0894, 0.0702, 0.0903, 0.0976, 0.1208, 0.0548, 0.4428],
+ device='cuda:5'), in_proj_covar=tensor([0.0349, 0.0244, 0.0276, 0.0291, 0.0335, 0.0282, 0.0298, 0.0296],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:25:58,176 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0258, 1.9424, 1.5336, 1.9368, 1.9645, 1.6584, 2.2074, 1.9572],
+ device='cuda:5'), covar=tensor([0.1315, 0.2000, 0.3143, 0.2367, 0.2477, 0.1700, 0.3207, 0.1732],
+ device='cuda:5'), in_proj_covar=tensor([0.0182, 0.0187, 0.0234, 0.0253, 0.0245, 0.0202, 0.0212, 0.0200],
+ device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:26:05,785 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.079e+02 1.658e+02 1.940e+02 2.471e+02 6.986e+02, threshold=3.880e+02, percent-clipped=3.0
+2023-03-26 19:26:15,414 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88161.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 19:26:15,433 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88161.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:26:18,733 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=88165.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:26:19,283 INFO [finetune.py:976] (5/7) Epoch 16, batch 2250, loss[loss=0.2009, simple_loss=0.2854, pruned_loss=0.05817, over 4817.00 frames. ], tot_loss[loss=0.1887, simple_loss=0.2578, pruned_loss=0.05983, over 953189.87 frames. ], batch size: 39, lr: 3.45e-03, grad_scale: 32.0
+2023-03-26 19:26:56,476 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=88209.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:27:05,827 INFO [finetune.py:976] (5/7) Epoch 16, batch 2300, loss[loss=0.1939, simple_loss=0.2635, pruned_loss=0.06221, over 4788.00 frames. ], tot_loss[loss=0.1877, simple_loss=0.2569, pruned_loss=0.05929, over 953756.26 frames. ], batch size: 29, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:27:21,886 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4355, 2.2940, 1.7931, 2.5505, 2.4255, 1.9433, 2.8836, 2.4374],
+ device='cuda:5'), covar=tensor([0.1407, 0.2370, 0.3253, 0.2745, 0.2581, 0.1742, 0.3214, 0.1912],
+ device='cuda:5'), in_proj_covar=tensor([0.0182, 0.0188, 0.0234, 0.0253, 0.0245, 0.0203, 0.0212, 0.0200],
+ device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:27:29,734 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88240.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:27:33,278 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88245.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:27:34,391 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.097e+02 1.462e+02 1.840e+02 2.103e+02 3.666e+02, threshold=3.679e+02, percent-clipped=0.0
+2023-03-26 19:27:43,884 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88261.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 19:27:47,285 INFO [finetune.py:976] (5/7) Epoch 16, batch 2350, loss[loss=0.1599, simple_loss=0.2468, pruned_loss=0.03654, over 4836.00 frames. ], tot_loss[loss=0.1852, simple_loss=0.2541, pruned_loss=0.05815, over 954321.69 frames. ], batch size: 49, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:27:48,592 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3040, 1.4407, 1.4515, 1.6485, 1.5127, 3.0092, 1.3308, 1.4989],
+ device='cuda:5'), covar=tensor([0.0973, 0.1728, 0.1082, 0.0962, 0.1598, 0.0275, 0.1444, 0.1727],
+ device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0081, 0.0074, 0.0077, 0.0092, 0.0080, 0.0085, 0.0079],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 19:27:57,883 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0
+2023-03-26 19:28:01,664 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=88288.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:28:03,556 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88291.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 19:28:04,697 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=88293.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:28:15,423 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=88309.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:28:20,095 INFO [finetune.py:976] (5/7) Epoch 16, batch 2400, loss[loss=0.1572, simple_loss=0.2289, pruned_loss=0.04275, over 4770.00 frames. ], tot_loss[loss=0.1825, simple_loss=0.2508, pruned_loss=0.05704, over 954168.60 frames. ], batch size: 27, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:28:40,399 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.006e+02 1.525e+02 1.766e+02 2.106e+02 3.774e+02, threshold=3.532e+02, percent-clipped=1.0
+2023-03-26 19:28:44,064 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88352.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 19:28:52,862 INFO [finetune.py:976] (5/7) Epoch 16, batch 2450, loss[loss=0.1949, simple_loss=0.2462, pruned_loss=0.07174, over 4139.00 frames. ], tot_loss[loss=0.1807, simple_loss=0.2486, pruned_loss=0.05646, over 954536.60 frames. ], batch size: 18, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:29:26,834 INFO [finetune.py:976] (5/7) Epoch 16, batch 2500, loss[loss=0.1768, simple_loss=0.2452, pruned_loss=0.05415, over 4756.00 frames. ], tot_loss[loss=0.1816, simple_loss=0.2498, pruned_loss=0.05669, over 953342.87 frames. ], batch size: 59, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:29:48,235 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.125e+02 1.743e+02 2.000e+02 2.601e+02 5.270e+02, threshold=4.000e+02, percent-clipped=5.0
+2023-03-26 19:29:54,239 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=88456.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:30:00,709 INFO [finetune.py:976] (5/7) Epoch 16, batch 2550, loss[loss=0.1458, simple_loss=0.2311, pruned_loss=0.0302, over 4795.00 frames. ], tot_loss[loss=0.1829, simple_loss=0.2525, pruned_loss=0.05669, over 954610.39 frames. ], batch size: 29, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:30:33,909 INFO [finetune.py:976] (5/7) Epoch 16, batch 2600, loss[loss=0.1325, simple_loss=0.2088, pruned_loss=0.02807, over 4777.00 frames. ], tot_loss[loss=0.1836, simple_loss=0.2535, pruned_loss=0.05682, over 955545.71 frames. ], batch size: 26, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:30:49,105 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4391, 1.3894, 1.7209, 1.7676, 1.4392, 3.3974, 1.3287, 1.4827],
+ device='cuda:5'), covar=tensor([0.0989, 0.1775, 0.1230, 0.0966, 0.1722, 0.0228, 0.1431, 0.1764],
+ device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0081, 0.0073, 0.0077, 0.0092, 0.0080, 0.0085, 0.0079],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 19:30:55,464 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.436e+01 1.666e+02 1.940e+02 2.279e+02 3.712e+02, threshold=3.880e+02, percent-clipped=0.0
+2023-03-26 19:31:03,928 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7349, 1.6795, 1.5805, 1.7086, 1.1705, 3.7103, 1.4502, 1.9177],
+ device='cuda:5'), covar=tensor([0.3359, 0.2471, 0.2194, 0.2432, 0.1857, 0.0196, 0.2470, 0.1307],
+ device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0115, 0.0120, 0.0124, 0.0114, 0.0097, 0.0096, 0.0096],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 19:31:07,484 INFO [finetune.py:976] (5/7) Epoch 16, batch 2650, loss[loss=0.1653, simple_loss=0.2393, pruned_loss=0.04566, over 4822.00 frames. ], tot_loss[loss=0.1863, simple_loss=0.2559, pruned_loss=0.05838, over 952144.13 frames. ], batch size: 47, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:31:41,337 INFO [finetune.py:976] (5/7) Epoch 16, batch 2700, loss[loss=0.1729, simple_loss=0.2332, pruned_loss=0.05633, over 4912.00 frames. ], tot_loss[loss=0.1845, simple_loss=0.2541, pruned_loss=0.05745, over 954055.09 frames. ], batch size: 36, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:32:03,830 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.4510, 3.8760, 4.0585, 4.1858, 4.2296, 3.9429, 4.4946, 1.7909],
+ device='cuda:5'), covar=tensor([0.0649, 0.0815, 0.0757, 0.0827, 0.1034, 0.1240, 0.0614, 0.4700],
+ device='cuda:5'), in_proj_covar=tensor([0.0347, 0.0244, 0.0276, 0.0291, 0.0334, 0.0282, 0.0297, 0.0294],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:32:05,539 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.033e+02 1.504e+02 1.815e+02 2.296e+02 4.078e+02, threshold=3.631e+02, percent-clipped=1.0
+2023-03-26 19:32:05,627 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=88647.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 19:32:26,939 INFO [finetune.py:976] (5/7) Epoch 16, batch 2750, loss[loss=0.2144, simple_loss=0.2717, pruned_loss=0.07849, over 4847.00 frames. ], tot_loss[loss=0.1825, simple_loss=0.2513, pruned_loss=0.05684, over 955779.44 frames. ], batch size: 44, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:32:32,220 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4414, 1.4789, 1.8446, 1.7380, 1.6509, 3.4875, 1.4396, 1.6119],
+ device='cuda:5'), covar=tensor([0.1029, 0.1826, 0.1097, 0.0977, 0.1535, 0.0209, 0.1413, 0.1677],
+ device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0081, 0.0074, 0.0077, 0.0092, 0.0080, 0.0085, 0.0079],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 19:33:17,053 INFO [finetune.py:976] (5/7) Epoch 16, batch 2800, loss[loss=0.1496, simple_loss=0.2247, pruned_loss=0.03722, over 4172.00 frames. ], tot_loss[loss=0.1808, simple_loss=0.2488, pruned_loss=0.05635, over 957991.68 frames. ], batch size: 18, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:33:37,859 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.207e+02 1.516e+02 1.815e+02 2.286e+02 3.246e+02, threshold=3.631e+02, percent-clipped=0.0
+2023-03-26 19:33:44,953 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88756.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:33:50,910 INFO [finetune.py:976] (5/7) Epoch 16, batch 2850, loss[loss=0.213, simple_loss=0.2863, pruned_loss=0.06988, over 4806.00 frames. ], tot_loss[loss=0.1802, simple_loss=0.248, pruned_loss=0.05625, over 956771.55 frames. ], batch size: 41, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:34:01,185 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9431, 1.0935, 1.8266, 1.8247, 1.6636, 1.6045, 1.7104, 1.7844],
+ device='cuda:5'), covar=tensor([0.3995, 0.4119, 0.3658, 0.3950, 0.5174, 0.3982, 0.4637, 0.3514],
+ device='cuda:5'), in_proj_covar=tensor([0.0246, 0.0236, 0.0256, 0.0269, 0.0267, 0.0241, 0.0278, 0.0235],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:34:17,619 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=88804.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:34:24,822 INFO [finetune.py:976] (5/7) Epoch 16, batch 2900, loss[loss=0.2177, simple_loss=0.2864, pruned_loss=0.07448, over 4844.00 frames. ], tot_loss[loss=0.1833, simple_loss=0.2515, pruned_loss=0.05755, over 954495.14 frames. ], batch size: 47, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:34:45,212 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.025e+02 1.632e+02 1.968e+02 2.500e+02 4.348e+02, threshold=3.936e+02, percent-clipped=6.0
+2023-03-26 19:34:45,947 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9966, 1.5805, 2.3617, 1.4378, 2.0794, 2.2433, 1.5489, 2.3666],
+ device='cuda:5'), covar=tensor([0.1317, 0.2167, 0.1601, 0.2157, 0.0961, 0.1503, 0.2803, 0.0888],
+ device='cuda:5'), in_proj_covar=tensor([0.0193, 0.0205, 0.0192, 0.0191, 0.0178, 0.0214, 0.0218, 0.0200],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:34:47,614 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1621, 2.9072, 2.7675, 1.2036, 3.0353, 2.2481, 0.5733, 1.8621],
+ device='cuda:5'), covar=tensor([0.2800, 0.2077, 0.1921, 0.3817, 0.1380, 0.1239, 0.4518, 0.1827],
+ device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0175, 0.0159, 0.0128, 0.0157, 0.0123, 0.0146, 0.0122],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-26 19:34:58,808 INFO [finetune.py:976] (5/7) Epoch 16, batch 2950, loss[loss=0.1915, simple_loss=0.2668, pruned_loss=0.05813, over 4733.00 frames. ], tot_loss[loss=0.1855, simple_loss=0.2542, pruned_loss=0.0584, over 954713.20 frames. ], batch size: 59, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:34:59,518 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8851, 4.2759, 4.0413, 2.3094, 4.3430, 3.2767, 1.0862, 3.1096],
+ device='cuda:5'), covar=tensor([0.2235, 0.1682, 0.1354, 0.3246, 0.0809, 0.0943, 0.4508, 0.1384],
+ device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0176, 0.0159, 0.0129, 0.0157, 0.0123, 0.0147, 0.0123],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-26 19:35:32,640 INFO [finetune.py:976] (5/7) Epoch 16, batch 3000, loss[loss=0.1388, simple_loss=0.1935, pruned_loss=0.04208, over 4165.00 frames. ], tot_loss[loss=0.1858, simple_loss=0.2548, pruned_loss=0.05838, over 954053.23 frames. ], batch size: 17, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:35:32,640 INFO [finetune.py:1001] (5/7) Computing validation loss
+2023-03-26 19:35:38,859 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9940, 1.8054, 1.9892, 1.3356, 1.9706, 1.9691, 1.9635, 1.6152],
+ device='cuda:5'), covar=tensor([0.0553, 0.0705, 0.0577, 0.0846, 0.0704, 0.0643, 0.0574, 0.1229],
+ device='cuda:5'), in_proj_covar=tensor([0.0136, 0.0137, 0.0144, 0.0125, 0.0125, 0.0143, 0.0144, 0.0167],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:35:41,872 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4356, 1.3669, 1.3178, 1.4558, 1.7362, 1.5982, 1.3938, 1.2883],
+ device='cuda:5'), covar=tensor([0.0399, 0.0364, 0.0586, 0.0342, 0.0244, 0.0436, 0.0362, 0.0449],
+ device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0108, 0.0143, 0.0112, 0.0099, 0.0107, 0.0097, 0.0108],
+ device='cuda:5'), out_proj_covar=tensor([7.2988e-05, 8.3872e-05, 1.1301e-04, 8.6599e-05, 7.7392e-05, 7.8828e-05,
+ 7.3023e-05, 8.2483e-05], device='cuda:5')
+2023-03-26 19:35:49,227 INFO [finetune.py:1010] (5/7) Epoch 16, validation: loss=0.1563, simple_loss=0.2263, pruned_loss=0.04316, over 2265189.00 frames.
+2023-03-26 19:35:49,228 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB
+2023-03-26 19:35:58,761 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5957, 3.7795, 3.5536, 1.7753, 3.9386, 2.8639, 0.9405, 2.6501],
+ device='cuda:5'), covar=tensor([0.2507, 0.1543, 0.1516, 0.3145, 0.0823, 0.1047, 0.4064, 0.1347],
+ device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0177, 0.0160, 0.0130, 0.0158, 0.0124, 0.0148, 0.0124],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-26 19:36:10,682 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.136e+02 1.661e+02 1.990e+02 2.439e+02 3.546e+02, threshold=3.980e+02, percent-clipped=0.0
+2023-03-26 19:36:11,274 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88947.0, num_to_drop=1, layers_to_drop={2}
+2023-03-26 19:36:23,198 INFO [finetune.py:976] (5/7) Epoch 16, batch 3050, loss[loss=0.2074, simple_loss=0.2757, pruned_loss=0.0696, over 4874.00 frames. ], tot_loss[loss=0.1873, simple_loss=0.2568, pruned_loss=0.05888, over 955428.42 frames. ], batch size: 32, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:36:43,519 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=88995.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 19:36:57,480 INFO [finetune.py:976] (5/7) Epoch 16, batch 3100, loss[loss=0.1422, simple_loss=0.2082, pruned_loss=0.03807, over 4389.00 frames. ], tot_loss[loss=0.1853, simple_loss=0.2545, pruned_loss=0.05805, over 957079.97 frames. ], batch size: 19, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:37:03,373 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7907, 1.6703, 1.5948, 1.7656, 1.3001, 3.7684, 1.4978, 1.9692],
+ device='cuda:5'), covar=tensor([0.3127, 0.2408, 0.2162, 0.2350, 0.1732, 0.0170, 0.2448, 0.1260],
+ device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0116, 0.0120, 0.0124, 0.0115, 0.0097, 0.0097, 0.0097],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 19:37:06,394 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89027.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:37:20,959 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.966e+01 1.506e+02 1.838e+02 2.198e+02 3.411e+02, threshold=3.676e+02, percent-clipped=0.0
+2023-03-26 19:37:23,613 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1107, 1.8026, 2.1088, 1.9858, 1.7639, 1.7862, 2.0293, 1.9601],
+ device='cuda:5'), covar=tensor([0.4105, 0.4152, 0.3348, 0.4531, 0.5235, 0.4393, 0.5161, 0.3196],
+ device='cuda:5'), in_proj_covar=tensor([0.0248, 0.0239, 0.0258, 0.0272, 0.0270, 0.0243, 0.0280, 0.0237],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:37:33,684 INFO [finetune.py:976] (5/7) Epoch 16, batch 3150, loss[loss=0.1871, simple_loss=0.259, pruned_loss=0.05765, over 4794.00 frames. ], tot_loss[loss=0.184, simple_loss=0.2521, pruned_loss=0.05789, over 952578.18 frames. ], batch size: 29, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:37:36,957 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.67 vs. limit=2.0
+2023-03-26 19:37:56,581 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89088.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:38:10,465 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1530, 1.1477, 1.0991, 1.2415, 1.4289, 1.3949, 1.2241, 1.1280],
+ device='cuda:5'), covar=tensor([0.0427, 0.0311, 0.0603, 0.0294, 0.0199, 0.0381, 0.0351, 0.0350],
+ device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0107, 0.0143, 0.0111, 0.0099, 0.0106, 0.0097, 0.0107],
+ device='cuda:5'), out_proj_covar=tensor([7.2748e-05, 8.3197e-05, 1.1274e-04, 8.6042e-05, 7.6843e-05, 7.8498e-05,
+ 7.2631e-05, 8.1930e-05], device='cuda:5')
+2023-03-26 19:38:25,685 INFO [finetune.py:976] (5/7) Epoch 16, batch 3200, loss[loss=0.2054, simple_loss=0.2603, pruned_loss=0.07528, over 4925.00 frames. ], tot_loss[loss=0.1828, simple_loss=0.2499, pruned_loss=0.05783, over 952849.21 frames. ], batch size: 38, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:38:50,100 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.129e+02 1.609e+02 1.908e+02 2.339e+02 4.086e+02, threshold=3.816e+02, percent-clipped=1.0
+2023-03-26 19:39:02,085 INFO [finetune.py:976] (5/7) Epoch 16, batch 3250, loss[loss=0.2058, simple_loss=0.2641, pruned_loss=0.07369, over 4839.00 frames. ], tot_loss[loss=0.1838, simple_loss=0.2511, pruned_loss=0.0582, over 952584.99 frames. ], batch size: 33, lr: 3.44e-03, grad_scale: 32.0
+2023-03-26 19:39:03,390 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7037, 2.3703, 1.8846, 0.9308, 2.1417, 2.0418, 1.9077, 2.1854],
+ device='cuda:5'), covar=tensor([0.0828, 0.0775, 0.1549, 0.2101, 0.1286, 0.2298, 0.2274, 0.0866],
+ device='cuda:5'), in_proj_covar=tensor([0.0167, 0.0194, 0.0198, 0.0181, 0.0211, 0.0204, 0.0221, 0.0195],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 19:39:04,506 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89169.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 19:39:35,940 INFO [finetune.py:976] (5/7) Epoch 16, batch 3300, loss[loss=0.1837, simple_loss=0.2741, pruned_loss=0.04664, over 4802.00 frames. ], tot_loss[loss=0.1859, simple_loss=0.2542, pruned_loss=0.05886, over 952073.79 frames.
], batch size: 45, lr: 3.44e-03, grad_scale: 32.0 +2023-03-26 19:39:40,806 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.4134, 3.8418, 4.0556, 4.2892, 4.1795, 3.9881, 4.4843, 1.4862], + device='cuda:5'), covar=tensor([0.0728, 0.0940, 0.0820, 0.0871, 0.1147, 0.1591, 0.0687, 0.5650], + device='cuda:5'), in_proj_covar=tensor([0.0352, 0.0247, 0.0277, 0.0294, 0.0335, 0.0283, 0.0299, 0.0296], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 19:39:45,090 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89230.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:39:56,765 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.235e+02 1.765e+02 2.004e+02 2.308e+02 3.942e+02, threshold=4.007e+02, percent-clipped=1.0 +2023-03-26 19:40:04,011 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9470, 1.2745, 1.9237, 1.8946, 1.7147, 1.6541, 1.8607, 1.8152], + device='cuda:5'), covar=tensor([0.3562, 0.3733, 0.3212, 0.3498, 0.4625, 0.3488, 0.4459, 0.3001], + device='cuda:5'), in_proj_covar=tensor([0.0247, 0.0238, 0.0258, 0.0271, 0.0269, 0.0243, 0.0279, 0.0237], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 19:40:09,186 INFO [finetune.py:976] (5/7) Epoch 16, batch 3350, loss[loss=0.1815, simple_loss=0.2671, pruned_loss=0.04794, over 4820.00 frames. ], tot_loss[loss=0.1879, simple_loss=0.2564, pruned_loss=0.05968, over 953858.85 frames. ], batch size: 40, lr: 3.44e-03, grad_scale: 32.0 +2023-03-26 19:40:39,446 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-03-26 19:40:42,689 INFO [finetune.py:976] (5/7) Epoch 16, batch 3400, loss[loss=0.179, simple_loss=0.2563, pruned_loss=0.05087, over 4882.00 frames. ], tot_loss[loss=0.1881, simple_loss=0.2567, pruned_loss=0.05981, over 954446.64 frames. ], batch size: 43, lr: 3.44e-03, grad_scale: 32.0 +2023-03-26 19:40:57,601 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4065, 1.0123, 0.8102, 1.3088, 1.7680, 0.8826, 1.2214, 1.3665], + device='cuda:5'), covar=tensor([0.1759, 0.2530, 0.2104, 0.1391, 0.2193, 0.2801, 0.1741, 0.2333], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0095, 0.0111, 0.0093, 0.0119, 0.0095, 0.0099, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 19:41:12,553 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.032e+02 1.581e+02 1.832e+02 2.219e+02 5.301e+02, threshold=3.664e+02, percent-clipped=1.0 +2023-03-26 19:41:24,425 INFO [finetune.py:976] (5/7) Epoch 16, batch 3450, loss[loss=0.1365, simple_loss=0.2075, pruned_loss=0.03271, over 4750.00 frames. ], tot_loss[loss=0.1874, simple_loss=0.2561, pruned_loss=0.05932, over 952538.29 frames. 
], batch size: 27, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:41:26,206 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2152, 2.2430, 2.3667, 1.7324, 2.2607, 2.3241, 2.3413, 1.9563], + device='cuda:5'), covar=tensor([0.0618, 0.0596, 0.0637, 0.0866, 0.0596, 0.0690, 0.0627, 0.1032], + device='cuda:5'), in_proj_covar=tensor([0.0135, 0.0136, 0.0144, 0.0125, 0.0125, 0.0142, 0.0144, 0.0167], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 19:41:28,119 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-03-26 19:41:29,850 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89374.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:41:35,735 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89383.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:41:52,303 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.65 vs. limit=5.0 +2023-03-26 19:41:58,330 INFO [finetune.py:976] (5/7) Epoch 16, batch 3500, loss[loss=0.1917, simple_loss=0.2577, pruned_loss=0.06286, over 4861.00 frames. ], tot_loss[loss=0.1856, simple_loss=0.2538, pruned_loss=0.05869, over 953357.67 frames. ], batch size: 49, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:42:04,433 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89425.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:42:10,955 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89435.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:42:13,397 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89439.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 19:42:18,650 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.936e+01 1.517e+02 1.946e+02 2.225e+02 4.216e+02, threshold=3.891e+02, percent-clipped=3.0 +2023-03-26 19:42:31,137 INFO [finetune.py:976] (5/7) Epoch 16, batch 3550, loss[loss=0.1606, simple_loss=0.2317, pruned_loss=0.04477, over 4697.00 frames. ], tot_loss[loss=0.1829, simple_loss=0.2505, pruned_loss=0.05763, over 952156.64 frames. ], batch size: 23, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:42:43,949 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89486.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:42:53,371 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89500.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 19:42:59,272 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89509.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:43:06,069 INFO [finetune.py:976] (5/7) Epoch 16, batch 3600, loss[loss=0.1523, simple_loss=0.2282, pruned_loss=0.03819, over 4756.00 frames. ], tot_loss[loss=0.1814, simple_loss=0.2487, pruned_loss=0.05701, over 952191.95 frames. 
], batch size: 27, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:43:14,218 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89525.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:43:26,116 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4637, 1.7299, 0.8964, 2.3276, 2.6005, 1.9233, 1.9368, 2.2181], + device='cuda:5'), covar=tensor([0.1874, 0.2845, 0.2540, 0.1395, 0.2091, 0.2471, 0.1972, 0.2589], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0095, 0.0111, 0.0093, 0.0119, 0.0095, 0.0099, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 19:43:37,731 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.924e+01 1.526e+02 1.890e+02 2.215e+02 3.895e+02, threshold=3.780e+02, percent-clipped=1.0 +2023-03-26 19:44:03,078 INFO [finetune.py:976] (5/7) Epoch 16, batch 3650, loss[loss=0.1904, simple_loss=0.2526, pruned_loss=0.06412, over 4902.00 frames. ], tot_loss[loss=0.183, simple_loss=0.2507, pruned_loss=0.05767, over 953992.33 frames. ], batch size: 35, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:44:06,120 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89570.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:44:16,927 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3273, 1.2463, 1.2335, 1.3786, 1.6176, 1.4758, 1.3436, 1.1694], + device='cuda:5'), covar=tensor([0.0426, 0.0327, 0.0680, 0.0313, 0.0237, 0.0562, 0.0424, 0.0484], + device='cuda:5'), in_proj_covar=tensor([0.0095, 0.0109, 0.0144, 0.0113, 0.0100, 0.0108, 0.0099, 0.0109], + device='cuda:5'), out_proj_covar=tensor([7.3774e-05, 8.4432e-05, 1.1414e-04, 8.7184e-05, 7.7849e-05, 7.9528e-05, + 7.3997e-05, 8.2808e-05], device='cuda:5') +2023-03-26 19:44:18,677 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5024, 1.4501, 1.7842, 2.7544, 1.9522, 2.2837, 1.0513, 2.3188], + device='cuda:5'), covar=tensor([0.1779, 0.1622, 0.1468, 0.0951, 0.0886, 0.1540, 0.1895, 0.0600], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0117, 0.0135, 0.0167, 0.0101, 0.0140, 0.0126, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 19:44:36,729 INFO [finetune.py:976] (5/7) Epoch 16, batch 3700, loss[loss=0.1727, simple_loss=0.2518, pruned_loss=0.04679, over 4801.00 frames. ], tot_loss[loss=0.1856, simple_loss=0.2543, pruned_loss=0.05851, over 953167.11 frames. ], batch size: 45, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:44:57,075 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.548e+01 1.593e+02 1.994e+02 2.376e+02 3.738e+02, threshold=3.989e+02, percent-clipped=0.0 +2023-03-26 19:45:03,674 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0513, 1.9857, 1.9591, 2.0011, 1.5235, 3.5398, 1.7048, 2.1729], + device='cuda:5'), covar=tensor([0.2904, 0.2209, 0.1747, 0.2063, 0.1567, 0.0237, 0.2334, 0.1075], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0115, 0.0120, 0.0124, 0.0114, 0.0097, 0.0096, 0.0096], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 19:45:10,212 INFO [finetune.py:976] (5/7) Epoch 16, batch 3750, loss[loss=0.2148, simple_loss=0.2883, pruned_loss=0.07067, over 4889.00 frames. ], tot_loss[loss=0.1868, simple_loss=0.2555, pruned_loss=0.05901, over 951927.67 frames. 
], batch size: 32, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:45:19,324 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89680.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:45:21,128 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89683.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:45:43,306 INFO [finetune.py:976] (5/7) Epoch 16, batch 3800, loss[loss=0.1761, simple_loss=0.2534, pruned_loss=0.04939, over 4719.00 frames. ], tot_loss[loss=0.1861, simple_loss=0.2557, pruned_loss=0.0583, over 953351.21 frames. ], batch size: 54, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:45:52,763 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89730.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:45:53,366 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=89731.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:46:00,031 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89741.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:46:03,523 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.091e+02 1.549e+02 1.882e+02 2.353e+02 4.344e+02, threshold=3.764e+02, percent-clipped=2.0 +2023-03-26 19:46:19,024 INFO [finetune.py:976] (5/7) Epoch 16, batch 3850, loss[loss=0.188, simple_loss=0.2481, pruned_loss=0.06395, over 4349.00 frames. ], tot_loss[loss=0.1847, simple_loss=0.2541, pruned_loss=0.0577, over 951866.17 frames. ], batch size: 66, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:46:29,118 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89781.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:46:38,085 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89795.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 19:46:52,160 INFO [finetune.py:976] (5/7) Epoch 16, batch 3900, loss[loss=0.1924, simple_loss=0.2498, pruned_loss=0.0675, over 4824.00 frames. ], tot_loss[loss=0.1834, simple_loss=0.2516, pruned_loss=0.05757, over 953675.86 frames. ], batch size: 39, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:46:58,277 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89825.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:46:59,830 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-03-26 19:47:12,473 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.084e+02 1.500e+02 1.857e+02 2.274e+02 5.172e+02, threshold=3.715e+02, percent-clipped=1.0 +2023-03-26 19:47:23,893 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89865.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:47:23,926 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89865.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:47:24,442 INFO [finetune.py:976] (5/7) Epoch 16, batch 3950, loss[loss=0.1383, simple_loss=0.21, pruned_loss=0.03327, over 4922.00 frames. ], tot_loss[loss=0.1809, simple_loss=0.2484, pruned_loss=0.05665, over 955732.93 frames. ], batch size: 43, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:47:29,792 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=89873.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:47:57,667 INFO [finetune.py:976] (5/7) Epoch 16, batch 4000, loss[loss=0.2038, simple_loss=0.2671, pruned_loss=0.07032, over 4902.00 frames. 
], tot_loss[loss=0.1804, simple_loss=0.248, pruned_loss=0.05643, over 954877.75 frames. ], batch size: 35, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:48:04,751 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89926.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 19:48:18,302 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.466e+01 1.676e+02 1.974e+02 2.469e+02 4.779e+02, threshold=3.947e+02, percent-clipped=6.0 +2023-03-26 19:48:23,519 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-03-26 19:48:32,941 INFO [finetune.py:976] (5/7) Epoch 16, batch 4050, loss[loss=0.1749, simple_loss=0.255, pruned_loss=0.04744, over 4808.00 frames. ], tot_loss[loss=0.1849, simple_loss=0.2526, pruned_loss=0.05859, over 954729.37 frames. ], batch size: 51, lr: 3.43e-03, grad_scale: 64.0 +2023-03-26 19:49:19,446 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0768, 1.1865, 1.1753, 1.2793, 1.4400, 2.5282, 1.1048, 1.2933], + device='cuda:5'), covar=tensor([0.1382, 0.2562, 0.1373, 0.1271, 0.1992, 0.0456, 0.2251, 0.2542], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0074, 0.0078, 0.0092, 0.0081, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 19:49:29,801 INFO [finetune.py:976] (5/7) Epoch 16, batch 4100, loss[loss=0.1869, simple_loss=0.2537, pruned_loss=0.06009, over 4805.00 frames. ], tot_loss[loss=0.1864, simple_loss=0.2549, pruned_loss=0.05899, over 954599.53 frames. ], batch size: 45, lr: 3.43e-03, grad_scale: 64.0 +2023-03-26 19:49:43,245 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90030.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:49:46,952 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90036.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:49:54,441 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.559e+02 1.839e+02 2.160e+02 6.359e+02, threshold=3.678e+02, percent-clipped=1.0 +2023-03-26 19:49:59,532 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-03-26 19:50:06,384 INFO [finetune.py:976] (5/7) Epoch 16, batch 4150, loss[loss=0.1887, simple_loss=0.2491, pruned_loss=0.06416, over 4195.00 frames. ], tot_loss[loss=0.1875, simple_loss=0.2558, pruned_loss=0.05958, over 954337.26 frames. 
], batch size: 18, lr: 3.43e-03, grad_scale: 64.0 +2023-03-26 19:50:14,193 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=90078.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:50:16,550 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90081.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:50:26,428 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90095.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 19:50:32,507 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8963, 1.7140, 2.5446, 2.2188, 2.2264, 4.5922, 1.7552, 1.9914], + device='cuda:5'), covar=tensor([0.0938, 0.1788, 0.0973, 0.0961, 0.1417, 0.0179, 0.1497, 0.1717], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0074, 0.0078, 0.0092, 0.0081, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 19:50:39,512 INFO [finetune.py:976] (5/7) Epoch 16, batch 4200, loss[loss=0.1775, simple_loss=0.2506, pruned_loss=0.05217, over 4792.00 frames. ], tot_loss[loss=0.1874, simple_loss=0.2561, pruned_loss=0.05939, over 956077.57 frames. ], batch size: 51, lr: 3.43e-03, grad_scale: 64.0 +2023-03-26 19:50:45,693 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-03-26 19:50:47,974 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=90129.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:50:48,625 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90130.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:50:54,462 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5101, 2.2609, 2.0640, 2.4516, 2.2010, 2.2263, 2.1679, 3.2531], + device='cuda:5'), covar=tensor([0.3830, 0.5250, 0.3209, 0.4344, 0.4475, 0.2652, 0.4816, 0.1555], + device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0261, 0.0226, 0.0276, 0.0249, 0.0216, 0.0250, 0.0228], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 19:50:57,838 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=90143.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 19:51:00,650 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.087e+02 1.547e+02 1.785e+02 2.134e+02 3.751e+02, threshold=3.570e+02, percent-clipped=1.0 +2023-03-26 19:51:12,339 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90165.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:51:12,837 INFO [finetune.py:976] (5/7) Epoch 16, batch 4250, loss[loss=0.1802, simple_loss=0.2422, pruned_loss=0.05913, over 4726.00 frames. ], tot_loss[loss=0.1855, simple_loss=0.2538, pruned_loss=0.05855, over 956173.81 frames. ], batch size: 23, lr: 3.43e-03, grad_scale: 64.0 +2023-03-26 19:51:29,569 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90191.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:51:43,666 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=90213.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:51:45,897 INFO [finetune.py:976] (5/7) Epoch 16, batch 4300, loss[loss=0.1423, simple_loss=0.2114, pruned_loss=0.03658, over 4833.00 frames. ], tot_loss[loss=0.1829, simple_loss=0.2507, pruned_loss=0.0576, over 956291.55 frames. 
], batch size: 30, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:51:48,974 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90221.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 19:51:56,157 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90232.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:52:07,167 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.180e+02 1.551e+02 1.797e+02 2.190e+02 3.764e+02, threshold=3.594e+02, percent-clipped=1.0 +2023-03-26 19:52:10,954 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-26 19:52:18,554 INFO [finetune.py:976] (5/7) Epoch 16, batch 4350, loss[loss=0.2035, simple_loss=0.2662, pruned_loss=0.07038, over 4921.00 frames. ], tot_loss[loss=0.1796, simple_loss=0.2471, pruned_loss=0.05602, over 957177.64 frames. ], batch size: 38, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:52:37,000 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90293.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:52:38,865 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2235, 1.9921, 1.7842, 2.1095, 1.9466, 1.9650, 1.9525, 2.7475], + device='cuda:5'), covar=tensor([0.3976, 0.5015, 0.3421, 0.4123, 0.4469, 0.2649, 0.4053, 0.1720], + device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0261, 0.0226, 0.0276, 0.0249, 0.0216, 0.0251, 0.0229], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 19:52:51,898 INFO [finetune.py:976] (5/7) Epoch 16, batch 4400, loss[loss=0.1835, simple_loss=0.2578, pruned_loss=0.05461, over 4817.00 frames. ], tot_loss[loss=0.1805, simple_loss=0.2484, pruned_loss=0.05631, over 955527.71 frames. ], batch size: 33, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:52:52,024 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8365, 1.7886, 1.6260, 2.0660, 2.3483, 2.0535, 1.6032, 1.5329], + device='cuda:5'), covar=tensor([0.2121, 0.1890, 0.1830, 0.1581, 0.1596, 0.1146, 0.2282, 0.1896], + device='cuda:5'), in_proj_covar=tensor([0.0241, 0.0209, 0.0211, 0.0191, 0.0243, 0.0185, 0.0215, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 19:52:55,432 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7047, 1.5081, 1.2633, 1.4331, 2.0364, 2.0651, 1.7291, 1.4747], + device='cuda:5'), covar=tensor([0.0326, 0.0348, 0.0817, 0.0403, 0.0198, 0.0308, 0.0290, 0.0375], + device='cuda:5'), in_proj_covar=tensor([0.0095, 0.0109, 0.0145, 0.0113, 0.0101, 0.0109, 0.0099, 0.0109], + device='cuda:5'), out_proj_covar=tensor([7.4061e-05, 8.4353e-05, 1.1471e-04, 8.7459e-05, 7.8336e-05, 8.0137e-05, + 7.4392e-05, 8.3304e-05], device='cuda:5') +2023-03-26 19:53:04,944 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90336.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:53:13,598 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.069e+02 1.586e+02 1.845e+02 2.241e+02 4.760e+02, threshold=3.689e+02, percent-clipped=4.0 +2023-03-26 19:53:15,502 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90351.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:53:25,408 INFO [finetune.py:976] (5/7) Epoch 16, batch 4450, loss[loss=0.2539, simple_loss=0.3144, pruned_loss=0.09671, over 4813.00 frames. 
], tot_loss[loss=0.184, simple_loss=0.2526, pruned_loss=0.05773, over 956348.89 frames. ], batch size: 51, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:53:29,668 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7347, 1.6748, 1.4575, 1.8759, 2.1044, 1.8805, 1.5087, 1.4474], + device='cuda:5'), covar=tensor([0.1965, 0.1900, 0.1784, 0.1534, 0.1703, 0.1147, 0.2393, 0.1747], + device='cuda:5'), in_proj_covar=tensor([0.0241, 0.0208, 0.0211, 0.0191, 0.0242, 0.0185, 0.0215, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 19:53:37,365 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=90384.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:53:42,404 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-26 19:53:56,379 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.77 vs. limit=5.0 +2023-03-26 19:54:05,577 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90412.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:54:07,884 INFO [finetune.py:976] (5/7) Epoch 16, batch 4500, loss[loss=0.2351, simple_loss=0.3063, pruned_loss=0.08194, over 4852.00 frames. ], tot_loss[loss=0.1869, simple_loss=0.2556, pruned_loss=0.05903, over 954686.45 frames. ], batch size: 44, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:54:32,817 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-03-26 19:54:40,946 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.141e+02 1.659e+02 2.056e+02 2.631e+02 3.688e+02, threshold=4.111e+02, percent-clipped=0.0 +2023-03-26 19:54:49,299 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.57 vs. limit=5.0 +2023-03-26 19:54:53,433 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90459.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:55:01,922 INFO [finetune.py:976] (5/7) Epoch 16, batch 4550, loss[loss=0.1607, simple_loss=0.2268, pruned_loss=0.04736, over 3897.00 frames. ], tot_loss[loss=0.1869, simple_loss=0.2564, pruned_loss=0.05871, over 954418.32 frames. ], batch size: 17, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:55:18,094 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90486.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:55:24,685 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90496.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:55:38,628 INFO [finetune.py:976] (5/7) Epoch 16, batch 4600, loss[loss=0.206, simple_loss=0.2433, pruned_loss=0.08437, over 4002.00 frames. ], tot_loss[loss=0.1863, simple_loss=0.2556, pruned_loss=0.0585, over 953002.38 frames. 
], batch size: 17, lr: 3.43e-03, grad_scale: 32.0 +2023-03-26 19:55:41,048 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3027, 2.2575, 1.8214, 2.2102, 2.2793, 1.9539, 2.5342, 2.3153], + device='cuda:5'), covar=tensor([0.1433, 0.2059, 0.3135, 0.2468, 0.2598, 0.1864, 0.2524, 0.1828], + device='cuda:5'), in_proj_covar=tensor([0.0181, 0.0186, 0.0233, 0.0251, 0.0243, 0.0201, 0.0211, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 19:55:41,640 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90520.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:55:42,198 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90521.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 19:55:59,294 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.556e+01 1.469e+02 1.715e+02 2.012e+02 4.010e+02, threshold=3.429e+02, percent-clipped=0.0 +2023-03-26 19:56:06,273 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90557.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:56:11,543 INFO [finetune.py:976] (5/7) Epoch 16, batch 4650, loss[loss=0.1707, simple_loss=0.2482, pruned_loss=0.04659, over 4841.00 frames. ], tot_loss[loss=0.1839, simple_loss=0.2531, pruned_loss=0.05731, over 953384.96 frames. ], batch size: 47, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 19:56:14,405 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=90569.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:56:22,823 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2224, 2.1536, 1.6933, 2.1906, 2.1858, 1.9025, 2.5145, 2.2667], + device='cuda:5'), covar=tensor([0.1253, 0.2257, 0.2876, 0.2656, 0.2339, 0.1545, 0.3229, 0.1684], + device='cuda:5'), in_proj_covar=tensor([0.0182, 0.0186, 0.0233, 0.0252, 0.0243, 0.0201, 0.0211, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 19:56:23,893 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-26 19:56:26,170 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90588.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:56:45,054 INFO [finetune.py:976] (5/7) Epoch 16, batch 4700, loss[loss=0.145, simple_loss=0.2188, pruned_loss=0.03559, over 4792.00 frames. ], tot_loss[loss=0.1813, simple_loss=0.2498, pruned_loss=0.05638, over 954253.28 frames. ], batch size: 29, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 19:57:02,750 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9636, 1.7355, 2.2540, 1.4602, 2.0200, 2.2532, 1.5881, 2.3520], + device='cuda:5'), covar=tensor([0.1371, 0.2193, 0.1461, 0.2155, 0.0971, 0.1401, 0.3048, 0.0769], + device='cuda:5'), in_proj_covar=tensor([0.0195, 0.0207, 0.0193, 0.0193, 0.0178, 0.0215, 0.0220, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 19:57:05,664 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.103e+02 1.593e+02 1.858e+02 2.163e+02 3.767e+02, threshold=3.717e+02, percent-clipped=1.0 +2023-03-26 19:57:18,494 INFO [finetune.py:976] (5/7) Epoch 16, batch 4750, loss[loss=0.163, simple_loss=0.2472, pruned_loss=0.03936, over 4916.00 frames. 
], tot_loss[loss=0.1811, simple_loss=0.2492, pruned_loss=0.05653, over 955182.21 frames. ], batch size: 43, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 19:57:45,625 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90707.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:57:51,479 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.17 vs. limit=5.0 +2023-03-26 19:57:52,454 INFO [finetune.py:976] (5/7) Epoch 16, batch 4800, loss[loss=0.23, simple_loss=0.2945, pruned_loss=0.08281, over 4821.00 frames. ], tot_loss[loss=0.1826, simple_loss=0.2509, pruned_loss=0.05712, over 953937.41 frames. ], batch size: 45, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 19:58:13,286 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.231e+02 1.629e+02 1.979e+02 2.321e+02 4.531e+02, threshold=3.957e+02, percent-clipped=1.0 +2023-03-26 19:58:25,071 INFO [finetune.py:976] (5/7) Epoch 16, batch 4850, loss[loss=0.1618, simple_loss=0.2361, pruned_loss=0.04371, over 4757.00 frames. ], tot_loss[loss=0.1857, simple_loss=0.2546, pruned_loss=0.05839, over 955293.52 frames. ], batch size: 27, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 19:58:34,845 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7798, 4.1238, 4.0019, 1.9809, 4.3133, 3.2162, 0.8839, 2.9203], + device='cuda:5'), covar=tensor([0.2341, 0.1840, 0.1288, 0.3773, 0.0754, 0.0922, 0.4934, 0.1587], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0175, 0.0158, 0.0128, 0.0157, 0.0122, 0.0146, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 19:58:37,281 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3993, 1.2729, 1.1375, 1.3781, 1.7358, 1.5725, 1.3883, 1.2087], + device='cuda:5'), covar=tensor([0.0359, 0.0375, 0.0668, 0.0355, 0.0222, 0.0555, 0.0328, 0.0412], + device='cuda:5'), in_proj_covar=tensor([0.0095, 0.0109, 0.0145, 0.0113, 0.0100, 0.0108, 0.0099, 0.0109], + device='cuda:5'), out_proj_covar=tensor([7.3748e-05, 8.4167e-05, 1.1429e-04, 8.7511e-05, 7.7760e-05, 7.9802e-05, + 7.4109e-05, 8.3171e-05], device='cuda:5') +2023-03-26 19:58:39,069 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90786.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:58:57,838 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90815.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:58:58,402 INFO [finetune.py:976] (5/7) Epoch 16, batch 4900, loss[loss=0.1666, simple_loss=0.2383, pruned_loss=0.04744, over 4808.00 frames. ], tot_loss[loss=0.1847, simple_loss=0.2538, pruned_loss=0.05775, over 954797.10 frames. 
], batch size: 40, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 19:59:11,175 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=90834.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:59:11,858 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0375, 1.9427, 1.5922, 1.8376, 1.8052, 1.8260, 1.8356, 2.5773], + device='cuda:5'), covar=tensor([0.3892, 0.4460, 0.3475, 0.3940, 0.4165, 0.2406, 0.3842, 0.1652], + device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0261, 0.0226, 0.0275, 0.0249, 0.0217, 0.0250, 0.0229], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 19:59:24,120 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.061e+02 1.543e+02 1.926e+02 2.205e+02 3.945e+02, threshold=3.852e+02, percent-clipped=0.0 +2023-03-26 19:59:25,353 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90849.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:59:27,133 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90852.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 19:59:42,912 INFO [finetune.py:976] (5/7) Epoch 16, batch 4950, loss[loss=0.156, simple_loss=0.2325, pruned_loss=0.03974, over 4890.00 frames. ], tot_loss[loss=0.1847, simple_loss=0.2545, pruned_loss=0.05745, over 955205.59 frames. ], batch size: 32, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 20:00:12,905 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90888.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:00:31,213 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7635, 1.8410, 2.3975, 2.0890, 2.0786, 4.4664, 1.7819, 1.9561], + device='cuda:5'), covar=tensor([0.1002, 0.1747, 0.1060, 0.0939, 0.1490, 0.0210, 0.1472, 0.1674], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0074, 0.0078, 0.0092, 0.0081, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 20:00:34,900 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90910.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:00:38,891 INFO [finetune.py:976] (5/7) Epoch 16, batch 5000, loss[loss=0.1916, simple_loss=0.2495, pruned_loss=0.06686, over 4930.00 frames. ], tot_loss[loss=0.1825, simple_loss=0.2522, pruned_loss=0.05635, over 956314.02 frames. 
], batch size: 33, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 20:00:53,036 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=90936.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:00:53,718 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4506, 1.5122, 1.3070, 1.5165, 1.9386, 1.6752, 1.5254, 1.4045], + device='cuda:5'), covar=tensor([0.0358, 0.0291, 0.0590, 0.0324, 0.0154, 0.0566, 0.0299, 0.0356], + device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0108, 0.0143, 0.0112, 0.0099, 0.0107, 0.0098, 0.0108], + device='cuda:5'), out_proj_covar=tensor([7.3184e-05, 8.3578e-05, 1.1326e-04, 8.6731e-05, 7.7144e-05, 7.9259e-05, + 7.3608e-05, 8.2665e-05], device='cuda:5') +2023-03-26 20:01:00,204 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.882e+01 1.531e+02 1.843e+02 2.134e+02 5.620e+02, threshold=3.687e+02, percent-clipped=2.0 +2023-03-26 20:01:11,975 INFO [finetune.py:976] (5/7) Epoch 16, batch 5050, loss[loss=0.1919, simple_loss=0.2578, pruned_loss=0.06303, over 4815.00 frames. ], tot_loss[loss=0.1809, simple_loss=0.2502, pruned_loss=0.05576, over 957025.73 frames. ], batch size: 39, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 20:01:40,197 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=91007.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:01:45,573 INFO [finetune.py:976] (5/7) Epoch 16, batch 5100, loss[loss=0.1749, simple_loss=0.2339, pruned_loss=0.05793, over 4833.00 frames. ], tot_loss[loss=0.1771, simple_loss=0.2464, pruned_loss=0.05392, over 957955.95 frames. ], batch size: 30, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 20:01:55,152 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0951, 1.8307, 2.2138, 1.5640, 1.9995, 2.2249, 1.7769, 2.3484], + device='cuda:5'), covar=tensor([0.1020, 0.1486, 0.1256, 0.1465, 0.0754, 0.1016, 0.2236, 0.0651], + device='cuda:5'), in_proj_covar=tensor([0.0193, 0.0203, 0.0190, 0.0190, 0.0175, 0.0211, 0.0217, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:02:07,771 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.054e+02 1.471e+02 1.778e+02 2.096e+02 3.940e+02, threshold=3.556e+02, percent-clipped=2.0 +2023-03-26 20:02:12,115 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=91055.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:02:19,222 INFO [finetune.py:976] (5/7) Epoch 16, batch 5150, loss[loss=0.2064, simple_loss=0.2769, pruned_loss=0.06793, over 4711.00 frames. ], tot_loss[loss=0.1776, simple_loss=0.2463, pruned_loss=0.05442, over 955927.09 frames. ], batch size: 59, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 20:02:31,787 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9616, 1.9273, 1.7603, 2.1504, 2.4126, 2.1849, 1.7980, 1.6448], + device='cuda:5'), covar=tensor([0.2073, 0.1832, 0.1767, 0.1461, 0.1637, 0.1105, 0.2214, 0.1868], + device='cuda:5'), in_proj_covar=tensor([0.0242, 0.0210, 0.0213, 0.0192, 0.0244, 0.0187, 0.0217, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:02:52,451 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=91115.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:02:52,958 INFO [finetune.py:976] (5/7) Epoch 16, batch 5200, loss[loss=0.1841, simple_loss=0.2604, pruned_loss=0.05391, over 4769.00 frames. 
], tot_loss[loss=0.1817, simple_loss=0.2506, pruned_loss=0.05634, over 954029.42 frames. ], batch size: 54, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 20:03:14,339 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.009e+02 1.594e+02 1.966e+02 2.465e+02 4.658e+02, threshold=3.932e+02, percent-clipped=3.0 +2023-03-26 20:03:17,820 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=91152.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:03:24,389 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=91163.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:03:26,649 INFO [finetune.py:976] (5/7) Epoch 16, batch 5250, loss[loss=0.2167, simple_loss=0.2787, pruned_loss=0.07735, over 4795.00 frames. ], tot_loss[loss=0.1831, simple_loss=0.2525, pruned_loss=0.05679, over 953607.10 frames. ], batch size: 51, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 20:03:27,392 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4775, 1.3545, 1.2308, 1.4480, 1.5984, 1.4891, 1.0261, 1.2616], + device='cuda:5'), covar=tensor([0.2149, 0.1997, 0.1909, 0.1704, 0.1709, 0.1304, 0.2652, 0.1960], + device='cuda:5'), in_proj_covar=tensor([0.0244, 0.0211, 0.0214, 0.0193, 0.0245, 0.0188, 0.0218, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:03:49,259 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=91200.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:03:51,099 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-03-26 20:03:53,266 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91205.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:03:59,842 INFO [finetune.py:976] (5/7) Epoch 16, batch 5300, loss[loss=0.1612, simple_loss=0.2294, pruned_loss=0.04648, over 4756.00 frames. ], tot_loss[loss=0.1838, simple_loss=0.2534, pruned_loss=0.05712, over 951757.36 frames. ], batch size: 26, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 20:04:04,115 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7829, 2.5683, 2.1164, 1.0716, 2.3038, 2.0532, 1.9678, 2.3576], + device='cuda:5'), covar=tensor([0.0771, 0.0814, 0.1541, 0.2189, 0.1301, 0.2257, 0.2334, 0.1021], + device='cuda:5'), in_proj_covar=tensor([0.0167, 0.0195, 0.0198, 0.0182, 0.0211, 0.0205, 0.0223, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:04:21,114 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.091e+02 1.597e+02 1.843e+02 2.222e+02 3.769e+02, threshold=3.686e+02, percent-clipped=0.0 +2023-03-26 20:04:23,581 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5960, 1.6033, 1.3709, 1.6286, 2.0848, 1.8896, 1.6519, 1.5153], + device='cuda:5'), covar=tensor([0.0327, 0.0309, 0.0587, 0.0306, 0.0172, 0.0481, 0.0283, 0.0343], + device='cuda:5'), in_proj_covar=tensor([0.0095, 0.0109, 0.0145, 0.0113, 0.0100, 0.0108, 0.0099, 0.0109], + device='cuda:5'), out_proj_covar=tensor([7.4047e-05, 8.4291e-05, 1.1437e-04, 8.7269e-05, 7.7868e-05, 7.9965e-05, + 7.4061e-05, 8.3323e-05], device='cuda:5') +2023-03-26 20:04:28,459 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.82 vs. 
limit=5.0 +2023-03-26 20:04:33,501 INFO [finetune.py:976] (5/7) Epoch 16, batch 5350, loss[loss=0.1946, simple_loss=0.2532, pruned_loss=0.06795, over 4894.00 frames. ], tot_loss[loss=0.1847, simple_loss=0.2544, pruned_loss=0.05744, over 952688.74 frames. ], batch size: 35, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 20:04:41,809 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7076, 2.7193, 2.5536, 2.8916, 3.5074, 2.8536, 2.9758, 2.3681], + device='cuda:5'), covar=tensor([0.1718, 0.1608, 0.1505, 0.1361, 0.1272, 0.0886, 0.1433, 0.1540], + device='cuda:5'), in_proj_covar=tensor([0.0240, 0.0208, 0.0210, 0.0190, 0.0241, 0.0185, 0.0215, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:04:50,656 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91285.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:05:29,809 INFO [finetune.py:976] (5/7) Epoch 16, batch 5400, loss[loss=0.1612, simple_loss=0.2257, pruned_loss=0.04838, over 4733.00 frames. ], tot_loss[loss=0.1822, simple_loss=0.2514, pruned_loss=0.05647, over 953227.22 frames. ], batch size: 23, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 20:06:02,270 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91346.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:06:03,346 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.781e+01 1.583e+02 1.812e+02 2.291e+02 3.767e+02, threshold=3.624e+02, percent-clipped=1.0 +2023-03-26 20:06:15,750 INFO [finetune.py:976] (5/7) Epoch 16, batch 5450, loss[loss=0.1832, simple_loss=0.2646, pruned_loss=0.05087, over 4909.00 frames. ], tot_loss[loss=0.1799, simple_loss=0.2486, pruned_loss=0.05558, over 953784.66 frames. ], batch size: 43, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 20:06:49,418 INFO [finetune.py:976] (5/7) Epoch 16, batch 5500, loss[loss=0.1882, simple_loss=0.2469, pruned_loss=0.06471, over 4877.00 frames. ], tot_loss[loss=0.1769, simple_loss=0.2453, pruned_loss=0.05422, over 952597.59 frames. ], batch size: 34, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 20:06:51,675 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.52 vs. 
limit=5.0 +2023-03-26 20:07:10,224 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.789e+01 1.460e+02 1.744e+02 2.187e+02 6.443e+02, threshold=3.488e+02, percent-clipped=1.0 +2023-03-26 20:07:10,960 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0677, 2.2363, 2.0226, 2.3461, 2.6414, 2.1627, 2.1708, 1.6524], + device='cuda:5'), covar=tensor([0.2478, 0.2113, 0.1973, 0.1750, 0.1953, 0.1325, 0.2300, 0.2128], + device='cuda:5'), in_proj_covar=tensor([0.0242, 0.0208, 0.0211, 0.0190, 0.0242, 0.0186, 0.0215, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:07:15,617 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5902, 1.5267, 1.4160, 1.5215, 1.0282, 3.1461, 1.2205, 1.5688], + device='cuda:5'), covar=tensor([0.3302, 0.2494, 0.2143, 0.2280, 0.1824, 0.0249, 0.2828, 0.1389], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0120, 0.0123, 0.0114, 0.0097, 0.0096, 0.0096], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 20:07:22,094 INFO [finetune.py:976] (5/7) Epoch 16, batch 5550, loss[loss=0.1649, simple_loss=0.2405, pruned_loss=0.04464, over 4811.00 frames. ], tot_loss[loss=0.1772, simple_loss=0.2459, pruned_loss=0.05422, over 952293.77 frames. ], batch size: 45, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 20:07:34,663 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5655, 1.4522, 1.2972, 1.4874, 1.8352, 1.6980, 1.5529, 1.3565], + device='cuda:5'), covar=tensor([0.0313, 0.0318, 0.0562, 0.0275, 0.0179, 0.0475, 0.0328, 0.0371], + device='cuda:5'), in_proj_covar=tensor([0.0096, 0.0109, 0.0145, 0.0113, 0.0101, 0.0109, 0.0099, 0.0110], + device='cuda:5'), out_proj_covar=tensor([7.4453e-05, 8.4456e-05, 1.1466e-04, 8.7569e-05, 7.8359e-05, 8.0616e-05, + 7.4490e-05, 8.3794e-05], device='cuda:5') +2023-03-26 20:07:47,596 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=91505.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:07:53,902 INFO [finetune.py:976] (5/7) Epoch 16, batch 5600, loss[loss=0.1941, simple_loss=0.268, pruned_loss=0.06011, over 4867.00 frames. ], tot_loss[loss=0.1797, simple_loss=0.2494, pruned_loss=0.05503, over 954082.82 frames. ], batch size: 34, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 20:08:08,999 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91542.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 20:08:13,217 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.075e+02 1.603e+02 1.969e+02 2.458e+02 5.397e+02, threshold=3.938e+02, percent-clipped=5.0 +2023-03-26 20:08:16,176 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=91553.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:08:23,624 INFO [finetune.py:976] (5/7) Epoch 16, batch 5650, loss[loss=0.1897, simple_loss=0.2733, pruned_loss=0.0531, over 4846.00 frames. ], tot_loss[loss=0.1835, simple_loss=0.2536, pruned_loss=0.05672, over 954589.85 frames. ], batch size: 47, lr: 3.42e-03, grad_scale: 32.0 +2023-03-26 20:08:45,674 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91603.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 20:08:53,175 INFO [finetune.py:976] (5/7) Epoch 16, batch 5700, loss[loss=0.1638, simple_loss=0.2275, pruned_loss=0.05003, over 4402.00 frames. 
], tot_loss[loss=0.1819, simple_loss=0.2506, pruned_loss=0.05658, over 937354.49 frames. ], batch size: 19, lr: 3.42e-03, grad_scale: 32.0
+2023-03-26 20:09:07,888 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91641.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 20:09:21,367 INFO [finetune.py:976] (5/7) Epoch 17, batch 0, loss[loss=0.2023, simple_loss=0.272, pruned_loss=0.0663, over 4846.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.272, pruned_loss=0.0663, over 4846.00 frames. ], batch size: 44, lr: 3.41e-03, grad_scale: 32.0
+2023-03-26 20:09:21,367 INFO [finetune.py:1001] (5/7) Computing validation loss
+2023-03-26 20:09:23,579 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8480, 1.1409, 1.9440, 1.8415, 1.7282, 1.6614, 1.7498, 1.8571],
+ device='cuda:5'), covar=tensor([0.4228, 0.4264, 0.3737, 0.3837, 0.5070, 0.3867, 0.4644, 0.3400],
+ device='cuda:5'), in_proj_covar=tensor([0.0247, 0.0240, 0.0259, 0.0271, 0.0270, 0.0245, 0.0282, 0.0238],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 20:09:23,642 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.8541, 3.4204, 3.5222, 3.7261, 3.6186, 3.3156, 3.9212, 1.3104],
+ device='cuda:5'), covar=tensor([0.0862, 0.0814, 0.0902, 0.0997, 0.1407, 0.1822, 0.0761, 0.5208],
+ device='cuda:5'), in_proj_covar=tensor([0.0345, 0.0241, 0.0272, 0.0290, 0.0330, 0.0278, 0.0296, 0.0291],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 20:09:24,173 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5629, 1.3143, 1.3998, 1.5094, 1.7889, 1.6642, 1.4623, 1.3022],
+ device='cuda:5'), covar=tensor([0.0329, 0.0304, 0.0609, 0.0327, 0.0207, 0.0430, 0.0362, 0.0367],
+ device='cuda:5'), in_proj_covar=tensor([0.0095, 0.0108, 0.0144, 0.0113, 0.0100, 0.0108, 0.0098, 0.0109],
+ device='cuda:5'), out_proj_covar=tensor([7.3808e-05, 8.3862e-05, 1.1375e-04, 8.6997e-05, 7.7713e-05, 7.9723e-05,
+ 7.3573e-05, 8.3274e-05], device='cuda:5')
+2023-03-26 20:09:32,014 INFO [finetune.py:1010] (5/7) Epoch 17, validation: loss=0.1591, simple_loss=0.2283, pruned_loss=0.04492, over 2265189.00 frames.
+2023-03-26 20:09:32,015 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB
+2023-03-26 20:09:35,492 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.774e+01 1.479e+02 1.757e+02 2.057e+02 5.096e+02, threshold=3.514e+02, percent-clipped=1.0
+2023-03-26 20:09:46,940 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4274, 1.3645, 1.3611, 1.3168, 0.6989, 2.2875, 0.7086, 1.3661],
+ device='cuda:5'), covar=tensor([0.3209, 0.2449, 0.2102, 0.2362, 0.2012, 0.0360, 0.2698, 0.1216],
+ device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0115, 0.0120, 0.0123, 0.0113, 0.0096, 0.0096, 0.0096],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 20:09:55,357 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1806, 1.8754, 1.8837, 1.0302, 2.1625, 2.2841, 2.0737, 1.8025],
+ device='cuda:5'), covar=tensor([0.0912, 0.0713, 0.0608, 0.0652, 0.0463, 0.0828, 0.0460, 0.0690],
+ device='cuda:5'), in_proj_covar=tensor([0.0126, 0.0151, 0.0124, 0.0128, 0.0131, 0.0129, 0.0143, 0.0148],
+ device='cuda:5'), out_proj_covar=tensor([9.3002e-05, 1.0992e-04, 8.9042e-05, 9.1187e-05, 9.2592e-05, 9.3088e-05,
+ 1.0336e-04, 1.0703e-04], device='cuda:5')
+2023-03-26 20:10:07,337 INFO [finetune.py:976] (5/7) Epoch 17, batch 50, loss[loss=0.1499, simple_loss=0.2272, pruned_loss=0.03632, over 4890.00 frames. ], tot_loss[loss=0.1885, simple_loss=0.2569, pruned_loss=0.05999, over 215836.24 frames. ], batch size: 32, lr: 3.41e-03, grad_scale: 32.0
+2023-03-26 20:10:22,235 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9957, 1.6856, 2.4219, 1.5097, 2.1672, 2.3648, 1.6211, 2.4753],
+ device='cuda:5'), covar=tensor([0.1244, 0.1878, 0.1401, 0.1962, 0.0786, 0.1234, 0.2639, 0.0806],
+ device='cuda:5'), in_proj_covar=tensor([0.0193, 0.0203, 0.0190, 0.0190, 0.0175, 0.0212, 0.0218, 0.0199],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 20:10:52,789 INFO [finetune.py:976] (5/7) Epoch 17, batch 100, loss[loss=0.1947, simple_loss=0.2512, pruned_loss=0.06914, over 4826.00 frames. ], tot_loss[loss=0.1837, simple_loss=0.2521, pruned_loss=0.05762, over 381109.71 frames. ], batch size: 38, lr: 3.41e-03, grad_scale: 32.0
+2023-03-26 20:11:01,252 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 1.600e+02 1.810e+02 2.096e+02 3.529e+02, threshold=3.620e+02, percent-clipped=1.0
+2023-03-26 20:11:09,370 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0
+2023-03-26 20:11:37,627 INFO [finetune.py:976] (5/7) Epoch 17, batch 150, loss[loss=0.1976, simple_loss=0.2596, pruned_loss=0.06779, over 4931.00 frames. ], tot_loss[loss=0.1782, simple_loss=0.2459, pruned_loss=0.0552, over 510673.02 frames. ], batch size: 33, lr: 3.41e-03, grad_scale: 32.0
+2023-03-26 20:12:11,013 INFO [finetune.py:976] (5/7) Epoch 17, batch 200, loss[loss=0.2379, simple_loss=0.3062, pruned_loss=0.08485, over 4829.00 frames. ], tot_loss[loss=0.18, simple_loss=0.247, pruned_loss=0.05654, over 609986.09 frames. ], batch size: 47, lr: 3.41e-03, grad_scale: 32.0
+2023-03-26 20:12:11,702 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7140, 1.8899, 2.4476, 2.0543, 2.1064, 4.5425, 1.7711, 2.0877],
+ device='cuda:5'), covar=tensor([0.0978, 0.1657, 0.1008, 0.0955, 0.1428, 0.0179, 0.1453, 0.1697],
+ device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0074, 0.0077, 0.0091, 0.0080, 0.0085, 0.0079],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 20:12:14,524 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.078e+02 1.595e+02 1.938e+02 2.273e+02 4.627e+02, threshold=3.876e+02, percent-clipped=4.0
+2023-03-26 20:12:43,710 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91891.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 20:12:44,795 INFO [finetune.py:976] (5/7) Epoch 17, batch 250, loss[loss=0.2199, simple_loss=0.2979, pruned_loss=0.07094, over 4837.00 frames. ], tot_loss[loss=0.183, simple_loss=0.2506, pruned_loss=0.05769, over 687308.35 frames. ], batch size: 49, lr: 3.41e-03, grad_scale: 32.0
+2023-03-26 20:12:48,430 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91898.0, num_to_drop=1, layers_to_drop={3}
+2023-03-26 20:12:50,141 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7417, 1.5326, 1.4085, 1.6222, 2.0169, 1.8760, 1.6108, 1.4505],
+ device='cuda:5'), covar=tensor([0.0280, 0.0353, 0.0555, 0.0311, 0.0183, 0.0398, 0.0292, 0.0391],
+ device='cuda:5'), in_proj_covar=tensor([0.0095, 0.0108, 0.0144, 0.0113, 0.0100, 0.0108, 0.0098, 0.0109],
+ device='cuda:5'), out_proj_covar=tensor([7.4044e-05, 8.3918e-05, 1.1392e-04, 8.7083e-05, 7.7791e-05, 7.9603e-05,
+ 7.3818e-05, 8.3000e-05], device='cuda:5')
+2023-03-26 20:13:14,614 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3344, 2.5300, 2.2630, 1.7339, 2.4104, 2.6131, 2.6521, 2.2214],
+ device='cuda:5'), covar=tensor([0.0664, 0.0567, 0.0771, 0.0932, 0.0784, 0.0707, 0.0599, 0.0998],
+ device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0134, 0.0141, 0.0124, 0.0124, 0.0140, 0.0141, 0.0165],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-26 20:13:17,060 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=91941.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 20:13:18,233 INFO [finetune.py:976] (5/7) Epoch 17, batch 300, loss[loss=0.1595, simple_loss=0.2378, pruned_loss=0.04063, over 4750.00 frames. ], tot_loss[loss=0.1852, simple_loss=0.2536, pruned_loss=0.05837, over 747934.56 frames. ], batch size: 27, lr: 3.41e-03, grad_scale: 32.0
+2023-03-26 20:13:21,757 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.873e+01 1.605e+02 2.003e+02 2.239e+02 3.510e+02, threshold=4.006e+02, percent-clipped=0.0
+2023-03-26 20:13:22,002 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0
+2023-03-26 20:13:24,428 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91952.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 20:13:26,509 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7761, 3.7609, 3.5850, 1.8853, 3.8045, 2.9768, 1.1522, 2.6229],
+ device='cuda:5'), covar=tensor([0.2237, 0.2339, 0.1578, 0.3453, 0.1144, 0.1016, 0.4291, 0.1708],
+ device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0174, 0.0159, 0.0128, 0.0158, 0.0123, 0.0147, 0.0123],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-26 20:13:27,197 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91955.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 20:13:44,445 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91981.0, num_to_drop=1, layers_to_drop={1}
+2023-03-26 20:13:45,337 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0
+2023-03-26 20:13:49,292 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=91989.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 20:13:52,147 INFO [finetune.py:976] (5/7) Epoch 17, batch 350, loss[loss=0.2148, simple_loss=0.2803, pruned_loss=0.07467, over 4713.00 frames. ], tot_loss[loss=0.1866, simple_loss=0.255, pruned_loss=0.05906, over 793173.93 frames. ], batch size: 59, lr: 3.41e-03, grad_scale: 32.0
+2023-03-26 20:13:54,109 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5733, 1.4968, 1.4454, 1.4799, 1.3213, 3.3236, 1.3409, 1.7827],
+ device='cuda:5'), covar=tensor([0.3376, 0.2547, 0.2238, 0.2531, 0.1623, 0.0212, 0.2651, 0.1265],
+ device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0115, 0.0120, 0.0123, 0.0113, 0.0096, 0.0096, 0.0096],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-26 20:14:09,812 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92016.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 20:14:11,112 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-03-26 20:14:26,155 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92042.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 20:14:26,649 INFO [finetune.py:976] (5/7) Epoch 17, batch 400, loss[loss=0.1884, simple_loss=0.2462, pruned_loss=0.0653, over 4866.00 frames. ], tot_loss[loss=0.1877, simple_loss=0.256, pruned_loss=0.05967, over 828190.52 frames. ], batch size: 34, lr: 3.41e-03, grad_scale: 32.0
+2023-03-26 20:14:30,186 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.886e+01 1.544e+02 1.847e+02 2.163e+02 3.487e+02, threshold=3.695e+02, percent-clipped=0.0
+2023-03-26 20:14:48,540 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0
+2023-03-26 20:15:00,227 INFO [finetune.py:976] (5/7) Epoch 17, batch 450, loss[loss=0.2075, simple_loss=0.2731, pruned_loss=0.07092, over 4821.00 frames. ], tot_loss[loss=0.1855, simple_loss=0.2543, pruned_loss=0.0584, over 857182.24 frames.
], batch size: 33, lr: 3.41e-03, grad_scale: 32.0 +2023-03-26 20:15:06,688 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4415, 1.3026, 1.6674, 2.4793, 1.6838, 2.2055, 0.8708, 2.0987], + device='cuda:5'), covar=tensor([0.1716, 0.1458, 0.1174, 0.0724, 0.0915, 0.1040, 0.1568, 0.0699], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0116, 0.0133, 0.0165, 0.0101, 0.0137, 0.0124, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 20:15:10,824 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9103, 1.8753, 1.6510, 2.0063, 2.3037, 2.0353, 1.5139, 1.5771], + device='cuda:5'), covar=tensor([0.2217, 0.1982, 0.1926, 0.1733, 0.1759, 0.1203, 0.2465, 0.1955], + device='cuda:5'), in_proj_covar=tensor([0.0243, 0.0209, 0.0213, 0.0192, 0.0244, 0.0186, 0.0217, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:15:28,968 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2837, 3.6590, 3.8971, 4.0796, 4.0815, 3.8012, 4.3362, 1.4138], + device='cuda:5'), covar=tensor([0.0713, 0.0868, 0.0899, 0.1056, 0.1078, 0.1419, 0.0651, 0.5588], + device='cuda:5'), in_proj_covar=tensor([0.0351, 0.0246, 0.0278, 0.0295, 0.0336, 0.0283, 0.0302, 0.0298], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:15:33,735 INFO [finetune.py:976] (5/7) Epoch 17, batch 500, loss[loss=0.2095, simple_loss=0.2663, pruned_loss=0.07641, over 4823.00 frames. ], tot_loss[loss=0.1833, simple_loss=0.2518, pruned_loss=0.05737, over 880857.07 frames. ], batch size: 40, lr: 3.41e-03, grad_scale: 32.0 +2023-03-26 20:15:37,218 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.042e+02 1.578e+02 1.863e+02 2.269e+02 4.074e+02, threshold=3.727e+02, percent-clipped=2.0 +2023-03-26 20:16:30,569 INFO [finetune.py:976] (5/7) Epoch 17, batch 550, loss[loss=0.131, simple_loss=0.2075, pruned_loss=0.02729, over 4766.00 frames. ], tot_loss[loss=0.1814, simple_loss=0.2489, pruned_loss=0.05692, over 896985.34 frames. ], batch size: 28, lr: 3.41e-03, grad_scale: 32.0 +2023-03-26 20:16:33,718 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92198.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 20:16:58,532 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-03-26 20:17:13,334 INFO [finetune.py:976] (5/7) Epoch 17, batch 600, loss[loss=0.2239, simple_loss=0.282, pruned_loss=0.08292, over 4811.00 frames. ], tot_loss[loss=0.1841, simple_loss=0.2515, pruned_loss=0.0584, over 910111.48 frames. ], batch size: 45, lr: 3.41e-03, grad_scale: 64.0 +2023-03-26 20:17:15,214 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=92246.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 20:17:15,825 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92247.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:17:16,353 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.142e+02 1.709e+02 1.980e+02 2.349e+02 5.069e+02, threshold=3.960e+02, percent-clipped=5.0 +2023-03-26 20:17:47,090 INFO [finetune.py:976] (5/7) Epoch 17, batch 650, loss[loss=0.1803, simple_loss=0.2625, pruned_loss=0.04904, over 4803.00 frames. 
], tot_loss[loss=0.1852, simple_loss=0.2537, pruned_loss=0.05837, over 920456.71 frames. ], batch size: 51, lr: 3.41e-03, grad_scale: 64.0 +2023-03-26 20:17:51,734 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0 +2023-03-26 20:17:58,668 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92311.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:18:16,769 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92337.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 20:18:20,699 INFO [finetune.py:976] (5/7) Epoch 17, batch 700, loss[loss=0.1811, simple_loss=0.2566, pruned_loss=0.05278, over 4734.00 frames. ], tot_loss[loss=0.1873, simple_loss=0.2567, pruned_loss=0.05895, over 928484.79 frames. ], batch size: 54, lr: 3.41e-03, grad_scale: 64.0 +2023-03-26 20:18:23,724 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.116e+02 1.553e+02 1.844e+02 2.135e+02 3.970e+02, threshold=3.688e+02, percent-clipped=1.0 +2023-03-26 20:18:28,632 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4853, 1.5192, 1.8823, 1.2630, 1.5578, 1.7876, 1.3952, 2.0100], + device='cuda:5'), covar=tensor([0.1326, 0.1790, 0.1222, 0.1661, 0.0875, 0.1349, 0.2688, 0.0771], + device='cuda:5'), in_proj_covar=tensor([0.0194, 0.0203, 0.0190, 0.0189, 0.0176, 0.0213, 0.0217, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:18:54,355 INFO [finetune.py:976] (5/7) Epoch 17, batch 750, loss[loss=0.2018, simple_loss=0.2671, pruned_loss=0.06829, over 4824.00 frames. ], tot_loss[loss=0.1882, simple_loss=0.2579, pruned_loss=0.05922, over 934133.99 frames. ], batch size: 30, lr: 3.41e-03, grad_scale: 64.0 +2023-03-26 20:19:00,068 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3519, 2.2581, 1.7213, 2.2366, 2.2266, 1.9664, 2.5833, 2.2806], + device='cuda:5'), covar=tensor([0.1314, 0.2053, 0.3021, 0.2592, 0.2426, 0.1572, 0.3088, 0.1693], + device='cuda:5'), in_proj_covar=tensor([0.0183, 0.0187, 0.0234, 0.0253, 0.0244, 0.0202, 0.0213, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:19:01,265 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4922, 1.4798, 1.8417, 1.6931, 1.6596, 3.3236, 1.3563, 1.6278], + device='cuda:5'), covar=tensor([0.0952, 0.1836, 0.1053, 0.0979, 0.1624, 0.0292, 0.1615, 0.1697], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0078, 0.0091, 0.0081, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 20:19:16,559 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3603, 2.3839, 2.5123, 1.5716, 2.3501, 2.4367, 2.4172, 2.0276], + device='cuda:5'), covar=tensor([0.0596, 0.0571, 0.0572, 0.0957, 0.0570, 0.0744, 0.0605, 0.1082], + device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0135, 0.0142, 0.0124, 0.0124, 0.0141, 0.0142, 0.0166], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:19:20,988 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.68 vs. limit=2.0 +2023-03-26 20:19:28,150 INFO [finetune.py:976] (5/7) Epoch 17, batch 800, loss[loss=0.2034, simple_loss=0.2791, pruned_loss=0.06378, over 4915.00 frames. 
], tot_loss[loss=0.1867, simple_loss=0.2565, pruned_loss=0.05849, over 938689.85 frames. ], batch size: 46, lr: 3.41e-03, grad_scale: 64.0 +2023-03-26 20:19:31,195 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.199e+02 1.753e+02 1.963e+02 2.342e+02 4.288e+02, threshold=3.926e+02, percent-clipped=2.0 +2023-03-26 20:19:38,625 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4458, 2.1231, 1.5976, 0.7401, 1.8886, 1.9216, 1.7784, 1.9184], + device='cuda:5'), covar=tensor([0.0797, 0.0889, 0.1508, 0.2028, 0.1326, 0.2139, 0.2116, 0.0860], + device='cuda:5'), in_proj_covar=tensor([0.0167, 0.0195, 0.0198, 0.0182, 0.0211, 0.0206, 0.0222, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:19:52,133 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4239, 2.3663, 1.9690, 2.3351, 2.3860, 2.1228, 2.7195, 2.3531], + device='cuda:5'), covar=tensor([0.1281, 0.1762, 0.2758, 0.2339, 0.2113, 0.1487, 0.2472, 0.1651], + device='cuda:5'), in_proj_covar=tensor([0.0182, 0.0187, 0.0234, 0.0253, 0.0244, 0.0201, 0.0213, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:19:55,060 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9058, 1.1986, 1.9318, 1.9040, 1.7216, 1.6659, 1.8249, 1.8021], + device='cuda:5'), covar=tensor([0.3738, 0.3962, 0.3265, 0.3786, 0.4567, 0.3634, 0.4184, 0.3266], + device='cuda:5'), in_proj_covar=tensor([0.0248, 0.0240, 0.0257, 0.0271, 0.0270, 0.0245, 0.0282, 0.0238], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:20:01,481 INFO [finetune.py:976] (5/7) Epoch 17, batch 850, loss[loss=0.1847, simple_loss=0.2579, pruned_loss=0.05574, over 4917.00 frames. ], tot_loss[loss=0.1854, simple_loss=0.2546, pruned_loss=0.05806, over 944302.26 frames. ], batch size: 37, lr: 3.41e-03, grad_scale: 64.0 +2023-03-26 20:20:14,867 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.9595, 4.3263, 4.4757, 4.7590, 4.6945, 4.3618, 5.0278, 1.5350], + device='cuda:5'), covar=tensor([0.0530, 0.0759, 0.0719, 0.0715, 0.1090, 0.1513, 0.0508, 0.5708], + device='cuda:5'), in_proj_covar=tensor([0.0349, 0.0244, 0.0275, 0.0293, 0.0334, 0.0280, 0.0300, 0.0296], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:20:24,851 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2352, 2.3342, 2.9070, 1.7325, 2.4484, 2.7257, 2.1479, 2.8500], + device='cuda:5'), covar=tensor([0.1522, 0.2110, 0.1457, 0.2255, 0.1016, 0.1533, 0.2772, 0.0818], + device='cuda:5'), in_proj_covar=tensor([0.0193, 0.0203, 0.0189, 0.0188, 0.0175, 0.0213, 0.0216, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:20:35,312 INFO [finetune.py:976] (5/7) Epoch 17, batch 900, loss[loss=0.2051, simple_loss=0.2686, pruned_loss=0.0708, over 4911.00 frames. ], tot_loss[loss=0.1836, simple_loss=0.2523, pruned_loss=0.05748, over 947396.68 frames. 
], batch size: 43, lr: 3.41e-03, grad_scale: 64.0 +2023-03-26 20:20:38,308 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92547.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:20:38,822 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.037e+02 1.480e+02 1.791e+02 2.296e+02 4.324e+02, threshold=3.582e+02, percent-clipped=2.0 +2023-03-26 20:20:59,734 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7530, 1.6951, 2.2187, 2.0024, 1.8510, 4.4338, 1.6787, 1.8771], + device='cuda:5'), covar=tensor([0.0956, 0.1856, 0.1152, 0.0976, 0.1622, 0.0195, 0.1542, 0.1829], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0078, 0.0092, 0.0080, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 20:21:08,735 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0758, 1.3711, 1.4253, 1.3071, 1.5141, 2.5118, 1.2740, 1.5074], + device='cuda:5'), covar=tensor([0.1054, 0.1786, 0.0990, 0.0969, 0.1558, 0.0390, 0.1542, 0.1680], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0078, 0.0092, 0.0080, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 20:21:15,096 INFO [finetune.py:976] (5/7) Epoch 17, batch 950, loss[loss=0.1542, simple_loss=0.2261, pruned_loss=0.04108, over 4781.00 frames. ], tot_loss[loss=0.1823, simple_loss=0.2502, pruned_loss=0.05721, over 948043.45 frames. ], batch size: 29, lr: 3.40e-03, grad_scale: 64.0 +2023-03-26 20:21:16,913 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=92595.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:21:37,251 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92611.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:21:45,581 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8811, 4.3208, 4.1967, 2.0766, 4.3703, 3.4246, 0.7371, 2.9918], + device='cuda:5'), covar=tensor([0.2764, 0.2021, 0.1320, 0.3366, 0.0789, 0.0844, 0.4731, 0.1613], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0174, 0.0158, 0.0128, 0.0157, 0.0122, 0.0146, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 20:21:54,769 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92625.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:22:05,399 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92637.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 20:22:13,801 INFO [finetune.py:976] (5/7) Epoch 17, batch 1000, loss[loss=0.2522, simple_loss=0.311, pruned_loss=0.09669, over 4732.00 frames. ], tot_loss[loss=0.1826, simple_loss=0.2509, pruned_loss=0.05712, over 949286.91 frames. ], batch size: 59, lr: 3.40e-03, grad_scale: 64.0 +2023-03-26 20:22:20,428 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.492e+01 1.711e+02 2.074e+02 2.603e+02 6.251e+02, threshold=4.148e+02, percent-clipped=4.0 +2023-03-26 20:22:27,819 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=92659.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:22:43,769 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. 
limit=2.0
+2023-03-26 20:22:45,214 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=92685.0, num_to_drop=1, layers_to_drop={0}
+2023-03-26 20:22:45,852 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92686.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 20:22:50,930 INFO [finetune.py:976] (5/7) Epoch 17, batch 1050, loss[loss=0.2071, simple_loss=0.2754, pruned_loss=0.06937, over 4910.00 frames. ], tot_loss[loss=0.1836, simple_loss=0.2529, pruned_loss=0.05718, over 950133.16 frames. ], batch size: 36, lr: 3.40e-03, grad_scale: 64.0
+2023-03-26 20:23:08,342 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92720.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 20:23:16,505 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0
+2023-03-26 20:23:23,716 INFO [finetune.py:976] (5/7) Epoch 17, batch 1100, loss[loss=0.1628, simple_loss=0.2414, pruned_loss=0.04205, over 4753.00 frames. ], tot_loss[loss=0.1834, simple_loss=0.2531, pruned_loss=0.05681, over 950667.80 frames. ], batch size: 27, lr: 3.40e-03, grad_scale: 64.0
+2023-03-26 20:23:27,195 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.157e+02 1.694e+02 2.013e+02 2.338e+02 4.806e+02, threshold=4.026e+02, percent-clipped=2.0
+2023-03-26 20:23:48,950 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92781.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 20:23:57,181 INFO [finetune.py:976] (5/7) Epoch 17, batch 1150, loss[loss=0.185, simple_loss=0.2632, pruned_loss=0.05337, over 4844.00 frames. ], tot_loss[loss=0.1845, simple_loss=0.2541, pruned_loss=0.0574, over 951271.66 frames. ], batch size: 44, lr: 3.40e-03, grad_scale: 64.0
+2023-03-26 20:24:22,927 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92831.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 20:24:31,079 INFO [finetune.py:976] (5/7) Epoch 17, batch 1200, loss[loss=0.1586, simple_loss=0.2353, pruned_loss=0.04098, over 4821.00 frames. ], tot_loss[loss=0.1839, simple_loss=0.253, pruned_loss=0.05742, over 951096.32 frames. ], batch size: 40, lr: 3.40e-03, grad_scale: 64.0
+2023-03-26 20:24:34,575 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.909e+01 1.547e+02 1.742e+02 2.125e+02 5.044e+02, threshold=3.483e+02, percent-clipped=2.0
+2023-03-26 20:24:50,252 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.22 vs. limit=5.0
+2023-03-26 20:25:04,192 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92892.0, num_to_drop=0, layers_to_drop=set()
+2023-03-26 20:25:04,690 INFO [finetune.py:976] (5/7) Epoch 17, batch 1250, loss[loss=0.1586, simple_loss=0.2168, pruned_loss=0.05017, over 4772.00 frames. ], tot_loss[loss=0.1818, simple_loss=0.2506, pruned_loss=0.05654, over 952691.87 frames.
], batch size: 28, lr: 3.40e-03, grad_scale: 64.0 +2023-03-26 20:25:08,903 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92899.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:25:11,786 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3922, 1.2247, 1.7265, 1.2153, 1.3768, 1.5503, 1.2598, 1.6474], + device='cuda:5'), covar=tensor([0.0992, 0.2168, 0.0983, 0.1204, 0.0806, 0.1135, 0.3045, 0.0716], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0203, 0.0189, 0.0189, 0.0176, 0.0213, 0.0216, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:25:20,038 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2718, 1.2814, 1.2118, 1.3540, 1.5458, 1.5720, 1.3746, 1.2737], + device='cuda:5'), covar=tensor([0.0386, 0.0301, 0.0623, 0.0307, 0.0217, 0.0340, 0.0313, 0.0349], + device='cuda:5'), in_proj_covar=tensor([0.0095, 0.0108, 0.0143, 0.0113, 0.0099, 0.0107, 0.0098, 0.0109], + device='cuda:5'), out_proj_covar=tensor([7.3599e-05, 8.3371e-05, 1.1339e-04, 8.7106e-05, 7.7032e-05, 7.9113e-05, + 7.3625e-05, 8.3105e-05], device='cuda:5') +2023-03-26 20:25:29,747 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92931.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:25:37,458 INFO [finetune.py:976] (5/7) Epoch 17, batch 1300, loss[loss=0.1888, simple_loss=0.2529, pruned_loss=0.06236, over 4899.00 frames. ], tot_loss[loss=0.1802, simple_loss=0.2483, pruned_loss=0.05602, over 952892.75 frames. ], batch size: 35, lr: 3.40e-03, grad_scale: 64.0 +2023-03-26 20:25:41,346 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.030e+02 1.503e+02 1.790e+02 2.154e+02 4.064e+02, threshold=3.581e+02, percent-clipped=1.0 +2023-03-26 20:25:49,679 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92960.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:25:57,497 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9576, 1.2764, 1.9510, 1.9326, 1.7517, 1.7204, 1.8283, 1.8589], + device='cuda:5'), covar=tensor([0.3770, 0.4185, 0.3409, 0.3717, 0.4906, 0.3595, 0.4587, 0.3041], + device='cuda:5'), in_proj_covar=tensor([0.0246, 0.0239, 0.0257, 0.0271, 0.0270, 0.0244, 0.0281, 0.0237], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:25:59,971 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92975.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:26:03,543 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92981.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:26:09,527 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5903, 1.5611, 1.2994, 1.4970, 1.9971, 1.8978, 1.5500, 1.4406], + device='cuda:5'), covar=tensor([0.0305, 0.0304, 0.0594, 0.0332, 0.0194, 0.0412, 0.0316, 0.0387], + device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0107, 0.0143, 0.0112, 0.0098, 0.0106, 0.0098, 0.0108], + device='cuda:5'), out_proj_covar=tensor([7.3122e-05, 8.2857e-05, 1.1281e-04, 8.6495e-05, 7.6523e-05, 7.8653e-05, + 7.3303e-05, 8.2552e-05], device='cuda:5') +2023-03-26 20:26:10,768 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92992.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:26:11,263 INFO [finetune.py:976] (5/7) Epoch 17, batch 1350, loss[loss=0.1527, 
simple_loss=0.2226, pruned_loss=0.04142, over 4825.00 frames. ], tot_loss[loss=0.1805, simple_loss=0.2488, pruned_loss=0.05612, over 953470.37 frames. ], batch size: 25, lr: 3.40e-03, grad_scale: 64.0 +2023-03-26 20:26:18,014 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4825, 2.3089, 1.8728, 0.9175, 2.1776, 1.9320, 1.7633, 2.1741], + device='cuda:5'), covar=tensor([0.0850, 0.0815, 0.1615, 0.1997, 0.1369, 0.2112, 0.2138, 0.0865], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0195, 0.0199, 0.0183, 0.0212, 0.0207, 0.0223, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:26:23,846 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93010.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:26:25,150 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-03-26 20:26:29,273 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3148, 2.8712, 2.7644, 1.1924, 3.0065, 2.3125, 0.7476, 1.8601], + device='cuda:5'), covar=tensor([0.2686, 0.2522, 0.1926, 0.3784, 0.1506, 0.1105, 0.4328, 0.1876], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0176, 0.0160, 0.0129, 0.0158, 0.0123, 0.0147, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 20:26:42,160 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6677, 3.4601, 3.2638, 1.6507, 3.6687, 2.8644, 1.0341, 2.4500], + device='cuda:5'), covar=tensor([0.3009, 0.2011, 0.1732, 0.3417, 0.1110, 0.0930, 0.4230, 0.1552], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0176, 0.0160, 0.0129, 0.0158, 0.0123, 0.0147, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 20:26:51,526 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93036.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:26:51,538 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93036.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:27:00,932 INFO [finetune.py:976] (5/7) Epoch 17, batch 1400, loss[loss=0.2053, simple_loss=0.289, pruned_loss=0.06074, over 4908.00 frames. ], tot_loss[loss=0.1818, simple_loss=0.251, pruned_loss=0.05632, over 951119.24 frames. ], batch size: 36, lr: 3.40e-03, grad_scale: 32.0 +2023-03-26 20:27:08,967 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.085e+02 1.549e+02 1.883e+02 2.310e+02 4.523e+02, threshold=3.767e+02, percent-clipped=3.0 +2023-03-26 20:27:11,578 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-03-26 20:27:34,705 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93071.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:27:39,785 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93076.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:27:50,100 INFO [finetune.py:976] (5/7) Epoch 17, batch 1450, loss[loss=0.1735, simple_loss=0.2492, pruned_loss=0.04891, over 4891.00 frames. ], tot_loss[loss=0.1829, simple_loss=0.2524, pruned_loss=0.05666, over 949769.17 frames. 
], batch size: 43, lr: 3.40e-03, grad_scale: 32.0 +2023-03-26 20:27:53,148 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93097.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 20:28:06,006 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93115.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:28:12,351 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9524, 1.9982, 1.8570, 2.0004, 1.5456, 4.6599, 1.7490, 2.1039], + device='cuda:5'), covar=tensor([0.3183, 0.2295, 0.1986, 0.2198, 0.1568, 0.0099, 0.2347, 0.1298], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0115, 0.0120, 0.0123, 0.0113, 0.0096, 0.0096, 0.0096], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 20:28:21,493 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4399, 1.4213, 1.2666, 1.4842, 1.8165, 1.6548, 1.4274, 1.2893], + device='cuda:5'), covar=tensor([0.0278, 0.0297, 0.0561, 0.0309, 0.0170, 0.0464, 0.0320, 0.0380], + device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0108, 0.0143, 0.0112, 0.0098, 0.0107, 0.0098, 0.0108], + device='cuda:5'), out_proj_covar=tensor([7.3122e-05, 8.3324e-05, 1.1281e-04, 8.6539e-05, 7.6713e-05, 7.8955e-05, + 7.3288e-05, 8.2594e-05], device='cuda:5') +2023-03-26 20:28:23,788 INFO [finetune.py:976] (5/7) Epoch 17, batch 1500, loss[loss=0.1503, simple_loss=0.2183, pruned_loss=0.04119, over 4837.00 frames. ], tot_loss[loss=0.1846, simple_loss=0.2541, pruned_loss=0.05754, over 952102.68 frames. ], batch size: 25, lr: 3.40e-03, grad_scale: 32.0 +2023-03-26 20:28:25,103 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4778, 2.7848, 2.4284, 1.8187, 2.5651, 2.7501, 2.8549, 2.3448], + device='cuda:5'), covar=tensor([0.0619, 0.0554, 0.0787, 0.0893, 0.0701, 0.0792, 0.0561, 0.1021], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0135, 0.0142, 0.0123, 0.0124, 0.0141, 0.0142, 0.0165], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:28:27,863 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.132e+02 1.651e+02 1.993e+02 2.270e+02 5.642e+02, threshold=3.987e+02, percent-clipped=1.0 +2023-03-26 20:28:47,150 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93176.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:28:53,719 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93187.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:28:57,278 INFO [finetune.py:976] (5/7) Epoch 17, batch 1550, loss[loss=0.1337, simple_loss=0.2086, pruned_loss=0.02938, over 4751.00 frames. ], tot_loss[loss=0.1838, simple_loss=0.2538, pruned_loss=0.05697, over 953586.09 frames. 
], batch size: 28, lr: 3.40e-03, grad_scale: 32.0 +2023-03-26 20:29:13,184 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1961, 2.1334, 2.1394, 1.4774, 2.1897, 2.2030, 2.2995, 1.8493], + device='cuda:5'), covar=tensor([0.0545, 0.0604, 0.0727, 0.0917, 0.0616, 0.0750, 0.0571, 0.1093], + device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0135, 0.0143, 0.0124, 0.0125, 0.0142, 0.0143, 0.0166], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:29:30,934 INFO [finetune.py:976] (5/7) Epoch 17, batch 1600, loss[loss=0.1407, simple_loss=0.2069, pruned_loss=0.03723, over 4934.00 frames. ], tot_loss[loss=0.1828, simple_loss=0.252, pruned_loss=0.05675, over 956621.64 frames. ], batch size: 33, lr: 3.40e-03, grad_scale: 32.0 +2023-03-26 20:29:34,594 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.059e+02 1.546e+02 1.807e+02 2.216e+02 3.989e+02, threshold=3.613e+02, percent-clipped=1.0 +2023-03-26 20:29:38,730 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93255.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:29:47,824 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-03-26 20:29:57,407 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93281.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:30:01,028 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93287.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:30:04,615 INFO [finetune.py:976] (5/7) Epoch 17, batch 1650, loss[loss=0.1445, simple_loss=0.2195, pruned_loss=0.03472, over 4817.00 frames. ], tot_loss[loss=0.1813, simple_loss=0.25, pruned_loss=0.05629, over 957238.36 frames. ], batch size: 39, lr: 3.40e-03, grad_scale: 32.0 +2023-03-26 20:30:10,863 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0711, 1.0253, 0.9819, 0.4400, 0.9301, 1.1773, 1.2132, 1.0099], + device='cuda:5'), covar=tensor([0.0875, 0.0682, 0.0580, 0.0600, 0.0601, 0.0778, 0.0463, 0.0756], + device='cuda:5'), in_proj_covar=tensor([0.0125, 0.0151, 0.0124, 0.0127, 0.0131, 0.0128, 0.0143, 0.0148], + device='cuda:5'), out_proj_covar=tensor([9.2006e-05, 1.0978e-04, 8.8585e-05, 9.0733e-05, 9.2222e-05, 9.2385e-05, + 1.0295e-04, 1.0677e-04], device='cuda:5') +2023-03-26 20:30:13,743 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7832, 1.0151, 1.8757, 1.7710, 1.6108, 1.5471, 1.6574, 1.7186], + device='cuda:5'), covar=tensor([0.3681, 0.3820, 0.2990, 0.3547, 0.4257, 0.3223, 0.3901, 0.2972], + device='cuda:5'), in_proj_covar=tensor([0.0246, 0.0239, 0.0257, 0.0272, 0.0270, 0.0245, 0.0282, 0.0238], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:30:28,979 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=93329.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:30:30,702 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93331.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:30:38,309 INFO [finetune.py:976] (5/7) Epoch 17, batch 1700, loss[loss=0.1495, simple_loss=0.2159, pruned_loss=0.04152, over 4772.00 frames. ], tot_loss[loss=0.1795, simple_loss=0.2478, pruned_loss=0.05561, over 957968.20 frames. 
], batch size: 54, lr: 3.40e-03, grad_scale: 32.0 +2023-03-26 20:30:41,946 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.003e+02 1.487e+02 1.694e+02 2.142e+02 3.933e+02, threshold=3.388e+02, percent-clipped=2.0 +2023-03-26 20:30:53,818 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93366.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:31:00,202 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.61 vs. limit=2.0 +2023-03-26 20:31:00,357 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93376.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:31:11,622 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93392.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 20:31:12,184 INFO [finetune.py:976] (5/7) Epoch 17, batch 1750, loss[loss=0.1868, simple_loss=0.259, pruned_loss=0.05737, over 4912.00 frames. ], tot_loss[loss=0.1804, simple_loss=0.2492, pruned_loss=0.05575, over 957734.70 frames. ], batch size: 37, lr: 3.40e-03, grad_scale: 32.0 +2023-03-26 20:31:13,559 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9124, 1.6713, 2.2816, 1.6541, 2.0508, 2.1062, 1.6507, 2.2903], + device='cuda:5'), covar=tensor([0.1120, 0.1840, 0.1205, 0.1587, 0.0672, 0.1302, 0.2386, 0.0637], + device='cuda:5'), in_proj_covar=tensor([0.0193, 0.0203, 0.0190, 0.0189, 0.0176, 0.0213, 0.0217, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:31:22,725 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1204, 1.9535, 1.6829, 1.8683, 1.8888, 1.9061, 1.8376, 2.6155], + device='cuda:5'), covar=tensor([0.3544, 0.4519, 0.3278, 0.4301, 0.4160, 0.2309, 0.4213, 0.1607], + device='cuda:5'), in_proj_covar=tensor([0.0285, 0.0260, 0.0226, 0.0276, 0.0249, 0.0217, 0.0249, 0.0229], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:31:33,284 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=93424.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:31:48,778 INFO [finetune.py:976] (5/7) Epoch 17, batch 1800, loss[loss=0.183, simple_loss=0.2591, pruned_loss=0.05347, over 4899.00 frames. ], tot_loss[loss=0.1816, simple_loss=0.2516, pruned_loss=0.05576, over 957256.77 frames. ], batch size: 36, lr: 3.40e-03, grad_scale: 32.0 +2023-03-26 20:31:56,889 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.045e+02 1.552e+02 1.846e+02 2.179e+02 3.576e+02, threshold=3.692e+02, percent-clipped=3.0 +2023-03-26 20:32:20,905 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93471.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:32:28,819 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. 
limit=2.0 +2023-03-26 20:32:37,026 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8575, 1.2007, 0.8027, 1.5280, 2.2676, 1.3060, 1.4077, 1.5655], + device='cuda:5'), covar=tensor([0.1452, 0.2278, 0.2074, 0.1306, 0.1769, 0.2007, 0.1576, 0.2088], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0095, 0.0110, 0.0092, 0.0119, 0.0094, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 20:32:41,048 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93487.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:32:47,412 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7486, 1.0196, 1.7955, 1.7275, 1.5640, 1.4959, 1.6715, 1.6995], + device='cuda:5'), covar=tensor([0.3173, 0.3253, 0.2764, 0.2910, 0.3981, 0.3169, 0.3561, 0.2645], + device='cuda:5'), in_proj_covar=tensor([0.0246, 0.0239, 0.0257, 0.0271, 0.0269, 0.0245, 0.0281, 0.0239], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:32:49,595 INFO [finetune.py:976] (5/7) Epoch 17, batch 1850, loss[loss=0.1883, simple_loss=0.253, pruned_loss=0.06182, over 4783.00 frames. ], tot_loss[loss=0.1814, simple_loss=0.2514, pruned_loss=0.05568, over 955804.40 frames. ], batch size: 29, lr: 3.40e-03, grad_scale: 32.0 +2023-03-26 20:33:20,423 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=93535.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:33:26,142 INFO [finetune.py:976] (5/7) Epoch 17, batch 1900, loss[loss=0.1662, simple_loss=0.2344, pruned_loss=0.04899, over 4918.00 frames. ], tot_loss[loss=0.181, simple_loss=0.2515, pruned_loss=0.05528, over 955637.12 frames. ], batch size: 33, lr: 3.40e-03, grad_scale: 32.0 +2023-03-26 20:33:30,349 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.241e+02 1.618e+02 1.925e+02 2.327e+02 3.543e+02, threshold=3.851e+02, percent-clipped=0.0 +2023-03-26 20:33:34,112 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93555.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:33:55,470 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93587.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:33:59,456 INFO [finetune.py:976] (5/7) Epoch 17, batch 1950, loss[loss=0.1755, simple_loss=0.2522, pruned_loss=0.04943, over 4897.00 frames. ], tot_loss[loss=0.1808, simple_loss=0.2508, pruned_loss=0.05542, over 954431.98 frames. ], batch size: 43, lr: 3.40e-03, grad_scale: 32.0 +2023-03-26 20:34:06,616 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=93603.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:34:25,029 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93631.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:34:27,389 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=93635.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:34:32,610 INFO [finetune.py:976] (5/7) Epoch 17, batch 2000, loss[loss=0.1563, simple_loss=0.232, pruned_loss=0.04027, over 4855.00 frames. ], tot_loss[loss=0.1787, simple_loss=0.2482, pruned_loss=0.05462, over 956029.43 frames. ], batch size: 44, lr: 3.40e-03, grad_scale: 32.0 +2023-03-26 20:34:36,988 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. 
limit=2.0 +2023-03-26 20:34:37,204 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.489e+01 1.528e+02 1.753e+02 2.103e+02 5.258e+02, threshold=3.506e+02, percent-clipped=1.0 +2023-03-26 20:34:48,139 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93666.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:34:56,476 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=93679.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:35:04,771 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.58 vs. limit=2.0 +2023-03-26 20:35:05,826 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93692.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:35:06,327 INFO [finetune.py:976] (5/7) Epoch 17, batch 2050, loss[loss=0.1589, simple_loss=0.2217, pruned_loss=0.04803, over 4873.00 frames. ], tot_loss[loss=0.1764, simple_loss=0.2454, pruned_loss=0.05371, over 957724.95 frames. ], batch size: 31, lr: 3.40e-03, grad_scale: 32.0 +2023-03-26 20:35:07,611 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2400, 1.8957, 2.5828, 4.2293, 3.0330, 3.0007, 1.1590, 3.5031], + device='cuda:5'), covar=tensor([0.1762, 0.1478, 0.1479, 0.0671, 0.0760, 0.1377, 0.1925, 0.0490], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0116, 0.0133, 0.0165, 0.0101, 0.0136, 0.0124, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 20:35:20,567 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=93714.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:35:37,804 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=93740.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:35:39,566 INFO [finetune.py:976] (5/7) Epoch 17, batch 2100, loss[loss=0.1353, simple_loss=0.2093, pruned_loss=0.03062, over 4775.00 frames. ], tot_loss[loss=0.176, simple_loss=0.2448, pruned_loss=0.05363, over 956177.40 frames. ], batch size: 27, lr: 3.40e-03, grad_scale: 32.0 +2023-03-26 20:35:43,619 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.402e+01 1.568e+02 1.860e+02 2.232e+02 5.340e+02, threshold=3.720e+02, percent-clipped=4.0 +2023-03-26 20:35:58,013 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93770.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:35:58,585 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93771.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:36:13,275 INFO [finetune.py:976] (5/7) Epoch 17, batch 2150, loss[loss=0.279, simple_loss=0.3388, pruned_loss=0.1096, over 4770.00 frames. ], tot_loss[loss=0.179, simple_loss=0.2479, pruned_loss=0.05499, over 955694.68 frames. ], batch size: 54, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:36:27,087 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-03-26 20:36:31,174 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=93819.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:36:37,562 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. 
limit=2.0 +2023-03-26 20:36:38,626 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93831.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:36:47,328 INFO [finetune.py:976] (5/7) Epoch 17, batch 2200, loss[loss=0.201, simple_loss=0.2686, pruned_loss=0.06674, over 4840.00 frames. ], tot_loss[loss=0.1801, simple_loss=0.2494, pruned_loss=0.05545, over 953216.78 frames. ], batch size: 44, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:36:51,478 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.569e+02 1.869e+02 2.308e+02 4.137e+02, threshold=3.738e+02, percent-clipped=1.0 +2023-03-26 20:37:01,141 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7462, 1.0387, 1.7573, 1.7077, 1.4961, 1.4245, 1.6141, 1.6182], + device='cuda:5'), covar=tensor([0.3268, 0.3509, 0.3052, 0.3289, 0.4250, 0.3479, 0.3960, 0.2964], + device='cuda:5'), in_proj_covar=tensor([0.0247, 0.0239, 0.0258, 0.0272, 0.0270, 0.0246, 0.0282, 0.0239], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:37:36,157 INFO [finetune.py:976] (5/7) Epoch 17, batch 2250, loss[loss=0.1926, simple_loss=0.2642, pruned_loss=0.06045, over 4761.00 frames. ], tot_loss[loss=0.1814, simple_loss=0.2511, pruned_loss=0.05587, over 954703.10 frames. ], batch size: 26, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:37:52,589 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-03-26 20:37:53,161 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6482, 1.6204, 1.4601, 1.6826, 2.0470, 1.9085, 1.6605, 1.4883], + device='cuda:5'), covar=tensor([0.0307, 0.0304, 0.0554, 0.0306, 0.0194, 0.0404, 0.0251, 0.0369], + device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0106, 0.0141, 0.0111, 0.0098, 0.0106, 0.0097, 0.0108], + device='cuda:5'), out_proj_covar=tensor([7.2761e-05, 8.2338e-05, 1.1161e-04, 8.5659e-05, 7.6429e-05, 7.8545e-05, + 7.2649e-05, 8.2387e-05], device='cuda:5') +2023-03-26 20:38:06,179 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.05 vs. limit=2.0 +2023-03-26 20:38:22,612 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-03-26 20:38:30,051 INFO [finetune.py:976] (5/7) Epoch 17, batch 2300, loss[loss=0.2133, simple_loss=0.2818, pruned_loss=0.07237, over 4812.00 frames. ], tot_loss[loss=0.1819, simple_loss=0.2517, pruned_loss=0.05604, over 954980.77 frames. ], batch size: 30, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:38:34,185 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.189e+02 1.601e+02 1.890e+02 2.328e+02 3.292e+02, threshold=3.781e+02, percent-clipped=0.0 +2023-03-26 20:38:51,893 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.44 vs. limit=5.0 +2023-03-26 20:38:56,123 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93981.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:39:03,825 INFO [finetune.py:976] (5/7) Epoch 17, batch 2350, loss[loss=0.1826, simple_loss=0.2547, pruned_loss=0.05523, over 4852.00 frames. ], tot_loss[loss=0.1803, simple_loss=0.2501, pruned_loss=0.05522, over 955819.54 frames. ], batch size: 44, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:39:34,635 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. 
limit=2.0 +2023-03-26 20:39:37,901 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=94042.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:39:38,383 INFO [finetune.py:976] (5/7) Epoch 17, batch 2400, loss[loss=0.17, simple_loss=0.2395, pruned_loss=0.05027, over 4836.00 frames. ], tot_loss[loss=0.179, simple_loss=0.2481, pruned_loss=0.05497, over 956850.31 frames. ], batch size: 40, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:39:42,000 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.3357, 3.7970, 3.9462, 4.1715, 4.0966, 3.8738, 4.4328, 1.3899], + device='cuda:5'), covar=tensor([0.0795, 0.0798, 0.0801, 0.1041, 0.1259, 0.1460, 0.0629, 0.5489], + device='cuda:5'), in_proj_covar=tensor([0.0351, 0.0245, 0.0276, 0.0292, 0.0334, 0.0281, 0.0302, 0.0296], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:39:42,505 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.024e+02 1.475e+02 1.781e+02 2.110e+02 4.538e+02, threshold=3.563e+02, percent-clipped=2.0 +2023-03-26 20:40:11,637 INFO [finetune.py:976] (5/7) Epoch 17, batch 2450, loss[loss=0.1902, simple_loss=0.2631, pruned_loss=0.05865, over 4851.00 frames. ], tot_loss[loss=0.1774, simple_loss=0.2462, pruned_loss=0.05429, over 954290.15 frames. ], batch size: 44, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:40:25,649 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-03-26 20:40:34,649 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=94126.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:40:45,406 INFO [finetune.py:976] (5/7) Epoch 17, batch 2500, loss[loss=0.2271, simple_loss=0.3034, pruned_loss=0.07537, over 4824.00 frames. ], tot_loss[loss=0.1815, simple_loss=0.2503, pruned_loss=0.05637, over 955464.87 frames. 
], batch size: 40, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:40:49,550 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.530e+01 1.683e+02 1.909e+02 2.220e+02 4.342e+02, threshold=3.819e+02, percent-clipped=2.0 +2023-03-26 20:40:50,321 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6072, 1.5424, 1.3211, 1.6899, 2.0518, 1.6814, 1.3520, 1.3647], + device='cuda:5'), covar=tensor([0.2278, 0.2046, 0.1995, 0.1621, 0.1755, 0.1358, 0.2588, 0.1992], + device='cuda:5'), in_proj_covar=tensor([0.0241, 0.0208, 0.0212, 0.0192, 0.0241, 0.0185, 0.0215, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:40:57,259 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3936, 1.4041, 1.8137, 1.7160, 1.4843, 3.2604, 1.3251, 1.4597], + device='cuda:5'), covar=tensor([0.1058, 0.1857, 0.1184, 0.1048, 0.1778, 0.0277, 0.1563, 0.1885], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0079, 0.0092, 0.0081, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 20:41:04,835 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.9535, 4.4161, 4.6715, 4.5731, 4.4317, 4.3392, 5.1397, 1.8011], + device='cuda:5'), covar=tensor([0.1073, 0.1596, 0.1341, 0.2288, 0.1747, 0.2252, 0.0856, 0.7386], + device='cuda:5'), in_proj_covar=tensor([0.0351, 0.0245, 0.0276, 0.0293, 0.0335, 0.0281, 0.0301, 0.0296], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:41:18,603 INFO [finetune.py:976] (5/7) Epoch 17, batch 2550, loss[loss=0.1931, simple_loss=0.2717, pruned_loss=0.05725, over 4812.00 frames. ], tot_loss[loss=0.1844, simple_loss=0.2542, pruned_loss=0.05729, over 957341.82 frames. ], batch size: 45, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:41:29,344 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7319, 3.6634, 3.4622, 1.6471, 3.7981, 2.8821, 0.8788, 2.4944], + device='cuda:5'), covar=tensor([0.2168, 0.1821, 0.1576, 0.3379, 0.1030, 0.0919, 0.4278, 0.1470], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0175, 0.0158, 0.0129, 0.0158, 0.0123, 0.0146, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 20:41:38,786 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=94223.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 20:41:52,380 INFO [finetune.py:976] (5/7) Epoch 17, batch 2600, loss[loss=0.2063, simple_loss=0.2838, pruned_loss=0.06439, over 4925.00 frames. ], tot_loss[loss=0.1847, simple_loss=0.2547, pruned_loss=0.05731, over 956928.64 frames. 
], batch size: 42, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:41:56,025 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.038e+02 1.647e+02 1.955e+02 2.265e+02 3.573e+02, threshold=3.911e+02, percent-clipped=0.0 +2023-03-26 20:42:13,948 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7435, 1.8528, 2.4077, 2.1233, 2.0324, 4.5038, 1.7989, 2.0952], + device='cuda:5'), covar=tensor([0.0919, 0.1717, 0.0988, 0.0921, 0.1428, 0.0177, 0.1378, 0.1622], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0073, 0.0078, 0.0091, 0.0080, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 20:42:19,678 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=94284.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 20:42:25,432 INFO [finetune.py:976] (5/7) Epoch 17, batch 2650, loss[loss=0.1802, simple_loss=0.2548, pruned_loss=0.05286, over 4902.00 frames. ], tot_loss[loss=0.1843, simple_loss=0.2546, pruned_loss=0.05701, over 956914.09 frames. ], batch size: 36, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:43:04,262 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4952, 1.1539, 0.7498, 1.3601, 1.8643, 0.7988, 1.2314, 1.3970], + device='cuda:5'), covar=tensor([0.1510, 0.1985, 0.1745, 0.1222, 0.1960, 0.2200, 0.1452, 0.1887], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0110, 0.0092, 0.0119, 0.0094, 0.0098, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 20:43:04,638 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-03-26 20:43:12,615 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=94337.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:43:20,898 INFO [finetune.py:976] (5/7) Epoch 17, batch 2700, loss[loss=0.196, simple_loss=0.2723, pruned_loss=0.05983, over 4837.00 frames. ], tot_loss[loss=0.1843, simple_loss=0.2544, pruned_loss=0.0571, over 957038.07 frames. ], batch size: 49, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:43:21,631 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0280, 0.8790, 0.8352, 1.1761, 1.1715, 1.1382, 1.0025, 0.8684], + device='cuda:5'), covar=tensor([0.0393, 0.0360, 0.0691, 0.0335, 0.0304, 0.0510, 0.0358, 0.0472], + device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0107, 0.0142, 0.0112, 0.0099, 0.0107, 0.0098, 0.0109], + device='cuda:5'), out_proj_covar=tensor([7.3042e-05, 8.2570e-05, 1.1207e-04, 8.6138e-05, 7.7043e-05, 7.9073e-05, + 7.3191e-05, 8.2885e-05], device='cuda:5') +2023-03-26 20:43:27,720 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=94348.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:43:28,202 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.025e+02 1.514e+02 1.766e+02 2.145e+02 4.618e+02, threshold=3.532e+02, percent-clipped=2.0 +2023-03-26 20:43:40,944 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. 
limit=2.0 +2023-03-26 20:43:51,602 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8155, 1.6621, 2.3479, 3.5659, 2.6161, 2.6474, 1.1427, 2.9241], + device='cuda:5'), covar=tensor([0.1821, 0.1513, 0.1343, 0.0605, 0.0721, 0.1432, 0.1956, 0.0522], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0117, 0.0134, 0.0167, 0.0102, 0.0138, 0.0126, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 20:44:10,183 INFO [finetune.py:976] (5/7) Epoch 17, batch 2750, loss[loss=0.1966, simple_loss=0.2594, pruned_loss=0.06685, over 4825.00 frames. ], tot_loss[loss=0.1823, simple_loss=0.2515, pruned_loss=0.05651, over 956632.27 frames. ], batch size: 33, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:44:20,195 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=94409.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 20:44:31,944 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94426.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:44:32,599 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6769, 0.6464, 1.7679, 1.5802, 1.4899, 1.4325, 1.5127, 1.6574], + device='cuda:5'), covar=tensor([0.3048, 0.3364, 0.2624, 0.3172, 0.3775, 0.3130, 0.3501, 0.2515], + device='cuda:5'), in_proj_covar=tensor([0.0248, 0.0240, 0.0259, 0.0274, 0.0271, 0.0246, 0.0284, 0.0239], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:44:43,085 INFO [finetune.py:976] (5/7) Epoch 17, batch 2800, loss[loss=0.1429, simple_loss=0.205, pruned_loss=0.04045, over 4873.00 frames. ], tot_loss[loss=0.1783, simple_loss=0.247, pruned_loss=0.05487, over 958464.05 frames. ], batch size: 34, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:44:47,185 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.577e+01 1.587e+02 1.887e+02 2.313e+02 4.372e+02, threshold=3.775e+02, percent-clipped=5.0 +2023-03-26 20:44:55,812 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1781, 2.0381, 2.0951, 1.1526, 2.3520, 2.6018, 2.1507, 1.9843], + device='cuda:5'), covar=tensor([0.0929, 0.0672, 0.0513, 0.0678, 0.0530, 0.0594, 0.0465, 0.0550], + device='cuda:5'), in_proj_covar=tensor([0.0127, 0.0154, 0.0125, 0.0129, 0.0133, 0.0131, 0.0145, 0.0150], + device='cuda:5'), out_proj_covar=tensor([9.3347e-05, 1.1134e-04, 8.9849e-05, 9.2042e-05, 9.3824e-05, 9.4270e-05, + 1.0477e-04, 1.0837e-04], device='cuda:5') +2023-03-26 20:45:03,348 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=94474.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:45:09,937 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3545, 1.4733, 1.2320, 1.5565, 1.7018, 1.6475, 1.4255, 1.4065], + device='cuda:5'), covar=tensor([0.0467, 0.0306, 0.0613, 0.0270, 0.0230, 0.0409, 0.0372, 0.0325], + device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0106, 0.0141, 0.0111, 0.0098, 0.0107, 0.0097, 0.0108], + device='cuda:5'), out_proj_covar=tensor([7.2620e-05, 8.2064e-05, 1.1144e-04, 8.5567e-05, 7.6462e-05, 7.8809e-05, + 7.2894e-05, 8.2584e-05], device='cuda:5') +2023-03-26 20:45:16,199 INFO [finetune.py:976] (5/7) Epoch 17, batch 2850, loss[loss=0.1669, simple_loss=0.245, pruned_loss=0.04443, over 4853.00 frames. ], tot_loss[loss=0.1778, simple_loss=0.2466, pruned_loss=0.05454, over 956886.68 frames. 
], batch size: 44, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:45:49,614 INFO [finetune.py:976] (5/7) Epoch 17, batch 2900, loss[loss=0.1564, simple_loss=0.2399, pruned_loss=0.03648, over 4735.00 frames. ], tot_loss[loss=0.1805, simple_loss=0.2491, pruned_loss=0.05589, over 954626.66 frames. ], batch size: 59, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:45:53,201 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.068e+02 1.550e+02 1.801e+02 2.117e+02 3.911e+02, threshold=3.601e+02, percent-clipped=1.0 +2023-03-26 20:46:12,851 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=94579.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 20:46:22,374 INFO [finetune.py:976] (5/7) Epoch 17, batch 2950, loss[loss=0.1844, simple_loss=0.2659, pruned_loss=0.05146, over 4762.00 frames. ], tot_loss[loss=0.1838, simple_loss=0.2527, pruned_loss=0.05745, over 952819.33 frames. ], batch size: 59, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:46:37,207 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-03-26 20:46:46,987 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7063, 1.7264, 1.5919, 1.6774, 1.1683, 3.5631, 1.4472, 1.8167], + device='cuda:5'), covar=tensor([0.3553, 0.2595, 0.2167, 0.2552, 0.1926, 0.0262, 0.2460, 0.1289], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0114, 0.0120, 0.0122, 0.0113, 0.0096, 0.0096, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 20:46:48,226 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4727, 1.3534, 1.4976, 0.8301, 1.5068, 1.4939, 1.4705, 1.2973], + device='cuda:5'), covar=tensor([0.0640, 0.0834, 0.0706, 0.1012, 0.0892, 0.0786, 0.0659, 0.1330], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0134, 0.0140, 0.0123, 0.0124, 0.0140, 0.0141, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:46:52,131 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94637.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:46:56,162 INFO [finetune.py:976] (5/7) Epoch 17, batch 3000, loss[loss=0.1702, simple_loss=0.2515, pruned_loss=0.04448, over 4850.00 frames. ], tot_loss[loss=0.1837, simple_loss=0.2528, pruned_loss=0.05729, over 953920.09 frames. ], batch size: 31, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:46:56,162 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 20:47:06,772 INFO [finetune.py:1010] (5/7) Epoch 17, validation: loss=0.1562, simple_loss=0.2257, pruned_loss=0.04335, over 2265189.00 frames. +2023-03-26 20:47:06,772 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 20:47:09,682 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-26 20:47:10,420 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.006e+02 1.605e+02 1.916e+02 2.337e+02 3.800e+02, threshold=3.832e+02, percent-clipped=2.0 +2023-03-26 20:47:33,746 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=94685.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:47:38,998 INFO [finetune.py:976] (5/7) Epoch 17, batch 3050, loss[loss=0.1679, simple_loss=0.256, pruned_loss=0.03989, over 4794.00 frames. ], tot_loss[loss=0.1842, simple_loss=0.2541, pruned_loss=0.05717, over 955974.53 frames. 
], batch size: 29, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:47:47,347 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=94704.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 20:48:19,141 INFO [finetune.py:976] (5/7) Epoch 17, batch 3100, loss[loss=0.1776, simple_loss=0.2347, pruned_loss=0.0602, over 4925.00 frames. ], tot_loss[loss=0.1818, simple_loss=0.2514, pruned_loss=0.05612, over 954974.30 frames. ], batch size: 33, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:48:27,692 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.157e+02 1.533e+02 1.793e+02 2.269e+02 8.706e+02, threshold=3.585e+02, percent-clipped=3.0 +2023-03-26 20:49:17,517 INFO [finetune.py:976] (5/7) Epoch 17, batch 3150, loss[loss=0.1978, simple_loss=0.2635, pruned_loss=0.06606, over 4829.00 frames. ], tot_loss[loss=0.1804, simple_loss=0.2495, pruned_loss=0.05566, over 953995.27 frames. ], batch size: 30, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:49:51,401 INFO [finetune.py:976] (5/7) Epoch 17, batch 3200, loss[loss=0.1793, simple_loss=0.2493, pruned_loss=0.05462, over 4818.00 frames. ], tot_loss[loss=0.1775, simple_loss=0.2465, pruned_loss=0.05427, over 955116.06 frames. ], batch size: 25, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:49:55,532 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.020e+02 1.463e+02 1.766e+02 2.027e+02 4.168e+02, threshold=3.532e+02, percent-clipped=1.0 +2023-03-26 20:50:16,324 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94879.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 20:50:25,250 INFO [finetune.py:976] (5/7) Epoch 17, batch 3250, loss[loss=0.1779, simple_loss=0.2564, pruned_loss=0.04968, over 4801.00 frames. ], tot_loss[loss=0.1781, simple_loss=0.247, pruned_loss=0.0546, over 953498.36 frames. ], batch size: 45, lr: 3.39e-03, grad_scale: 32.0 +2023-03-26 20:50:48,437 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=94927.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 20:50:58,674 INFO [finetune.py:976] (5/7) Epoch 17, batch 3300, loss[loss=0.2039, simple_loss=0.2507, pruned_loss=0.0786, over 4771.00 frames. ], tot_loss[loss=0.1812, simple_loss=0.2507, pruned_loss=0.05582, over 951850.33 frames. ], batch size: 26, lr: 3.38e-03, grad_scale: 32.0 +2023-03-26 20:51:02,384 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.785e+02 2.188e+02 2.532e+02 5.228e+02, threshold=4.375e+02, percent-clipped=4.0 +2023-03-26 20:51:16,332 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. 
limit=2.0 +2023-03-26 20:51:16,627 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7844, 1.2570, 1.6574, 1.7671, 1.5314, 1.5218, 1.6805, 1.6143], + device='cuda:5'), covar=tensor([0.4230, 0.3901, 0.3658, 0.3623, 0.4910, 0.4085, 0.4387, 0.3368], + device='cuda:5'), in_proj_covar=tensor([0.0249, 0.0241, 0.0260, 0.0274, 0.0272, 0.0248, 0.0284, 0.0241], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:51:23,790 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3364, 2.2740, 2.2137, 2.4297, 2.0734, 4.8771, 2.2528, 2.7520], + device='cuda:5'), covar=tensor([0.2847, 0.2184, 0.1770, 0.1969, 0.1362, 0.0152, 0.1946, 0.0973], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0115, 0.0120, 0.0122, 0.0113, 0.0096, 0.0096, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 20:51:32,712 INFO [finetune.py:976] (5/7) Epoch 17, batch 3350, loss[loss=0.1688, simple_loss=0.2456, pruned_loss=0.04602, over 4728.00 frames. ], tot_loss[loss=0.1826, simple_loss=0.2524, pruned_loss=0.05642, over 952073.50 frames. ], batch size: 59, lr: 3.38e-03, grad_scale: 32.0 +2023-03-26 20:51:36,490 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7832, 3.6935, 3.5418, 1.7378, 3.8668, 2.8455, 0.9456, 2.6419], + device='cuda:5'), covar=tensor([0.2496, 0.2049, 0.1640, 0.3310, 0.1035, 0.1007, 0.4143, 0.1419], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0176, 0.0159, 0.0129, 0.0159, 0.0124, 0.0147, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 20:51:40,201 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95004.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:51:43,883 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-03-26 20:52:06,015 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3944, 2.2482, 2.0180, 2.2681, 2.1060, 2.1804, 2.1970, 2.9205], + device='cuda:5'), covar=tensor([0.3692, 0.4110, 0.3161, 0.3556, 0.3847, 0.2309, 0.3652, 0.1657], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0261, 0.0228, 0.0277, 0.0251, 0.0219, 0.0251, 0.0231], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:52:06,494 INFO [finetune.py:976] (5/7) Epoch 17, batch 3400, loss[loss=0.1561, simple_loss=0.2141, pruned_loss=0.04902, over 4374.00 frames. ], tot_loss[loss=0.1842, simple_loss=0.2541, pruned_loss=0.05718, over 953135.50 frames. 
], batch size: 19, lr: 3.38e-03, grad_scale: 64.0 +2023-03-26 20:52:10,131 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.060e+02 1.553e+02 1.863e+02 2.086e+02 3.757e+02, threshold=3.727e+02, percent-clipped=0.0 +2023-03-26 20:52:12,047 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=95052.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:52:16,221 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8349, 1.7266, 1.5922, 1.9224, 2.1955, 1.9173, 1.6163, 1.4934], + device='cuda:5'), covar=tensor([0.2388, 0.2180, 0.1975, 0.1735, 0.1768, 0.1214, 0.2400, 0.2106], + device='cuda:5'), in_proj_covar=tensor([0.0242, 0.0210, 0.0213, 0.0192, 0.0242, 0.0187, 0.0216, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:52:19,904 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4731, 2.2016, 2.7173, 1.7023, 2.5879, 2.6660, 2.0116, 2.7588], + device='cuda:5'), covar=tensor([0.1382, 0.1932, 0.1638, 0.2318, 0.0939, 0.1601, 0.2788, 0.0960], + device='cuda:5'), in_proj_covar=tensor([0.0193, 0.0204, 0.0190, 0.0190, 0.0177, 0.0212, 0.0217, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:52:28,682 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.44 vs. limit=5.0 +2023-03-26 20:52:40,293 INFO [finetune.py:976] (5/7) Epoch 17, batch 3450, loss[loss=0.2177, simple_loss=0.2869, pruned_loss=0.07421, over 4746.00 frames. ], tot_loss[loss=0.1849, simple_loss=0.2548, pruned_loss=0.05753, over 953980.97 frames. ], batch size: 59, lr: 3.38e-03, grad_scale: 64.0 +2023-03-26 20:52:45,929 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3725, 2.3211, 1.7885, 2.2418, 2.2024, 1.9238, 2.6012, 2.3073], + device='cuda:5'), covar=tensor([0.1236, 0.1963, 0.2879, 0.2616, 0.2561, 0.1593, 0.2978, 0.1657], + device='cuda:5'), in_proj_covar=tensor([0.0183, 0.0188, 0.0234, 0.0252, 0.0244, 0.0202, 0.0212, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:52:47,680 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=95105.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 20:53:10,911 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-03-26 20:53:13,090 INFO [finetune.py:976] (5/7) Epoch 17, batch 3500, loss[loss=0.1535, simple_loss=0.2279, pruned_loss=0.03961, over 4816.00 frames. ], tot_loss[loss=0.1835, simple_loss=0.2527, pruned_loss=0.05713, over 954349.29 frames. 
], batch size: 45, lr: 3.38e-03, grad_scale: 64.0 +2023-03-26 20:53:17,181 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.085e+02 1.619e+02 1.940e+02 2.279e+02 3.817e+02, threshold=3.880e+02, percent-clipped=1.0 +2023-03-26 20:53:35,211 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95166.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 20:53:43,067 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1738, 2.0159, 1.8395, 1.9713, 1.8267, 1.8764, 1.9014, 2.6766], + device='cuda:5'), covar=tensor([0.3423, 0.4118, 0.3187, 0.3964, 0.4340, 0.2337, 0.4018, 0.1536], + device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0260, 0.0227, 0.0276, 0.0251, 0.0219, 0.0250, 0.0230], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:53:47,156 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6730, 3.2907, 3.1653, 1.5512, 3.4092, 2.6491, 0.9096, 2.3692], + device='cuda:5'), covar=tensor([0.2467, 0.2121, 0.1796, 0.3319, 0.1312, 0.0960, 0.4065, 0.1541], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0176, 0.0160, 0.0129, 0.0159, 0.0124, 0.0147, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 20:54:05,084 INFO [finetune.py:976] (5/7) Epoch 17, batch 3550, loss[loss=0.1787, simple_loss=0.2545, pruned_loss=0.05149, over 4764.00 frames. ], tot_loss[loss=0.1813, simple_loss=0.25, pruned_loss=0.05631, over 955296.87 frames. ], batch size: 26, lr: 3.38e-03, grad_scale: 64.0 +2023-03-26 20:54:51,060 INFO [finetune.py:976] (5/7) Epoch 17, batch 3600, loss[loss=0.1704, simple_loss=0.2195, pruned_loss=0.06062, over 4111.00 frames. ], tot_loss[loss=0.1781, simple_loss=0.2463, pruned_loss=0.05491, over 954002.24 frames. ], batch size: 17, lr: 3.38e-03, grad_scale: 64.0 +2023-03-26 20:54:54,641 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.130e+02 1.580e+02 1.871e+02 2.182e+02 4.206e+02, threshold=3.742e+02, percent-clipped=1.0 +2023-03-26 20:55:02,001 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=95260.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:55:06,876 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=95268.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:55:10,950 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9054, 4.5452, 4.2845, 2.2800, 4.6040, 3.5885, 0.7170, 3.1927], + device='cuda:5'), covar=tensor([0.2412, 0.1967, 0.1403, 0.3324, 0.0840, 0.0856, 0.4916, 0.1535], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0175, 0.0159, 0.0128, 0.0158, 0.0123, 0.0147, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 20:55:24,046 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-03-26 20:55:24,758 INFO [finetune.py:976] (5/7) Epoch 17, batch 3650, loss[loss=0.1788, simple_loss=0.2632, pruned_loss=0.04714, over 4908.00 frames. ], tot_loss[loss=0.1796, simple_loss=0.2483, pruned_loss=0.05547, over 955989.11 frames. 
], batch size: 37, lr: 3.38e-03, grad_scale: 64.0 +2023-03-26 20:55:29,797 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7903, 1.6549, 1.4979, 1.2986, 1.6169, 1.5771, 1.6184, 2.1938], + device='cuda:5'), covar=tensor([0.3801, 0.3875, 0.3188, 0.3728, 0.3612, 0.2407, 0.3502, 0.1724], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0262, 0.0227, 0.0277, 0.0251, 0.0219, 0.0251, 0.0231], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:55:42,969 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95321.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:55:48,258 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95329.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:55:55,802 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=95339.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:55:58,603 INFO [finetune.py:976] (5/7) Epoch 17, batch 3700, loss[loss=0.2132, simple_loss=0.2849, pruned_loss=0.07072, over 4902.00 frames. ], tot_loss[loss=0.1819, simple_loss=0.2515, pruned_loss=0.05616, over 955605.05 frames. ], batch size: 37, lr: 3.38e-03, grad_scale: 64.0 +2023-03-26 20:56:02,229 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.130e+02 1.605e+02 1.907e+02 2.409e+02 3.957e+02, threshold=3.813e+02, percent-clipped=4.0 +2023-03-26 20:56:31,736 INFO [finetune.py:976] (5/7) Epoch 17, batch 3750, loss[loss=0.2117, simple_loss=0.2803, pruned_loss=0.0715, over 4866.00 frames. ], tot_loss[loss=0.1821, simple_loss=0.252, pruned_loss=0.05608, over 954796.19 frames. ], batch size: 34, lr: 3.38e-03, grad_scale: 64.0 +2023-03-26 20:56:36,738 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95400.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:56:57,096 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3238, 2.2396, 2.2407, 1.6535, 2.2227, 2.4442, 2.4321, 1.8277], + device='cuda:5'), covar=tensor([0.0556, 0.0550, 0.0700, 0.0907, 0.0637, 0.0661, 0.0555, 0.1142], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0135, 0.0141, 0.0123, 0.0123, 0.0141, 0.0142, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 20:57:04,527 INFO [finetune.py:976] (5/7) Epoch 17, batch 3800, loss[loss=0.1769, simple_loss=0.2544, pruned_loss=0.04974, over 4817.00 frames. ], tot_loss[loss=0.1832, simple_loss=0.2533, pruned_loss=0.05659, over 954584.83 frames. ], batch size: 33, lr: 3.38e-03, grad_scale: 64.0 +2023-03-26 20:57:09,546 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.108e+02 1.488e+02 1.737e+02 2.235e+02 4.648e+02, threshold=3.475e+02, percent-clipped=3.0 +2023-03-26 20:57:13,990 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.31 vs. limit=5.0 +2023-03-26 20:57:16,900 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95461.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 20:57:25,262 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.74 vs. limit=2.0 +2023-03-26 20:57:37,563 INFO [finetune.py:976] (5/7) Epoch 17, batch 3850, loss[loss=0.1762, simple_loss=0.2389, pruned_loss=0.05673, over 4924.00 frames. ], tot_loss[loss=0.1828, simple_loss=0.2525, pruned_loss=0.05657, over 954244.21 frames. 
], batch size: 38, lr: 3.38e-03, grad_scale: 64.0 +2023-03-26 20:58:10,766 INFO [finetune.py:976] (5/7) Epoch 17, batch 3900, loss[loss=0.1784, simple_loss=0.2503, pruned_loss=0.05329, over 4744.00 frames. ], tot_loss[loss=0.1821, simple_loss=0.2509, pruned_loss=0.05667, over 952840.54 frames. ], batch size: 27, lr: 3.38e-03, grad_scale: 64.0 +2023-03-26 20:58:15,376 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.225e+02 1.550e+02 1.834e+02 2.229e+02 4.290e+02, threshold=3.669e+02, percent-clipped=3.0 +2023-03-26 20:58:33,262 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3650, 1.3982, 1.7880, 2.4351, 1.6590, 2.1245, 0.9231, 2.1337], + device='cuda:5'), covar=tensor([0.1744, 0.1426, 0.1002, 0.0806, 0.0953, 0.1545, 0.1503, 0.0610], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0116, 0.0134, 0.0166, 0.0101, 0.0136, 0.0124, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 20:58:46,334 INFO [finetune.py:976] (5/7) Epoch 17, batch 3950, loss[loss=0.1778, simple_loss=0.2382, pruned_loss=0.05869, over 4751.00 frames. ], tot_loss[loss=0.179, simple_loss=0.2477, pruned_loss=0.05516, over 953369.53 frames. ], batch size: 26, lr: 3.38e-03, grad_scale: 32.0 +2023-03-26 20:59:04,372 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95616.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:59:05,036 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=95617.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:59:13,757 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95624.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 20:59:34,009 INFO [finetune.py:976] (5/7) Epoch 17, batch 4000, loss[loss=0.1543, simple_loss=0.2157, pruned_loss=0.0465, over 4730.00 frames. ], tot_loss[loss=0.1792, simple_loss=0.2473, pruned_loss=0.05554, over 955253.82 frames. ], batch size: 23, lr: 3.38e-03, grad_scale: 32.0 +2023-03-26 20:59:42,359 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.075e+02 1.540e+02 1.979e+02 2.285e+02 3.877e+02, threshold=3.958e+02, percent-clipped=2.0 +2023-03-26 21:00:12,632 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95678.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:00:26,083 INFO [finetune.py:976] (5/7) Epoch 17, batch 4050, loss[loss=0.2612, simple_loss=0.3221, pruned_loss=0.1001, over 4166.00 frames. ], tot_loss[loss=0.184, simple_loss=0.2527, pruned_loss=0.05766, over 955152.17 frames. ], batch size: 65, lr: 3.38e-03, grad_scale: 32.0 +2023-03-26 21:00:27,351 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95695.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:00:45,768 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-03-26 21:00:59,871 INFO [finetune.py:976] (5/7) Epoch 17, batch 4100, loss[loss=0.2023, simple_loss=0.26, pruned_loss=0.07229, over 4745.00 frames. ], tot_loss[loss=0.1847, simple_loss=0.2539, pruned_loss=0.05781, over 955816.97 frames. 
], batch size: 27, lr: 3.38e-03, grad_scale: 32.0 +2023-03-26 21:01:04,067 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.117e+02 1.600e+02 1.864e+02 2.304e+02 4.240e+02, threshold=3.729e+02, percent-clipped=2.0 +2023-03-26 21:01:12,323 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95761.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 21:01:33,032 INFO [finetune.py:976] (5/7) Epoch 17, batch 4150, loss[loss=0.1591, simple_loss=0.2398, pruned_loss=0.03925, over 4783.00 frames. ], tot_loss[loss=0.1858, simple_loss=0.2555, pruned_loss=0.05806, over 956627.46 frames. ], batch size: 29, lr: 3.38e-03, grad_scale: 32.0 +2023-03-26 21:01:44,401 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=95809.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 21:01:44,414 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.1759, 2.8507, 2.9885, 2.9375, 2.7858, 2.7945, 3.2618, 1.0449], + device='cuda:5'), covar=tensor([0.1486, 0.1825, 0.1547, 0.2087, 0.2348, 0.2544, 0.1695, 0.7114], + device='cuda:5'), in_proj_covar=tensor([0.0351, 0.0246, 0.0277, 0.0293, 0.0336, 0.0282, 0.0302, 0.0297], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:01:46,234 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=95812.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 21:02:06,758 INFO [finetune.py:976] (5/7) Epoch 17, batch 4200, loss[loss=0.1494, simple_loss=0.2272, pruned_loss=0.03581, over 4872.00 frames. ], tot_loss[loss=0.1855, simple_loss=0.2557, pruned_loss=0.05763, over 955469.72 frames. ], batch size: 34, lr: 3.38e-03, grad_scale: 32.0 +2023-03-26 21:02:09,289 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=95847.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:02:09,901 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9041, 1.3943, 0.8856, 1.7008, 2.1613, 1.5058, 1.6317, 1.7258], + device='cuda:5'), covar=tensor([0.1462, 0.1989, 0.1830, 0.1151, 0.1862, 0.1986, 0.1384, 0.1897], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0110, 0.0092, 0.0118, 0.0094, 0.0098, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 21:02:11,509 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.143e+02 1.538e+02 1.932e+02 2.354e+02 8.206e+02, threshold=3.863e+02, percent-clipped=2.0 +2023-03-26 21:02:27,893 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95873.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 21:02:29,086 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6854, 1.5983, 1.9353, 1.1415, 1.6854, 1.8102, 1.4711, 2.0037], + device='cuda:5'), covar=tensor([0.1224, 0.2084, 0.1352, 0.1933, 0.0959, 0.1339, 0.3063, 0.0876], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0203, 0.0189, 0.0189, 0.0176, 0.0211, 0.0215, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:02:39,922 INFO [finetune.py:976] (5/7) Epoch 17, batch 4250, loss[loss=0.1867, simple_loss=0.2466, pruned_loss=0.0634, over 4416.00 frames. ], tot_loss[loss=0.1846, simple_loss=0.2539, pruned_loss=0.05766, over 955711.40 frames. 
], batch size: 19, lr: 3.38e-03, grad_scale: 32.0 +2023-03-26 21:02:43,533 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.8481, 3.3810, 3.5271, 3.7459, 3.6428, 3.3762, 3.9162, 1.1944], + device='cuda:5'), covar=tensor([0.0875, 0.0850, 0.0867, 0.0983, 0.1317, 0.1742, 0.0871, 0.5469], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0245, 0.0276, 0.0292, 0.0336, 0.0282, 0.0301, 0.0295], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:02:50,098 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95908.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 21:02:55,861 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95916.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:03:02,186 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95924.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:03:12,437 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0550, 1.7798, 2.3564, 1.4567, 2.0898, 2.2897, 1.6555, 2.4980], + device='cuda:5'), covar=tensor([0.1225, 0.1766, 0.1436, 0.2161, 0.0958, 0.1410, 0.2554, 0.0690], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0203, 0.0189, 0.0189, 0.0176, 0.0211, 0.0215, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:03:13,534 INFO [finetune.py:976] (5/7) Epoch 17, batch 4300, loss[loss=0.186, simple_loss=0.2461, pruned_loss=0.06294, over 4912.00 frames. ], tot_loss[loss=0.1826, simple_loss=0.2513, pruned_loss=0.05699, over 956628.94 frames. ], batch size: 46, lr: 3.38e-03, grad_scale: 32.0 +2023-03-26 21:03:18,258 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.024e+02 1.465e+02 1.654e+02 2.123e+02 3.225e+02, threshold=3.308e+02, percent-clipped=0.0 +2023-03-26 21:03:27,770 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=95964.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:03:32,759 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-03-26 21:03:33,598 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=95972.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:03:34,210 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95973.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:03:39,475 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=95980.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:03:47,254 INFO [finetune.py:976] (5/7) Epoch 17, batch 4350, loss[loss=0.2146, simple_loss=0.2788, pruned_loss=0.07516, over 4911.00 frames. ], tot_loss[loss=0.1786, simple_loss=0.2467, pruned_loss=0.05519, over 957069.70 frames. ], batch size: 43, lr: 3.38e-03, grad_scale: 32.0 +2023-03-26 21:03:48,533 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95995.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:03:54,937 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.34 vs. 
limit=5.0 +2023-03-26 21:04:19,595 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4465, 2.4464, 1.9645, 1.0812, 2.1562, 1.8643, 1.7540, 2.1695], + device='cuda:5'), covar=tensor([0.0970, 0.0748, 0.1770, 0.2086, 0.1611, 0.2265, 0.2259, 0.1024], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0196, 0.0199, 0.0183, 0.0212, 0.0207, 0.0223, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:04:20,848 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96041.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:04:21,942 INFO [finetune.py:976] (5/7) Epoch 17, batch 4400, loss[loss=0.1949, simple_loss=0.2717, pruned_loss=0.05908, over 4834.00 frames. ], tot_loss[loss=0.1788, simple_loss=0.247, pruned_loss=0.05525, over 955976.44 frames. ], batch size: 49, lr: 3.38e-03, grad_scale: 32.0 +2023-03-26 21:04:22,005 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=96043.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:04:28,702 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.398e+01 1.490e+02 1.749e+02 2.200e+02 3.209e+02, threshold=3.497e+02, percent-clipped=0.0 +2023-03-26 21:04:58,586 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5778, 1.6336, 1.3405, 1.6581, 1.9573, 1.7826, 1.5499, 1.4212], + device='cuda:5'), covar=tensor([0.0320, 0.0284, 0.0640, 0.0323, 0.0187, 0.0534, 0.0361, 0.0385], + device='cuda:5'), in_proj_covar=tensor([0.0096, 0.0108, 0.0144, 0.0113, 0.0100, 0.0109, 0.0098, 0.0110], + device='cuda:5'), out_proj_covar=tensor([7.4644e-05, 8.3521e-05, 1.1343e-04, 8.6725e-05, 7.7838e-05, 8.0387e-05, + 7.3427e-05, 8.3960e-05], device='cuda:5') +2023-03-26 21:04:59,721 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96080.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:05:13,436 INFO [finetune.py:976] (5/7) Epoch 17, batch 4450, loss[loss=0.1673, simple_loss=0.2447, pruned_loss=0.04499, over 4895.00 frames. ], tot_loss[loss=0.1813, simple_loss=0.2504, pruned_loss=0.0561, over 955561.55 frames. ], batch size: 43, lr: 3.38e-03, grad_scale: 32.0 +2023-03-26 21:05:58,465 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96141.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:05:59,554 INFO [finetune.py:976] (5/7) Epoch 17, batch 4500, loss[loss=0.1585, simple_loss=0.239, pruned_loss=0.039, over 4768.00 frames. ], tot_loss[loss=0.1828, simple_loss=0.2522, pruned_loss=0.05666, over 954217.29 frames. ], batch size: 26, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:06:03,840 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.121e+02 1.724e+02 1.946e+02 2.358e+02 4.504e+02, threshold=3.891e+02, percent-clipped=3.0 +2023-03-26 21:06:13,985 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96165.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:06:15,775 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96168.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 21:06:17,950 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-03-26 21:06:33,244 INFO [finetune.py:976] (5/7) Epoch 17, batch 4550, loss[loss=0.1853, simple_loss=0.2659, pruned_loss=0.05229, over 4795.00 frames. ], tot_loss[loss=0.1837, simple_loss=0.2536, pruned_loss=0.05696, over 954544.99 frames. 
], batch size: 45, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:06:39,493 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96203.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 21:06:54,577 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96226.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:07:07,173 INFO [finetune.py:976] (5/7) Epoch 17, batch 4600, loss[loss=0.2139, simple_loss=0.2784, pruned_loss=0.07471, over 4810.00 frames. ], tot_loss[loss=0.1841, simple_loss=0.254, pruned_loss=0.05708, over 955054.60 frames. ], batch size: 39, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:07:08,138 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-03-26 21:07:11,424 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.099e+02 1.530e+02 1.886e+02 2.340e+02 4.335e+02, threshold=3.772e+02, percent-clipped=2.0 +2023-03-26 21:07:26,495 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96273.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:07:40,044 INFO [finetune.py:976] (5/7) Epoch 17, batch 4650, loss[loss=0.1595, simple_loss=0.2359, pruned_loss=0.04156, over 4812.00 frames. ], tot_loss[loss=0.1818, simple_loss=0.2514, pruned_loss=0.05613, over 952816.31 frames. ], batch size: 40, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:07:57,967 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-03-26 21:07:58,449 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=96321.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:08:00,257 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96324.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:08:08,025 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96336.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:08:13,197 INFO [finetune.py:976] (5/7) Epoch 17, batch 4700, loss[loss=0.1626, simple_loss=0.2268, pruned_loss=0.04918, over 4762.00 frames. ], tot_loss[loss=0.1796, simple_loss=0.2482, pruned_loss=0.05546, over 951449.96 frames. ], batch size: 26, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:08:18,319 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.127e+02 1.531e+02 1.882e+02 2.216e+02 4.319e+02, threshold=3.764e+02, percent-clipped=2.0 +2023-03-26 21:08:19,720 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.61 vs. limit=5.0 +2023-03-26 21:08:40,445 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96385.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:08:43,982 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96390.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:08:45,650 INFO [finetune.py:976] (5/7) Epoch 17, batch 4750, loss[loss=0.1687, simple_loss=0.2438, pruned_loss=0.04683, over 4897.00 frames. ], tot_loss[loss=0.1781, simple_loss=0.2468, pruned_loss=0.05471, over 953869.71 frames. ], batch size: 43, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:08:54,726 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. 
limit=2.0 +2023-03-26 21:09:09,992 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3992, 1.5107, 1.7083, 1.6395, 1.4691, 3.1680, 1.2745, 1.4481], + device='cuda:5'), covar=tensor([0.0993, 0.1686, 0.1210, 0.0974, 0.1566, 0.0260, 0.1479, 0.1756], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0078, 0.0091, 0.0081, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 21:09:13,611 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8285, 1.6312, 1.4206, 1.2130, 1.5626, 1.5517, 1.5618, 2.1664], + device='cuda:5'), covar=tensor([0.3560, 0.3705, 0.3050, 0.3487, 0.3717, 0.2191, 0.3428, 0.1675], + device='cuda:5'), in_proj_covar=tensor([0.0288, 0.0262, 0.0228, 0.0278, 0.0253, 0.0220, 0.0252, 0.0232], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:09:14,762 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96436.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:09:19,420 INFO [finetune.py:976] (5/7) Epoch 17, batch 4800, loss[loss=0.1928, simple_loss=0.274, pruned_loss=0.05576, over 4762.00 frames. ], tot_loss[loss=0.1815, simple_loss=0.2505, pruned_loss=0.05623, over 954155.57 frames. ], batch size: 54, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:09:25,023 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.112e+02 1.609e+02 1.875e+02 2.422e+02 6.864e+02, threshold=3.750e+02, percent-clipped=2.0 +2023-03-26 21:09:25,777 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96451.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:09:36,645 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96468.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 21:09:54,759 INFO [finetune.py:976] (5/7) Epoch 17, batch 4850, loss[loss=0.1841, simple_loss=0.2531, pruned_loss=0.05755, over 4895.00 frames. ], tot_loss[loss=0.1846, simple_loss=0.2543, pruned_loss=0.05743, over 956341.88 frames. ], batch size: 32, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:10:02,918 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96503.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:10:15,703 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=96516.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 21:10:18,747 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96521.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:10:45,278 INFO [finetune.py:976] (5/7) Epoch 17, batch 4900, loss[loss=0.1723, simple_loss=0.2435, pruned_loss=0.0506, over 4925.00 frames. ], tot_loss[loss=0.1868, simple_loss=0.2566, pruned_loss=0.05851, over 955115.69 frames. 
], batch size: 33, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:10:54,623 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.014e+02 1.592e+02 1.896e+02 2.164e+02 3.347e+02, threshold=3.792e+02, percent-clipped=0.0 +2023-03-26 21:10:55,804 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=96551.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:11:14,883 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8449, 1.3011, 1.9080, 1.8429, 1.6064, 1.5505, 1.7722, 1.7341], + device='cuda:5'), covar=tensor([0.3700, 0.3852, 0.3148, 0.3443, 0.4611, 0.3648, 0.4305, 0.3098], + device='cuda:5'), in_proj_covar=tensor([0.0247, 0.0240, 0.0258, 0.0273, 0.0272, 0.0247, 0.0281, 0.0239], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:11:26,124 INFO [finetune.py:976] (5/7) Epoch 17, batch 4950, loss[loss=0.1999, simple_loss=0.2734, pruned_loss=0.06313, over 4885.00 frames. ], tot_loss[loss=0.1866, simple_loss=0.2571, pruned_loss=0.05806, over 957040.18 frames. ], batch size: 43, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:11:55,616 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96636.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:11:59,760 INFO [finetune.py:976] (5/7) Epoch 17, batch 5000, loss[loss=0.1501, simple_loss=0.2182, pruned_loss=0.041, over 4726.00 frames. ], tot_loss[loss=0.1838, simple_loss=0.2543, pruned_loss=0.05669, over 956233.91 frames. ], batch size: 54, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:12:04,411 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.088e+02 1.531e+02 1.819e+02 2.156e+02 3.437e+02, threshold=3.638e+02, percent-clipped=0.0 +2023-03-26 21:12:24,601 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96680.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:12:26,963 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=96684.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:12:33,329 INFO [finetune.py:976] (5/7) Epoch 17, batch 5050, loss[loss=0.1863, simple_loss=0.2478, pruned_loss=0.06243, over 4790.00 frames. ], tot_loss[loss=0.1804, simple_loss=0.2503, pruned_loss=0.05528, over 956695.05 frames. ], batch size: 51, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:12:36,907 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-03-26 21:12:37,084 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1131, 3.5497, 3.7276, 3.8378, 3.8967, 3.6947, 4.1364, 1.6996], + device='cuda:5'), covar=tensor([0.0699, 0.0905, 0.0894, 0.1010, 0.1077, 0.1250, 0.0732, 0.4634], + device='cuda:5'), in_proj_covar=tensor([0.0352, 0.0246, 0.0278, 0.0294, 0.0337, 0.0283, 0.0301, 0.0296], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:13:01,816 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96736.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:13:06,443 INFO [finetune.py:976] (5/7) Epoch 17, batch 5100, loss[loss=0.1573, simple_loss=0.2257, pruned_loss=0.0444, over 4771.00 frames. ], tot_loss[loss=0.1777, simple_loss=0.2467, pruned_loss=0.05432, over 956324.45 frames. 
], batch size: 26, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:13:08,322 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96746.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:13:10,588 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.091e+02 1.513e+02 1.848e+02 2.246e+02 3.685e+02, threshold=3.695e+02, percent-clipped=1.0 +2023-03-26 21:13:28,943 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96776.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:13:33,647 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=96784.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:13:38,579 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0510, 1.8277, 2.4360, 3.9198, 2.6870, 2.6485, 0.9319, 3.1845], + device='cuda:5'), covar=tensor([0.1602, 0.1362, 0.1298, 0.0496, 0.0669, 0.1639, 0.1930, 0.0493], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0116, 0.0135, 0.0166, 0.0102, 0.0137, 0.0125, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 21:13:39,090 INFO [finetune.py:976] (5/7) Epoch 17, batch 5150, loss[loss=0.1742, simple_loss=0.2446, pruned_loss=0.05195, over 4817.00 frames. ], tot_loss[loss=0.1782, simple_loss=0.2471, pruned_loss=0.05469, over 955579.16 frames. ], batch size: 38, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:13:58,210 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96821.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:14:08,378 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96837.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:14:11,883 INFO [finetune.py:976] (5/7) Epoch 17, batch 5200, loss[loss=0.1925, simple_loss=0.2772, pruned_loss=0.05393, over 4904.00 frames. ], tot_loss[loss=0.1809, simple_loss=0.2498, pruned_loss=0.056, over 951667.09 frames. ], batch size: 43, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:14:16,597 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.064e+02 1.635e+02 1.977e+02 2.300e+02 5.939e+02, threshold=3.955e+02, percent-clipped=5.0 +2023-03-26 21:14:20,451 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. 
limit=2.0 +2023-03-26 21:14:29,996 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=96869.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:14:31,836 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.6389, 4.0082, 4.2411, 4.4649, 4.3695, 4.1126, 4.7115, 1.5311], + device='cuda:5'), covar=tensor([0.0695, 0.0881, 0.0819, 0.0876, 0.1241, 0.1520, 0.0604, 0.5226], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0245, 0.0277, 0.0291, 0.0335, 0.0281, 0.0300, 0.0295], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:14:40,810 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5390, 1.5179, 2.0104, 3.1701, 2.0489, 2.1478, 0.9324, 2.6381], + device='cuda:5'), covar=tensor([0.1800, 0.1384, 0.1322, 0.0642, 0.0831, 0.1478, 0.1958, 0.0548], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0116, 0.0135, 0.0166, 0.0102, 0.0137, 0.0125, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 21:14:44,942 INFO [finetune.py:976] (5/7) Epoch 17, batch 5250, loss[loss=0.195, simple_loss=0.2664, pruned_loss=0.06175, over 4927.00 frames. ], tot_loss[loss=0.1829, simple_loss=0.2527, pruned_loss=0.05661, over 952632.98 frames. ], batch size: 33, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:15:21,072 INFO [finetune.py:976] (5/7) Epoch 17, batch 5300, loss[loss=0.1789, simple_loss=0.2507, pruned_loss=0.05355, over 4914.00 frames. ], tot_loss[loss=0.1849, simple_loss=0.2549, pruned_loss=0.05743, over 955581.45 frames. ], batch size: 33, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:15:21,226 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8306, 1.3981, 1.8618, 1.8558, 1.6459, 1.6399, 1.8543, 1.6802], + device='cuda:5'), covar=tensor([0.4011, 0.4053, 0.3415, 0.3697, 0.5278, 0.3930, 0.4580, 0.3415], + device='cuda:5'), in_proj_covar=tensor([0.0249, 0.0241, 0.0260, 0.0275, 0.0273, 0.0248, 0.0283, 0.0241], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:15:30,006 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.125e+02 1.644e+02 1.959e+02 2.379e+02 3.599e+02, threshold=3.918e+02, percent-clipped=0.0 +2023-03-26 21:16:05,196 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96980.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:16:17,644 INFO [finetune.py:976] (5/7) Epoch 17, batch 5350, loss[loss=0.1722, simple_loss=0.2414, pruned_loss=0.05153, over 4708.00 frames. ], tot_loss[loss=0.1841, simple_loss=0.2545, pruned_loss=0.05687, over 953773.34 frames. ], batch size: 23, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:16:25,304 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97000.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:16:44,613 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=97028.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:16:54,581 INFO [finetune.py:976] (5/7) Epoch 17, batch 5400, loss[loss=0.1542, simple_loss=0.2173, pruned_loss=0.04557, over 4289.00 frames. ], tot_loss[loss=0.1822, simple_loss=0.2518, pruned_loss=0.05632, over 955046.90 frames. 
], batch size: 65, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:16:56,508 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97046.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:16:58,799 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.012e+02 1.591e+02 1.860e+02 2.348e+02 4.043e+02, threshold=3.721e+02, percent-clipped=1.0 +2023-03-26 21:17:06,263 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97061.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:17:27,397 INFO [finetune.py:976] (5/7) Epoch 17, batch 5450, loss[loss=0.1691, simple_loss=0.2307, pruned_loss=0.05368, over 4814.00 frames. ], tot_loss[loss=0.1798, simple_loss=0.2486, pruned_loss=0.05557, over 954786.48 frames. ], batch size: 51, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:17:28,069 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=97094.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:17:37,256 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6591, 0.6877, 1.7475, 1.6309, 1.5415, 1.4879, 1.5600, 1.6286], + device='cuda:5'), covar=tensor([0.3702, 0.3717, 0.2969, 0.3253, 0.4033, 0.3155, 0.3574, 0.2876], + device='cuda:5'), in_proj_covar=tensor([0.0250, 0.0241, 0.0260, 0.0275, 0.0274, 0.0248, 0.0284, 0.0242], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:17:52,425 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97132.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:18:00,547 INFO [finetune.py:976] (5/7) Epoch 17, batch 5500, loss[loss=0.133, simple_loss=0.2086, pruned_loss=0.02873, over 4820.00 frames. ], tot_loss[loss=0.1769, simple_loss=0.2454, pruned_loss=0.05424, over 955317.94 frames. ], batch size: 51, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:18:04,745 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.221e+01 1.533e+02 1.769e+02 2.042e+02 3.202e+02, threshold=3.539e+02, percent-clipped=0.0 +2023-03-26 21:18:33,719 INFO [finetune.py:976] (5/7) Epoch 17, batch 5550, loss[loss=0.2009, simple_loss=0.2743, pruned_loss=0.06377, over 4913.00 frames. ], tot_loss[loss=0.1781, simple_loss=0.2467, pruned_loss=0.05476, over 953969.93 frames. ], batch size: 43, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:18:34,940 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7202, 1.7855, 1.5335, 1.9033, 2.3611, 1.9047, 1.7321, 1.4650], + device='cuda:5'), covar=tensor([0.2015, 0.1861, 0.1752, 0.1412, 0.1679, 0.1170, 0.2083, 0.1827], + device='cuda:5'), in_proj_covar=tensor([0.0241, 0.0208, 0.0212, 0.0191, 0.0241, 0.0186, 0.0215, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:19:05,110 INFO [finetune.py:976] (5/7) Epoch 17, batch 5600, loss[loss=0.1813, simple_loss=0.2599, pruned_loss=0.05132, over 4930.00 frames. ], tot_loss[loss=0.182, simple_loss=0.2517, pruned_loss=0.05612, over 956043.57 frames. 
], batch size: 33, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:19:09,080 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.154e+02 1.641e+02 1.914e+02 2.406e+02 4.422e+02, threshold=3.827e+02, percent-clipped=1.0 +2023-03-26 21:19:14,866 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97260.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:19:18,896 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7199, 1.5321, 2.0789, 1.7799, 1.5609, 3.6426, 1.4399, 1.5531], + device='cuda:5'), covar=tensor([0.0913, 0.1927, 0.1057, 0.1031, 0.1743, 0.0209, 0.1537, 0.1885], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0078, 0.0091, 0.0081, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 21:19:34,291 INFO [finetune.py:976] (5/7) Epoch 17, batch 5650, loss[loss=0.2442, simple_loss=0.3065, pruned_loss=0.09092, over 4845.00 frames. ], tot_loss[loss=0.1847, simple_loss=0.2556, pruned_loss=0.05689, over 956779.11 frames. ], batch size: 49, lr: 3.37e-03, grad_scale: 32.0 +2023-03-26 21:19:51,445 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97321.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:19:58,130 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7862, 1.6538, 1.6380, 1.7037, 1.1501, 2.9408, 1.2490, 1.6504], + device='cuda:5'), covar=tensor([0.2782, 0.2086, 0.1907, 0.2009, 0.1643, 0.0301, 0.2218, 0.1157], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0115, 0.0120, 0.0122, 0.0113, 0.0096, 0.0095, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 21:20:04,108 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97342.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:20:04,603 INFO [finetune.py:976] (5/7) Epoch 17, batch 5700, loss[loss=0.1463, simple_loss=0.2029, pruned_loss=0.04489, over 4201.00 frames. ], tot_loss[loss=0.1826, simple_loss=0.2521, pruned_loss=0.05653, over 936569.85 frames. ], batch size: 18, lr: 3.36e-03, grad_scale: 32.0 +2023-03-26 21:20:07,048 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97347.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:20:08,735 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.528e+01 1.533e+02 1.739e+02 2.212e+02 3.283e+02, threshold=3.478e+02, percent-clipped=0.0 +2023-03-26 21:20:12,306 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97356.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:20:35,935 INFO [finetune.py:976] (5/7) Epoch 18, batch 0, loss[loss=0.1832, simple_loss=0.2606, pruned_loss=0.05296, over 4925.00 frames. ], tot_loss[loss=0.1832, simple_loss=0.2606, pruned_loss=0.05296, over 4925.00 frames. ], batch size: 42, lr: 3.36e-03, grad_scale: 32.0 +2023-03-26 21:20:35,935 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 21:20:46,783 INFO [finetune.py:1010] (5/7) Epoch 18, validation: loss=0.1584, simple_loss=0.2281, pruned_loss=0.0444, over 2265189.00 frames. 
+2023-03-26 21:20:46,783 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 21:20:49,171 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97374.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:21:26,122 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97403.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 21:21:34,096 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97408.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:21:47,680 INFO [finetune.py:976] (5/7) Epoch 18, batch 50, loss[loss=0.1629, simple_loss=0.2489, pruned_loss=0.03847, over 4812.00 frames. ], tot_loss[loss=0.1859, simple_loss=0.2556, pruned_loss=0.05805, over 214037.10 frames. ], batch size: 25, lr: 3.36e-03, grad_scale: 32.0 +2023-03-26 21:21:58,963 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97432.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:22:00,773 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97435.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:22:09,807 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.163e+01 1.563e+02 1.902e+02 2.308e+02 3.615e+02, threshold=3.804e+02, percent-clipped=1.0 +2023-03-26 21:22:23,491 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.62 vs. limit=2.0 +2023-03-26 21:22:25,132 INFO [finetune.py:976] (5/7) Epoch 18, batch 100, loss[loss=0.1761, simple_loss=0.246, pruned_loss=0.05307, over 4899.00 frames. ], tot_loss[loss=0.1796, simple_loss=0.2485, pruned_loss=0.05533, over 379799.62 frames. ], batch size: 35, lr: 3.36e-03, grad_scale: 32.0 +2023-03-26 21:22:31,666 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=97480.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:22:58,722 INFO [finetune.py:976] (5/7) Epoch 18, batch 150, loss[loss=0.1657, simple_loss=0.2425, pruned_loss=0.04441, over 4842.00 frames. ], tot_loss[loss=0.1792, simple_loss=0.2471, pruned_loss=0.05564, over 508472.17 frames. ], batch size: 47, lr: 3.36e-03, grad_scale: 32.0 +2023-03-26 21:23:13,607 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3289, 2.9765, 2.8005, 1.3444, 3.0903, 2.2183, 0.6396, 2.0436], + device='cuda:5'), covar=tensor([0.2582, 0.2148, 0.1907, 0.3666, 0.1393, 0.1227, 0.4443, 0.1694], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0175, 0.0158, 0.0128, 0.0158, 0.0122, 0.0146, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 21:23:17,210 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.032e+02 1.559e+02 1.792e+02 2.227e+02 6.409e+02, threshold=3.584e+02, percent-clipped=2.0 +2023-03-26 21:23:20,340 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97555.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:23:32,348 INFO [finetune.py:976] (5/7) Epoch 18, batch 200, loss[loss=0.2082, simple_loss=0.2725, pruned_loss=0.07194, over 4851.00 frames. ], tot_loss[loss=0.1775, simple_loss=0.2449, pruned_loss=0.05507, over 606778.22 frames. 
], batch size: 47, lr: 3.36e-03, grad_scale: 32.0 +2023-03-26 21:23:43,756 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4172, 1.0690, 0.7790, 1.3017, 1.8697, 0.7096, 1.1406, 1.3427], + device='cuda:5'), covar=tensor([0.1682, 0.2313, 0.1789, 0.1314, 0.2061, 0.2111, 0.1608, 0.2112], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0095, 0.0110, 0.0092, 0.0119, 0.0094, 0.0098, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 21:24:01,854 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97616.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:24:01,912 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97616.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:24:05,308 INFO [finetune.py:976] (5/7) Epoch 18, batch 250, loss[loss=0.1837, simple_loss=0.2606, pruned_loss=0.0534, over 4893.00 frames. ], tot_loss[loss=0.1784, simple_loss=0.247, pruned_loss=0.05492, over 684431.84 frames. ], batch size: 43, lr: 3.36e-03, grad_scale: 64.0 +2023-03-26 21:24:16,420 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7372, 1.7519, 2.2123, 3.4943, 2.4103, 2.3982, 1.2106, 2.8065], + device='cuda:5'), covar=tensor([0.1749, 0.1355, 0.1363, 0.0576, 0.0740, 0.1246, 0.1801, 0.0518], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0134, 0.0164, 0.0100, 0.0136, 0.0123, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 21:24:18,253 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6476, 1.2164, 0.8964, 1.5465, 2.0403, 1.2563, 1.3778, 1.5561], + device='cuda:5'), covar=tensor([0.1612, 0.2101, 0.1865, 0.1178, 0.1874, 0.2062, 0.1485, 0.1923], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0110, 0.0092, 0.0119, 0.0094, 0.0098, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 21:24:24,121 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.114e+02 1.615e+02 1.960e+02 2.417e+02 4.168e+02, threshold=3.921e+02, percent-clipped=3.0 +2023-03-26 21:24:27,908 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97656.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:24:37,889 INFO [finetune.py:976] (5/7) Epoch 18, batch 300, loss[loss=0.1883, simple_loss=0.2625, pruned_loss=0.0571, over 4760.00 frames. ], tot_loss[loss=0.1793, simple_loss=0.2487, pruned_loss=0.05497, over 743922.96 frames. ], batch size: 27, lr: 3.36e-03, grad_scale: 64.0 +2023-03-26 21:24:40,281 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97674.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 21:24:56,232 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97698.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 21:24:59,262 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97703.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:24:59,859 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=97704.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:25:10,593 INFO [finetune.py:976] (5/7) Epoch 18, batch 350, loss[loss=0.2248, simple_loss=0.2963, pruned_loss=0.07659, over 4921.00 frames. 
], tot_loss[loss=0.1812, simple_loss=0.2507, pruned_loss=0.05584, over 791067.76 frames. ], batch size: 42, lr: 3.36e-03, grad_scale: 64.0 +2023-03-26 21:25:17,547 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97730.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:25:21,077 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97735.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 21:25:28,926 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3524, 2.1895, 1.7509, 2.2387, 2.1024, 1.9284, 2.5581, 2.3163], + device='cuda:5'), covar=tensor([0.1398, 0.2248, 0.3222, 0.2915, 0.2945, 0.1806, 0.3215, 0.1864], + device='cuda:5'), in_proj_covar=tensor([0.0184, 0.0188, 0.0235, 0.0254, 0.0246, 0.0203, 0.0214, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:25:30,595 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 1.611e+02 1.882e+02 2.387e+02 3.928e+02, threshold=3.763e+02, percent-clipped=1.0 +2023-03-26 21:25:43,300 INFO [finetune.py:976] (5/7) Epoch 18, batch 400, loss[loss=0.1764, simple_loss=0.2468, pruned_loss=0.05303, over 4812.00 frames. ], tot_loss[loss=0.1827, simple_loss=0.2526, pruned_loss=0.05639, over 827906.72 frames. ], batch size: 30, lr: 3.36e-03, grad_scale: 64.0 +2023-03-26 21:25:46,127 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4221, 1.5856, 1.0710, 2.2015, 2.6891, 1.8261, 2.0026, 2.2134], + device='cuda:5'), covar=tensor([0.1408, 0.1996, 0.1893, 0.1082, 0.1540, 0.1852, 0.1385, 0.1886], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0095, 0.0110, 0.0092, 0.0119, 0.0094, 0.0098, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 21:25:52,696 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.63 vs. limit=5.0 +2023-03-26 21:26:07,923 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-03-26 21:26:22,842 INFO [finetune.py:976] (5/7) Epoch 18, batch 450, loss[loss=0.1961, simple_loss=0.2563, pruned_loss=0.0679, over 4847.00 frames. ], tot_loss[loss=0.1821, simple_loss=0.252, pruned_loss=0.05613, over 858160.42 frames. ], batch size: 49, lr: 3.36e-03, grad_scale: 64.0 +2023-03-26 21:26:52,265 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1688, 2.0597, 1.7209, 2.0422, 2.1089, 1.8778, 2.4370, 2.1788], + device='cuda:5'), covar=tensor([0.1127, 0.1938, 0.2709, 0.2334, 0.2377, 0.1481, 0.2738, 0.1578], + device='cuda:5'), in_proj_covar=tensor([0.0183, 0.0187, 0.0234, 0.0253, 0.0244, 0.0201, 0.0213, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:27:01,042 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.011e+02 1.519e+02 1.772e+02 2.128e+02 3.513e+02, threshold=3.544e+02, percent-clipped=0.0 +2023-03-26 21:27:16,934 INFO [finetune.py:976] (5/7) Epoch 18, batch 500, loss[loss=0.1784, simple_loss=0.244, pruned_loss=0.05636, over 4829.00 frames. ], tot_loss[loss=0.1795, simple_loss=0.2486, pruned_loss=0.05518, over 880445.15 frames. 
], batch size: 49, lr: 3.36e-03, grad_scale: 64.0 +2023-03-26 21:27:29,769 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97888.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:27:30,811 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-03-26 21:27:44,610 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97911.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:27:47,674 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97916.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:27:50,635 INFO [finetune.py:976] (5/7) Epoch 18, batch 550, loss[loss=0.2072, simple_loss=0.2609, pruned_loss=0.07674, over 4907.00 frames. ], tot_loss[loss=0.177, simple_loss=0.2452, pruned_loss=0.05437, over 896018.20 frames. ], batch size: 35, lr: 3.36e-03, grad_scale: 64.0 +2023-03-26 21:28:19,259 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97949.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:28:19,731 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.294e+01 1.530e+02 1.816e+02 2.060e+02 3.951e+02, threshold=3.633e+02, percent-clipped=3.0 +2023-03-26 21:28:28,266 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=97964.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:28:28,360 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0460, 1.2521, 1.9267, 1.8791, 1.7332, 1.7051, 1.7576, 1.8169], + device='cuda:5'), covar=tensor([0.3919, 0.3891, 0.3589, 0.3911, 0.5085, 0.4055, 0.4562, 0.3322], + device='cuda:5'), in_proj_covar=tensor([0.0250, 0.0241, 0.0261, 0.0277, 0.0275, 0.0249, 0.0285, 0.0242], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:28:32,537 INFO [finetune.py:976] (5/7) Epoch 18, batch 600, loss[loss=0.2843, simple_loss=0.3388, pruned_loss=0.1149, over 4734.00 frames. ], tot_loss[loss=0.1797, simple_loss=0.2477, pruned_loss=0.05582, over 908354.83 frames. ], batch size: 59, lr: 3.36e-03, grad_scale: 64.0 +2023-03-26 21:28:47,278 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.1515, 1.2995, 1.3685, 0.7560, 1.2682, 1.5272, 1.5825, 1.2482], + device='cuda:5'), covar=tensor([0.0856, 0.0531, 0.0532, 0.0487, 0.0465, 0.0630, 0.0305, 0.0736], + device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0150, 0.0123, 0.0126, 0.0129, 0.0128, 0.0142, 0.0147], + device='cuda:5'), out_proj_covar=tensor([9.0904e-05, 1.0867e-04, 8.7769e-05, 8.9685e-05, 9.1087e-05, 9.2212e-05, + 1.0221e-04, 1.0569e-04], device='cuda:5') +2023-03-26 21:28:51,971 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97998.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:28:56,788 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98003.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:28:57,550 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.59 vs. limit=5.0 +2023-03-26 21:29:07,660 INFO [finetune.py:976] (5/7) Epoch 18, batch 650, loss[loss=0.1458, simple_loss=0.2122, pruned_loss=0.03968, over 4708.00 frames. ], tot_loss[loss=0.1819, simple_loss=0.2507, pruned_loss=0.05657, over 918312.06 frames. 
], batch size: 23, lr: 3.36e-03, grad_scale: 64.0 +2023-03-26 21:29:13,279 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98030.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 21:29:13,315 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98030.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:29:25,508 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=98046.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:29:28,301 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.027e+02 1.543e+02 1.878e+02 2.128e+02 3.672e+02, threshold=3.757e+02, percent-clipped=1.0 +2023-03-26 21:29:28,992 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=98051.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:29:41,538 INFO [finetune.py:976] (5/7) Epoch 18, batch 700, loss[loss=0.1623, simple_loss=0.2311, pruned_loss=0.04678, over 4825.00 frames. ], tot_loss[loss=0.182, simple_loss=0.2517, pruned_loss=0.05615, over 926968.03 frames. ], batch size: 33, lr: 3.36e-03, grad_scale: 64.0 +2023-03-26 21:29:45,872 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=98078.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:30:05,697 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98106.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:30:15,235 INFO [finetune.py:976] (5/7) Epoch 18, batch 750, loss[loss=0.1819, simple_loss=0.2472, pruned_loss=0.05826, over 4763.00 frames. ], tot_loss[loss=0.1834, simple_loss=0.2537, pruned_loss=0.05652, over 934674.82 frames. ], batch size: 28, lr: 3.36e-03, grad_scale: 64.0 +2023-03-26 21:30:19,031 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-03-26 21:30:30,854 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-03-26 21:30:34,724 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.151e+02 1.501e+02 1.785e+02 2.309e+02 4.193e+02, threshold=3.569e+02, percent-clipped=2.0 +2023-03-26 21:30:46,076 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98167.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:30:48,364 INFO [finetune.py:976] (5/7) Epoch 18, batch 800, loss[loss=0.1862, simple_loss=0.2608, pruned_loss=0.05573, over 4885.00 frames. ], tot_loss[loss=0.1836, simple_loss=0.2539, pruned_loss=0.05669, over 939377.70 frames. ], batch size: 32, lr: 3.36e-03, grad_scale: 32.0 +2023-03-26 21:30:50,554 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-03-26 21:30:56,328 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5711, 1.4637, 1.4833, 1.5027, 0.9749, 2.9688, 1.1364, 1.4863], + device='cuda:5'), covar=tensor([0.3273, 0.2513, 0.2143, 0.2369, 0.1933, 0.0235, 0.2659, 0.1359], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0121, 0.0123, 0.0113, 0.0096, 0.0096, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 21:31:15,633 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98211.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:31:22,190 INFO [finetune.py:976] (5/7) Epoch 18, batch 850, loss[loss=0.1619, simple_loss=0.2301, pruned_loss=0.04682, over 4779.00 frames. 
], tot_loss[loss=0.183, simple_loss=0.2523, pruned_loss=0.05688, over 941800.85 frames. ], batch size: 28, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:31:45,837 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98244.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:31:55,530 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.155e+02 1.511e+02 1.794e+02 2.111e+02 3.360e+02, threshold=3.589e+02, percent-clipped=0.0 +2023-03-26 21:32:07,093 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=98259.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:32:15,536 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1415, 2.0073, 1.8174, 2.1583, 2.6154, 2.0905, 2.0967, 1.6733], + device='cuda:5'), covar=tensor([0.1965, 0.1899, 0.1755, 0.1439, 0.1750, 0.1151, 0.1960, 0.1699], + device='cuda:5'), in_proj_covar=tensor([0.0242, 0.0209, 0.0212, 0.0191, 0.0242, 0.0187, 0.0215, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:32:25,141 INFO [finetune.py:976] (5/7) Epoch 18, batch 900, loss[loss=0.1478, simple_loss=0.209, pruned_loss=0.04332, over 4817.00 frames. ], tot_loss[loss=0.1811, simple_loss=0.2497, pruned_loss=0.05619, over 946283.76 frames. ], batch size: 38, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:33:02,858 INFO [finetune.py:976] (5/7) Epoch 18, batch 950, loss[loss=0.1662, simple_loss=0.2314, pruned_loss=0.05049, over 4907.00 frames. ], tot_loss[loss=0.1796, simple_loss=0.2478, pruned_loss=0.05573, over 947963.48 frames. ], batch size: 36, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:33:08,459 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98330.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 21:33:23,082 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.033e+02 1.511e+02 1.760e+02 2.160e+02 3.441e+02, threshold=3.521e+02, percent-clipped=0.0 +2023-03-26 21:33:37,316 INFO [finetune.py:976] (5/7) Epoch 18, batch 1000, loss[loss=0.1493, simple_loss=0.2415, pruned_loss=0.02849, over 4839.00 frames. ], tot_loss[loss=0.1797, simple_loss=0.2485, pruned_loss=0.05547, over 947780.57 frames. ], batch size: 49, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:33:42,165 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=98378.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 21:34:02,783 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98410.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:34:06,237 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3500, 1.4256, 1.4981, 1.5476, 1.4386, 2.8347, 1.3544, 1.4588], + device='cuda:5'), covar=tensor([0.0959, 0.1812, 0.1208, 0.0932, 0.1618, 0.0308, 0.1463, 0.1764], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0074, 0.0077, 0.0091, 0.0080, 0.0084, 0.0078], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 21:34:14,797 INFO [finetune.py:976] (5/7) Epoch 18, batch 1050, loss[loss=0.2153, simple_loss=0.2765, pruned_loss=0.077, over 4753.00 frames. ], tot_loss[loss=0.1821, simple_loss=0.2517, pruned_loss=0.05628, over 950078.25 frames. 
], batch size: 59, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:34:26,451 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9331, 1.7970, 1.5283, 1.4084, 1.8846, 1.6439, 1.8217, 1.8934], + device='cuda:5'), covar=tensor([0.1371, 0.1915, 0.3165, 0.2513, 0.2706, 0.1839, 0.3151, 0.1803], + device='cuda:5'), in_proj_covar=tensor([0.0184, 0.0187, 0.0235, 0.0254, 0.0245, 0.0203, 0.0214, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:34:36,628 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 1.558e+02 1.856e+02 2.287e+02 5.753e+02, threshold=3.713e+02, percent-clipped=4.0 +2023-03-26 21:34:44,676 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98462.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:34:51,516 INFO [finetune.py:976] (5/7) Epoch 18, batch 1100, loss[loss=0.1826, simple_loss=0.255, pruned_loss=0.05516, over 4836.00 frames. ], tot_loss[loss=0.1842, simple_loss=0.2538, pruned_loss=0.05724, over 951709.97 frames. ], batch size: 49, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:34:51,648 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98471.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:35:08,487 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.8267, 0.8612, 0.8063, 0.9799, 1.0118, 0.9711, 0.9010, 0.8337], + device='cuda:5'), covar=tensor([0.0451, 0.0282, 0.0543, 0.0264, 0.0256, 0.0358, 0.0262, 0.0321], + device='cuda:5'), in_proj_covar=tensor([0.0097, 0.0108, 0.0144, 0.0113, 0.0101, 0.0109, 0.0099, 0.0111], + device='cuda:5'), out_proj_covar=tensor([7.5148e-05, 8.3451e-05, 1.1378e-04, 8.6666e-05, 7.8786e-05, 8.0849e-05, + 7.4099e-05, 8.4446e-05], device='cuda:5') +2023-03-26 21:35:14,991 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9062, 1.5055, 1.9245, 1.9597, 1.7338, 1.6273, 1.8918, 1.7615], + device='cuda:5'), covar=tensor([0.3915, 0.4013, 0.3276, 0.3463, 0.4826, 0.3746, 0.4153, 0.3131], + device='cuda:5'), in_proj_covar=tensor([0.0248, 0.0240, 0.0259, 0.0274, 0.0273, 0.0248, 0.0283, 0.0241], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:35:24,232 INFO [finetune.py:976] (5/7) Epoch 18, batch 1150, loss[loss=0.1639, simple_loss=0.2415, pruned_loss=0.0431, over 4923.00 frames. ], tot_loss[loss=0.1865, simple_loss=0.2565, pruned_loss=0.05822, over 951683.60 frames. ], batch size: 38, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:35:39,053 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98543.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:35:39,640 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98544.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:35:43,756 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.197e+02 1.665e+02 1.912e+02 2.323e+02 4.830e+02, threshold=3.825e+02, percent-clipped=3.0 +2023-03-26 21:35:57,262 INFO [finetune.py:976] (5/7) Epoch 18, batch 1200, loss[loss=0.1676, simple_loss=0.2377, pruned_loss=0.04878, over 4821.00 frames. ], tot_loss[loss=0.1838, simple_loss=0.2538, pruned_loss=0.05688, over 952517.34 frames. 
], batch size: 39, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:35:59,142 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7627, 0.9961, 1.6378, 1.5886, 1.4550, 1.3775, 1.4833, 1.6535], + device='cuda:5'), covar=tensor([0.3683, 0.3762, 0.3654, 0.3803, 0.4857, 0.4028, 0.4378, 0.3476], + device='cuda:5'), in_proj_covar=tensor([0.0249, 0.0241, 0.0259, 0.0275, 0.0274, 0.0248, 0.0284, 0.0241], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:36:12,051 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=98592.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:36:19,566 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98604.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:36:31,240 INFO [finetune.py:976] (5/7) Epoch 18, batch 1250, loss[loss=0.1628, simple_loss=0.237, pruned_loss=0.04428, over 4187.00 frames. ], tot_loss[loss=0.1823, simple_loss=0.2518, pruned_loss=0.05636, over 951702.21 frames. ], batch size: 65, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:37:01,601 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.074e+02 1.465e+02 1.816e+02 2.333e+02 4.053e+02, threshold=3.632e+02, percent-clipped=1.0 +2023-03-26 21:37:29,821 INFO [finetune.py:976] (5/7) Epoch 18, batch 1300, loss[loss=0.1634, simple_loss=0.2433, pruned_loss=0.04171, over 4748.00 frames. ], tot_loss[loss=0.1787, simple_loss=0.248, pruned_loss=0.05468, over 952718.58 frames. ], batch size: 26, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:38:11,395 INFO [finetune.py:976] (5/7) Epoch 18, batch 1350, loss[loss=0.2293, simple_loss=0.2996, pruned_loss=0.0795, over 4900.00 frames. ], tot_loss[loss=0.1792, simple_loss=0.248, pruned_loss=0.05522, over 951564.74 frames. ], batch size: 43, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:38:31,464 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.112e+02 1.676e+02 1.899e+02 2.271e+02 3.691e+02, threshold=3.798e+02, percent-clipped=1.0 +2023-03-26 21:38:38,225 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98762.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:38:41,102 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98766.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:38:44,610 INFO [finetune.py:976] (5/7) Epoch 18, batch 1400, loss[loss=0.2099, simple_loss=0.2731, pruned_loss=0.07332, over 4895.00 frames. ], tot_loss[loss=0.1818, simple_loss=0.251, pruned_loss=0.05628, over 951497.88 frames. 
], batch size: 35, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:38:59,245 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2729, 1.9342, 1.9419, 0.9432, 2.2023, 2.4590, 2.2017, 1.8299], + device='cuda:5'), covar=tensor([0.0835, 0.0793, 0.0527, 0.0675, 0.0483, 0.0547, 0.0412, 0.0685], + device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0151, 0.0123, 0.0126, 0.0131, 0.0128, 0.0143, 0.0147], + device='cuda:5'), out_proj_covar=tensor([9.1204e-05, 1.0935e-04, 8.8548e-05, 9.0044e-05, 9.2325e-05, 9.2170e-05, + 1.0261e-04, 1.0609e-04], device='cuda:5') +2023-03-26 21:39:06,064 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7944, 1.7387, 1.5362, 1.9459, 2.1710, 1.9682, 1.5287, 1.4896], + device='cuda:5'), covar=tensor([0.2102, 0.1987, 0.1896, 0.1490, 0.1682, 0.1171, 0.2377, 0.1917], + device='cuda:5'), in_proj_covar=tensor([0.0241, 0.0209, 0.0212, 0.0191, 0.0241, 0.0186, 0.0214, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:39:10,804 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=98810.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:39:17,935 INFO [finetune.py:976] (5/7) Epoch 18, batch 1450, loss[loss=0.1256, simple_loss=0.1929, pruned_loss=0.0291, over 4296.00 frames. ], tot_loss[loss=0.1839, simple_loss=0.2534, pruned_loss=0.05721, over 951889.70 frames. ], batch size: 18, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:39:40,442 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.618e+02 1.947e+02 2.343e+02 3.750e+02, threshold=3.895e+02, percent-clipped=0.0 +2023-03-26 21:39:52,508 INFO [finetune.py:976] (5/7) Epoch 18, batch 1500, loss[loss=0.2405, simple_loss=0.3041, pruned_loss=0.08845, over 4822.00 frames. ], tot_loss[loss=0.1852, simple_loss=0.255, pruned_loss=0.05767, over 952117.37 frames. ], batch size: 47, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:40:06,458 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98890.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:40:08,065 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-03-26 21:40:10,021 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98895.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:40:12,872 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98899.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:40:26,172 INFO [finetune.py:976] (5/7) Epoch 18, batch 1550, loss[loss=0.1849, simple_loss=0.2579, pruned_loss=0.05594, over 4765.00 frames. ], tot_loss[loss=0.1843, simple_loss=0.2546, pruned_loss=0.05696, over 954252.15 frames. ], batch size: 28, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:40:47,925 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.111e+02 1.516e+02 1.782e+02 2.221e+02 4.511e+02, threshold=3.564e+02, percent-clipped=1.0 +2023-03-26 21:40:48,074 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98951.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:40:51,141 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98956.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:41:00,140 INFO [finetune.py:976] (5/7) Epoch 18, batch 1600, loss[loss=0.1565, simple_loss=0.2214, pruned_loss=0.0458, over 4910.00 frames. 
], tot_loss[loss=0.1832, simple_loss=0.2527, pruned_loss=0.05686, over 954961.80 frames. ], batch size: 36, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:41:00,252 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98971.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:41:33,949 INFO [finetune.py:976] (5/7) Epoch 18, batch 1650, loss[loss=0.1834, simple_loss=0.2429, pruned_loss=0.06195, over 4820.00 frames. ], tot_loss[loss=0.1814, simple_loss=0.2503, pruned_loss=0.05625, over 955613.75 frames. ], batch size: 41, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:41:35,304 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5300, 1.6610, 1.3741, 1.6788, 1.9273, 1.7618, 1.6608, 1.4687], + device='cuda:5'), covar=tensor([0.0374, 0.0280, 0.0523, 0.0292, 0.0188, 0.0616, 0.0297, 0.0389], + device='cuda:5'), in_proj_covar=tensor([0.0097, 0.0107, 0.0145, 0.0112, 0.0101, 0.0109, 0.0099, 0.0111], + device='cuda:5'), out_proj_covar=tensor([7.5191e-05, 8.3043e-05, 1.1414e-04, 8.6339e-05, 7.8439e-05, 8.0993e-05, + 7.3824e-05, 8.4533e-05], device='cuda:5') +2023-03-26 21:41:41,740 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99032.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:42:02,530 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.819e+01 1.633e+02 2.015e+02 2.343e+02 3.841e+02, threshold=4.030e+02, percent-clipped=3.0 +2023-03-26 21:42:03,107 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99051.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:42:22,692 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99066.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:42:25,675 INFO [finetune.py:976] (5/7) Epoch 18, batch 1700, loss[loss=0.2191, simple_loss=0.2958, pruned_loss=0.0712, over 4726.00 frames. ], tot_loss[loss=0.1788, simple_loss=0.247, pruned_loss=0.05524, over 955527.70 frames. ], batch size: 59, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:43:01,002 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99112.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:43:06,387 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=99114.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:43:10,654 INFO [finetune.py:976] (5/7) Epoch 18, batch 1750, loss[loss=0.1471, simple_loss=0.2184, pruned_loss=0.03795, over 4758.00 frames. ], tot_loss[loss=0.1806, simple_loss=0.2493, pruned_loss=0.05595, over 955784.59 frames. ], batch size: 28, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:43:38,844 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.106e+01 1.643e+02 1.917e+02 2.422e+02 4.876e+02, threshold=3.835e+02, percent-clipped=2.0 +2023-03-26 21:43:51,913 INFO [finetune.py:976] (5/7) Epoch 18, batch 1800, loss[loss=0.1996, simple_loss=0.273, pruned_loss=0.06305, over 4908.00 frames. ], tot_loss[loss=0.183, simple_loss=0.2527, pruned_loss=0.05666, over 957856.93 frames. 
], batch size: 37, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:44:10,869 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99199.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:44:20,982 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7982, 1.3532, 1.7751, 1.7255, 1.5782, 1.5563, 1.6361, 1.7267], + device='cuda:5'), covar=tensor([0.4964, 0.4547, 0.3882, 0.4639, 0.5615, 0.4634, 0.5620, 0.3881], + device='cuda:5'), in_proj_covar=tensor([0.0248, 0.0239, 0.0259, 0.0274, 0.0273, 0.0247, 0.0283, 0.0241], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:44:25,713 INFO [finetune.py:976] (5/7) Epoch 18, batch 1850, loss[loss=0.1868, simple_loss=0.2716, pruned_loss=0.05098, over 4779.00 frames. ], tot_loss[loss=0.1846, simple_loss=0.2549, pruned_loss=0.05715, over 958440.81 frames. ], batch size: 29, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:44:30,701 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99229.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:44:42,404 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99246.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:44:43,023 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=99247.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:44:44,907 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8819, 1.8164, 1.7442, 1.8118, 1.3586, 4.5655, 1.7146, 2.2132], + device='cuda:5'), covar=tensor([0.3247, 0.2375, 0.2033, 0.2263, 0.1670, 0.0134, 0.2454, 0.1156], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0121, 0.0123, 0.0114, 0.0096, 0.0096, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 21:44:45,842 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.071e+02 1.618e+02 1.939e+02 2.302e+02 3.831e+02, threshold=3.878e+02, percent-clipped=0.0 +2023-03-26 21:44:45,940 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99251.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:44:57,646 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99268.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:44:59,345 INFO [finetune.py:976] (5/7) Epoch 18, batch 1900, loss[loss=0.1914, simple_loss=0.2648, pruned_loss=0.05902, over 4884.00 frames. ], tot_loss[loss=0.1852, simple_loss=0.256, pruned_loss=0.05718, over 956467.42 frames. ], batch size: 32, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:45:11,556 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99290.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:45:20,586 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-03-26 21:45:33,104 INFO [finetune.py:976] (5/7) Epoch 18, batch 1950, loss[loss=0.1924, simple_loss=0.2474, pruned_loss=0.06865, over 4814.00 frames. ], tot_loss[loss=0.1823, simple_loss=0.2535, pruned_loss=0.05555, over 957145.15 frames. 
], batch size: 30, lr: 3.35e-03, grad_scale: 32.0 +2023-03-26 21:45:35,054 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99324.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:45:36,833 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99327.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:45:38,113 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99329.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:45:52,709 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.452e+02 1.660e+02 1.987e+02 4.820e+02, threshold=3.320e+02, percent-clipped=1.0 +2023-03-26 21:46:06,276 INFO [finetune.py:976] (5/7) Epoch 18, batch 2000, loss[loss=0.1703, simple_loss=0.2415, pruned_loss=0.04958, over 4898.00 frames. ], tot_loss[loss=0.1805, simple_loss=0.2509, pruned_loss=0.05502, over 957241.24 frames. ], batch size: 32, lr: 3.34e-03, grad_scale: 32.0 +2023-03-26 21:46:15,393 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99385.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:46:23,710 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.92 vs. limit=5.0 +2023-03-26 21:46:30,185 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99407.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:46:39,600 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99420.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:46:40,078 INFO [finetune.py:976] (5/7) Epoch 18, batch 2050, loss[loss=0.1981, simple_loss=0.2724, pruned_loss=0.06188, over 4849.00 frames. ], tot_loss[loss=0.1776, simple_loss=0.2473, pruned_loss=0.05392, over 957189.55 frames. ], batch size: 49, lr: 3.34e-03, grad_scale: 32.0 +2023-03-26 21:46:59,829 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.109e+02 1.434e+02 1.742e+02 2.145e+02 5.049e+02, threshold=3.484e+02, percent-clipped=5.0 +2023-03-26 21:47:17,716 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99468.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 21:47:19,434 INFO [finetune.py:976] (5/7) Epoch 18, batch 2100, loss[loss=0.1673, simple_loss=0.2477, pruned_loss=0.04345, over 4850.00 frames. ], tot_loss[loss=0.1802, simple_loss=0.2492, pruned_loss=0.05559, over 957741.81 frames. ], batch size: 44, lr: 3.34e-03, grad_scale: 32.0 +2023-03-26 21:47:31,497 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99481.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:47:58,127 INFO [finetune.py:976] (5/7) Epoch 18, batch 2150, loss[loss=0.182, simple_loss=0.2554, pruned_loss=0.05434, over 4926.00 frames. ], tot_loss[loss=0.1817, simple_loss=0.2513, pruned_loss=0.05603, over 956302.26 frames. 
], batch size: 33, lr: 3.34e-03, grad_scale: 32.0 +2023-03-26 21:48:08,438 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1322, 1.7606, 1.8663, 0.8141, 2.1332, 2.4624, 2.0560, 1.7243], + device='cuda:5'), covar=tensor([0.0984, 0.0924, 0.0518, 0.0722, 0.0553, 0.0507, 0.0503, 0.0801], + device='cuda:5'), in_proj_covar=tensor([0.0125, 0.0153, 0.0124, 0.0127, 0.0132, 0.0130, 0.0144, 0.0149], + device='cuda:5'), out_proj_covar=tensor([9.1886e-05, 1.1078e-04, 8.9125e-05, 9.0182e-05, 9.2989e-05, 9.3121e-05, + 1.0343e-04, 1.0697e-04], device='cuda:5') +2023-03-26 21:48:08,440 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99529.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 21:48:28,269 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99546.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:48:31,199 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9615, 1.8999, 1.7214, 2.0981, 2.5981, 2.1447, 2.0002, 1.6657], + device='cuda:5'), covar=tensor([0.2028, 0.1870, 0.1893, 0.1524, 0.1472, 0.1101, 0.2020, 0.1854], + device='cuda:5'), in_proj_covar=tensor([0.0240, 0.0207, 0.0210, 0.0190, 0.0238, 0.0184, 0.0213, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:48:35,645 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.054e+02 1.546e+02 1.925e+02 2.366e+02 3.688e+02, threshold=3.850e+02, percent-clipped=2.0 +2023-03-26 21:48:35,743 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99551.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:48:52,013 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99568.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:48:53,717 INFO [finetune.py:976] (5/7) Epoch 18, batch 2200, loss[loss=0.1575, simple_loss=0.2354, pruned_loss=0.03976, over 4760.00 frames. ], tot_loss[loss=0.1836, simple_loss=0.2539, pruned_loss=0.05666, over 955120.45 frames. ], batch size: 28, lr: 3.34e-03, grad_scale: 32.0 +2023-03-26 21:49:03,265 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99585.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:49:05,362 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. 
limit=2.0 +2023-03-26 21:49:05,760 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6807, 1.6115, 1.5084, 1.6413, 1.2423, 3.5344, 1.3885, 1.8909], + device='cuda:5'), covar=tensor([0.3356, 0.2503, 0.2188, 0.2223, 0.1714, 0.0217, 0.2514, 0.1211], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0115, 0.0121, 0.0122, 0.0113, 0.0096, 0.0096, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 21:49:05,776 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99589.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:49:08,210 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2355, 2.0417, 1.8613, 1.8322, 1.9232, 1.9690, 2.0372, 2.6465], + device='cuda:5'), covar=tensor([0.3700, 0.3809, 0.3079, 0.3366, 0.3423, 0.2366, 0.3266, 0.1701], + device='cuda:5'), in_proj_covar=tensor([0.0290, 0.0262, 0.0228, 0.0275, 0.0252, 0.0220, 0.0251, 0.0232], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:49:08,743 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=99594.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:49:10,009 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9243, 1.7260, 2.2572, 1.5598, 2.1299, 2.3425, 1.6117, 2.4148], + device='cuda:5'), covar=tensor([0.1275, 0.1696, 0.1586, 0.1725, 0.0779, 0.1084, 0.2441, 0.0721], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0202, 0.0191, 0.0187, 0.0174, 0.0211, 0.0214, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:49:11,759 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=99599.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:49:20,438 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-03-26 21:49:23,792 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4525, 1.0455, 0.7823, 1.3778, 1.7769, 0.7527, 1.2301, 1.3996], + device='cuda:5'), covar=tensor([0.1254, 0.1916, 0.1628, 0.1003, 0.1859, 0.2291, 0.1294, 0.1650], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0096, 0.0111, 0.0093, 0.0120, 0.0094, 0.0099, 0.0090], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 21:49:27,186 INFO [finetune.py:976] (5/7) Epoch 18, batch 2250, loss[loss=0.1484, simple_loss=0.2277, pruned_loss=0.03457, over 4736.00 frames. ], tot_loss[loss=0.183, simple_loss=0.2536, pruned_loss=0.05617, over 953484.69 frames. 
], batch size: 27, lr: 3.34e-03, grad_scale: 32.0 +2023-03-26 21:49:29,581 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99624.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:49:31,415 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99627.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:49:33,079 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99629.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:49:46,352 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99650.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:49:46,821 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.914e+01 1.516e+02 1.824e+02 2.092e+02 3.162e+02, threshold=3.647e+02, percent-clipped=0.0 +2023-03-26 21:50:00,832 INFO [finetune.py:976] (5/7) Epoch 18, batch 2300, loss[loss=0.1527, simple_loss=0.2298, pruned_loss=0.03776, over 4898.00 frames. ], tot_loss[loss=0.1828, simple_loss=0.2535, pruned_loss=0.0561, over 951204.62 frames. ], batch size: 43, lr: 3.34e-03, grad_scale: 32.0 +2023-03-26 21:50:03,810 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=99675.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:50:06,816 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99680.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:50:06,842 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99680.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:50:17,476 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1044, 1.9966, 2.1209, 1.5976, 2.1257, 2.1229, 2.1442, 1.7951], + device='cuda:5'), covar=tensor([0.0542, 0.0604, 0.0599, 0.0841, 0.0679, 0.0612, 0.0582, 0.1030], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0134, 0.0140, 0.0121, 0.0124, 0.0139, 0.0140, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:50:24,609 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99707.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:50:24,716 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-03-26 21:50:34,079 INFO [finetune.py:976] (5/7) Epoch 18, batch 2350, loss[loss=0.1449, simple_loss=0.2124, pruned_loss=0.03869, over 4744.00 frames. ], tot_loss[loss=0.181, simple_loss=0.2511, pruned_loss=0.05548, over 953078.82 frames. ], batch size: 59, lr: 3.34e-03, grad_scale: 32.0 +2023-03-26 21:50:47,883 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99741.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:50:54,366 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.082e+02 1.551e+02 1.845e+02 2.144e+02 4.060e+02, threshold=3.690e+02, percent-clipped=1.0 +2023-03-26 21:50:56,922 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=99755.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:51:08,062 INFO [finetune.py:976] (5/7) Epoch 18, batch 2400, loss[loss=0.1797, simple_loss=0.243, pruned_loss=0.05819, over 4855.00 frames. ], tot_loss[loss=0.1792, simple_loss=0.2484, pruned_loss=0.05505, over 954843.80 frames. 
], batch size: 44, lr: 3.34e-03, grad_scale: 32.0 +2023-03-26 21:51:11,642 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99776.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:51:41,397 INFO [finetune.py:976] (5/7) Epoch 18, batch 2450, loss[loss=0.1635, simple_loss=0.2237, pruned_loss=0.05161, over 4805.00 frames. ], tot_loss[loss=0.1763, simple_loss=0.2452, pruned_loss=0.05375, over 956238.01 frames. ], batch size: 25, lr: 3.34e-03, grad_scale: 32.0 +2023-03-26 21:51:43,695 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99824.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 21:51:47,783 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5444, 3.6647, 3.4735, 1.6351, 3.7701, 2.9136, 1.1834, 2.6554], + device='cuda:5'), covar=tensor([0.2450, 0.2263, 0.1492, 0.3223, 0.1074, 0.0888, 0.3796, 0.1388], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0177, 0.0160, 0.0129, 0.0160, 0.0124, 0.0149, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 21:52:01,735 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.091e+01 1.409e+02 1.705e+02 2.080e+02 4.896e+02, threshold=3.409e+02, percent-clipped=2.0 +2023-03-26 21:52:14,325 INFO [finetune.py:976] (5/7) Epoch 18, batch 2500, loss[loss=0.1645, simple_loss=0.2482, pruned_loss=0.04035, over 4820.00 frames. ], tot_loss[loss=0.1778, simple_loss=0.2469, pruned_loss=0.05439, over 957909.93 frames. ], batch size: 39, lr: 3.34e-03, grad_scale: 32.0 +2023-03-26 21:52:26,294 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99885.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:52:32,713 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5660, 3.6266, 3.3677, 1.6431, 3.7230, 2.9425, 0.8171, 2.5514], + device='cuda:5'), covar=tensor([0.2611, 0.2124, 0.1701, 0.3339, 0.1232, 0.0917, 0.4374, 0.1532], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0176, 0.0159, 0.0129, 0.0159, 0.0124, 0.0148, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 21:52:47,223 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7044, 1.0763, 0.9534, 1.5782, 2.0695, 1.2236, 1.3794, 1.4176], + device='cuda:5'), covar=tensor([0.1637, 0.2370, 0.1959, 0.1333, 0.1943, 0.2017, 0.1639, 0.2290], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0096, 0.0110, 0.0092, 0.0120, 0.0094, 0.0098, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 21:52:50,113 INFO [finetune.py:976] (5/7) Epoch 18, batch 2550, loss[loss=0.1439, simple_loss=0.2226, pruned_loss=0.03256, over 4780.00 frames. ], tot_loss[loss=0.1799, simple_loss=0.25, pruned_loss=0.05489, over 956235.89 frames. ], batch size: 25, lr: 3.34e-03, grad_scale: 32.0 +2023-03-26 21:52:52,503 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99924.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:52:52,539 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99924.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:52:56,144 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.42 vs. 
limit=5.0 +2023-03-26 21:52:58,897 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=99933.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:53:06,655 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99945.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:53:10,619 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.138e+02 1.647e+02 1.933e+02 2.438e+02 4.501e+02, threshold=3.867e+02, percent-clipped=7.0 +2023-03-26 21:53:15,988 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0 +2023-03-26 21:53:20,825 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1087, 1.8303, 2.0383, 1.3557, 1.9773, 2.0521, 2.0529, 1.6506], + device='cuda:5'), covar=tensor([0.0517, 0.0628, 0.0644, 0.0909, 0.0654, 0.0606, 0.0579, 0.1122], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0134, 0.0141, 0.0121, 0.0123, 0.0139, 0.0140, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:53:29,760 INFO [finetune.py:976] (5/7) Epoch 18, batch 2600, loss[loss=0.2004, simple_loss=0.2809, pruned_loss=0.05995, over 4897.00 frames. ], tot_loss[loss=0.1804, simple_loss=0.2512, pruned_loss=0.05483, over 956743.92 frames. ], batch size: 43, lr: 3.34e-03, grad_scale: 32.0 +2023-03-26 21:53:30,433 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=99972.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:53:40,421 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99980.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:54:12,873 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100007.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 21:54:24,615 INFO [finetune.py:976] (5/7) Epoch 18, batch 2650, loss[loss=0.1943, simple_loss=0.2594, pruned_loss=0.06454, over 4199.00 frames. ], tot_loss[loss=0.1822, simple_loss=0.253, pruned_loss=0.05574, over 956340.80 frames. ], batch size: 65, lr: 3.34e-03, grad_scale: 16.0 +2023-03-26 21:54:29,387 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=100028.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:54:35,197 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100036.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:54:42,290 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2324, 2.2138, 2.2708, 1.7690, 2.1801, 2.5125, 2.5164, 1.9526], + device='cuda:5'), covar=tensor([0.0606, 0.0690, 0.0763, 0.0953, 0.0981, 0.0672, 0.0632, 0.1110], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0135, 0.0141, 0.0122, 0.0124, 0.0139, 0.0141, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:54:46,209 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.050e+02 1.530e+02 1.912e+02 2.295e+02 4.144e+02, threshold=3.823e+02, percent-clipped=1.0 +2023-03-26 21:54:56,612 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100068.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 21:54:58,304 INFO [finetune.py:976] (5/7) Epoch 18, batch 2700, loss[loss=0.1763, simple_loss=0.2416, pruned_loss=0.05545, over 4854.00 frames. ], tot_loss[loss=0.1803, simple_loss=0.2511, pruned_loss=0.05478, over 955450.84 frames. 
], batch size: 44, lr: 3.34e-03, grad_scale: 16.0 +2023-03-26 21:55:01,885 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100076.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:55:04,455 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.89 vs. limit=5.0 +2023-03-26 21:55:15,852 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.51 vs. limit=5.0 +2023-03-26 21:55:31,866 INFO [finetune.py:976] (5/7) Epoch 18, batch 2750, loss[loss=0.1886, simple_loss=0.2466, pruned_loss=0.06532, over 4873.00 frames. ], tot_loss[loss=0.1797, simple_loss=0.2493, pruned_loss=0.05502, over 954315.10 frames. ], batch size: 34, lr: 3.34e-03, grad_scale: 16.0 +2023-03-26 21:55:33,703 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=100124.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:55:33,755 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100124.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 21:55:52,997 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.121e+02 1.510e+02 1.766e+02 2.096e+02 4.575e+02, threshold=3.532e+02, percent-clipped=1.0 +2023-03-26 21:55:54,321 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9233, 2.5823, 2.3974, 1.3957, 2.5943, 2.1732, 2.0265, 2.3415], + device='cuda:5'), covar=tensor([0.1214, 0.0850, 0.1614, 0.2176, 0.1696, 0.2097, 0.2153, 0.1284], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0193, 0.0198, 0.0182, 0.0212, 0.0207, 0.0223, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:56:05,347 INFO [finetune.py:976] (5/7) Epoch 18, batch 2800, loss[loss=0.1649, simple_loss=0.2299, pruned_loss=0.04995, over 4821.00 frames. ], tot_loss[loss=0.178, simple_loss=0.2468, pruned_loss=0.05459, over 956255.26 frames. ], batch size: 38, lr: 3.34e-03, grad_scale: 16.0 +2023-03-26 21:56:06,013 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=100172.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 21:56:11,427 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1158, 1.5328, 2.0488, 2.0884, 1.8436, 1.7774, 1.9789, 1.9226], + device='cuda:5'), covar=tensor([0.3804, 0.3736, 0.3118, 0.3249, 0.4453, 0.3310, 0.4137, 0.2981], + device='cuda:5'), in_proj_covar=tensor([0.0249, 0.0239, 0.0259, 0.0274, 0.0273, 0.0248, 0.0284, 0.0241], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:56:20,136 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-03-26 21:56:38,934 INFO [finetune.py:976] (5/7) Epoch 18, batch 2850, loss[loss=0.1967, simple_loss=0.2649, pruned_loss=0.06423, over 4850.00 frames. ], tot_loss[loss=0.1774, simple_loss=0.2461, pruned_loss=0.05432, over 955130.34 frames. 
], batch size: 47, lr: 3.34e-03, grad_scale: 16.0 +2023-03-26 21:56:40,884 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100224.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:56:49,832 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100238.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:56:54,569 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100245.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:56:59,195 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.104e+02 1.635e+02 1.899e+02 2.309e+02 4.393e+02, threshold=3.799e+02, percent-clipped=4.0 +2023-03-26 21:57:11,713 INFO [finetune.py:976] (5/7) Epoch 18, batch 2900, loss[loss=0.2234, simple_loss=0.2848, pruned_loss=0.081, over 4899.00 frames. ], tot_loss[loss=0.1806, simple_loss=0.2496, pruned_loss=0.05584, over 953444.61 frames. ], batch size: 32, lr: 3.34e-03, grad_scale: 16.0 +2023-03-26 21:57:12,868 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=100272.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:57:26,159 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=100293.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:57:30,845 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100299.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:57:45,540 INFO [finetune.py:976] (5/7) Epoch 18, batch 2950, loss[loss=0.1954, simple_loss=0.2622, pruned_loss=0.06427, over 4831.00 frames. ], tot_loss[loss=0.182, simple_loss=0.2519, pruned_loss=0.0561, over 952322.90 frames. ], batch size: 33, lr: 3.34e-03, grad_scale: 16.0 +2023-03-26 21:57:55,265 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100336.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:58:06,322 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.110e+02 1.490e+02 1.822e+02 2.174e+02 4.072e+02, threshold=3.643e+02, percent-clipped=2.0 +2023-03-26 21:58:13,599 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100363.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 21:58:18,817 INFO [finetune.py:976] (5/7) Epoch 18, batch 3000, loss[loss=0.1866, simple_loss=0.2615, pruned_loss=0.05587, over 4814.00 frames. ], tot_loss[loss=0.183, simple_loss=0.2532, pruned_loss=0.05642, over 951188.17 frames. ], batch size: 38, lr: 3.34e-03, grad_scale: 16.0 +2023-03-26 21:58:18,818 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 21:58:26,877 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8455, 1.0925, 1.9317, 1.8096, 1.6944, 1.5972, 1.6872, 1.7491], + device='cuda:5'), covar=tensor([0.3849, 0.4043, 0.3588, 0.3758, 0.5029, 0.3972, 0.4418, 0.3242], + device='cuda:5'), in_proj_covar=tensor([0.0248, 0.0238, 0.0259, 0.0274, 0.0273, 0.0247, 0.0282, 0.0240], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:58:31,198 INFO [finetune.py:1010] (5/7) Epoch 18, validation: loss=0.1568, simple_loss=0.2261, pruned_loss=0.04375, over 2265189.00 frames. 
+2023-03-26 21:58:31,199 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 21:58:44,387 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=100384.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:58:49,075 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9526, 1.6447, 2.2141, 1.4858, 1.9879, 2.2587, 1.5534, 2.3525], + device='cuda:5'), covar=tensor([0.1206, 0.1901, 0.1404, 0.1980, 0.0845, 0.1248, 0.2726, 0.0736], + device='cuda:5'), in_proj_covar=tensor([0.0191, 0.0202, 0.0190, 0.0187, 0.0174, 0.0211, 0.0215, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 21:59:06,993 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100402.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 21:59:29,753 INFO [finetune.py:976] (5/7) Epoch 18, batch 3050, loss[loss=0.1611, simple_loss=0.235, pruned_loss=0.04367, over 4747.00 frames. ], tot_loss[loss=0.1825, simple_loss=0.2531, pruned_loss=0.05597, over 952937.61 frames. ], batch size: 27, lr: 3.34e-03, grad_scale: 16.0 +2023-03-26 21:59:53,775 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.049e+02 1.572e+02 1.917e+02 2.276e+02 3.597e+02, threshold=3.833e+02, percent-clipped=0.0 +2023-03-26 22:00:01,164 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100463.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 22:00:07,401 INFO [finetune.py:976] (5/7) Epoch 18, batch 3100, loss[loss=0.1648, simple_loss=0.232, pruned_loss=0.04879, over 4740.00 frames. ], tot_loss[loss=0.1815, simple_loss=0.2516, pruned_loss=0.0557, over 950260.28 frames. ], batch size: 59, lr: 3.34e-03, grad_scale: 16.0 +2023-03-26 22:00:35,456 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6306, 2.4516, 3.0362, 1.8814, 2.6465, 3.0897, 2.2521, 3.2170], + device='cuda:5'), covar=tensor([0.1388, 0.1733, 0.1493, 0.2233, 0.1098, 0.1362, 0.2405, 0.0868], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0202, 0.0189, 0.0186, 0.0174, 0.0210, 0.0214, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:00:38,997 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9485, 1.9006, 1.6228, 1.7688, 1.2116, 4.5510, 1.7342, 2.1747], + device='cuda:5'), covar=tensor([0.3075, 0.2352, 0.2094, 0.2312, 0.1623, 0.0100, 0.2272, 0.1175], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0115, 0.0120, 0.0123, 0.0113, 0.0096, 0.0095, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 22:00:40,537 INFO [finetune.py:976] (5/7) Epoch 18, batch 3150, loss[loss=0.1735, simple_loss=0.2445, pruned_loss=0.05124, over 4716.00 frames. ], tot_loss[loss=0.1793, simple_loss=0.2487, pruned_loss=0.05497, over 950855.17 frames. ], batch size: 23, lr: 3.34e-03, grad_scale: 16.0 +2023-03-26 22:01:00,945 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.466e+01 1.523e+02 1.835e+02 2.195e+02 4.344e+02, threshold=3.670e+02, percent-clipped=3.0 +2023-03-26 22:01:12,891 INFO [finetune.py:976] (5/7) Epoch 18, batch 3200, loss[loss=0.198, simple_loss=0.2649, pruned_loss=0.06552, over 4725.00 frames. ], tot_loss[loss=0.1768, simple_loss=0.246, pruned_loss=0.05374, over 951699.24 frames. 
], batch size: 23, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:01:13,484 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0124, 5.0734, 4.7996, 2.7425, 5.1773, 4.0853, 1.0505, 3.8076], + device='cuda:5'), covar=tensor([0.2219, 0.1703, 0.1332, 0.2984, 0.0735, 0.0734, 0.4789, 0.1147], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0176, 0.0160, 0.0130, 0.0160, 0.0124, 0.0149, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 22:01:28,940 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100594.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:01:30,892 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-03-26 22:01:40,040 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.69 vs. limit=5.0 +2023-03-26 22:01:42,812 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5418, 3.4420, 3.2663, 1.4469, 3.5701, 2.6619, 0.8104, 2.4101], + device='cuda:5'), covar=tensor([0.2301, 0.2319, 0.1628, 0.3468, 0.1138, 0.0985, 0.4420, 0.1472], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0176, 0.0159, 0.0129, 0.0159, 0.0124, 0.0148, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 22:01:46,318 INFO [finetune.py:976] (5/7) Epoch 18, batch 3250, loss[loss=0.1385, simple_loss=0.2196, pruned_loss=0.02868, over 4901.00 frames. ], tot_loss[loss=0.178, simple_loss=0.2471, pruned_loss=0.0545, over 952188.58 frames. ], batch size: 36, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:01:55,193 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0126, 2.5918, 2.4274, 1.3314, 2.6915, 2.0836, 2.0301, 2.3118], + device='cuda:5'), covar=tensor([0.1120, 0.0924, 0.1814, 0.2247, 0.1702, 0.2354, 0.2255, 0.1271], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0192, 0.0199, 0.0183, 0.0212, 0.0207, 0.0222, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:02:08,116 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.084e+02 1.502e+02 1.862e+02 2.235e+02 4.464e+02, threshold=3.723e+02, percent-clipped=3.0 +2023-03-26 22:02:14,916 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100663.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 22:02:20,158 INFO [finetune.py:976] (5/7) Epoch 18, batch 3300, loss[loss=0.1866, simple_loss=0.2615, pruned_loss=0.05586, over 4793.00 frames. ], tot_loss[loss=0.1796, simple_loss=0.2497, pruned_loss=0.05473, over 952956.94 frames. ], batch size: 29, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:02:47,027 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=100711.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 22:02:53,405 INFO [finetune.py:976] (5/7) Epoch 18, batch 3350, loss[loss=0.1857, simple_loss=0.2521, pruned_loss=0.05964, over 4814.00 frames. ], tot_loss[loss=0.1816, simple_loss=0.2523, pruned_loss=0.05541, over 955376.47 frames. 
], batch size: 33, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:02:59,532 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100730.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 22:03:14,083 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.901e+01 1.577e+02 1.865e+02 2.249e+02 4.268e+02, threshold=3.731e+02, percent-clipped=3.0 +2023-03-26 22:03:18,280 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100758.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 22:03:23,232 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5664, 2.3675, 1.8246, 0.8669, 2.0396, 2.1743, 1.9705, 2.1556], + device='cuda:5'), covar=tensor([0.0755, 0.0682, 0.1471, 0.1805, 0.1299, 0.1858, 0.1803, 0.0782], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0193, 0.0200, 0.0183, 0.0212, 0.0207, 0.0223, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:03:26,648 INFO [finetune.py:976] (5/7) Epoch 18, batch 3400, loss[loss=0.168, simple_loss=0.247, pruned_loss=0.04445, over 4817.00 frames. ], tot_loss[loss=0.183, simple_loss=0.2539, pruned_loss=0.05611, over 956503.73 frames. ], batch size: 33, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:03:40,310 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100791.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 22:04:13,289 INFO [finetune.py:976] (5/7) Epoch 18, batch 3450, loss[loss=0.1792, simple_loss=0.2475, pruned_loss=0.0555, over 4723.00 frames. ], tot_loss[loss=0.1822, simple_loss=0.2528, pruned_loss=0.05582, over 957002.72 frames. ], batch size: 59, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:04:49,164 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.055e+02 1.551e+02 1.762e+02 2.136e+02 3.810e+02, threshold=3.524e+02, percent-clipped=1.0 +2023-03-26 22:05:04,970 INFO [finetune.py:976] (5/7) Epoch 18, batch 3500, loss[loss=0.1545, simple_loss=0.2222, pruned_loss=0.0434, over 4774.00 frames. ], tot_loss[loss=0.1797, simple_loss=0.2494, pruned_loss=0.05494, over 954617.82 frames. ], batch size: 28, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:05:08,710 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.9533, 0.9766, 0.9488, 1.1276, 1.1839, 1.1157, 0.9887, 0.9201], + device='cuda:5'), covar=tensor([0.0391, 0.0343, 0.0659, 0.0314, 0.0284, 0.0468, 0.0343, 0.0390], + device='cuda:5'), in_proj_covar=tensor([0.0096, 0.0107, 0.0143, 0.0110, 0.0101, 0.0108, 0.0099, 0.0110], + device='cuda:5'), out_proj_covar=tensor([7.4292e-05, 8.2683e-05, 1.1293e-04, 8.4884e-05, 7.8421e-05, 8.0164e-05, + 7.3787e-05, 8.3557e-05], device='cuda:5') +2023-03-26 22:05:21,071 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100894.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:05:37,076 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5526, 1.3809, 1.2357, 1.5707, 1.6581, 1.5443, 1.0850, 1.2914], + device='cuda:5'), covar=tensor([0.2150, 0.2052, 0.1962, 0.1544, 0.1526, 0.1310, 0.2557, 0.1842], + device='cuda:5'), in_proj_covar=tensor([0.0242, 0.0209, 0.0212, 0.0192, 0.0241, 0.0186, 0.0215, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:05:38,765 INFO [finetune.py:976] (5/7) Epoch 18, batch 3550, loss[loss=0.1735, simple_loss=0.2406, pruned_loss=0.0532, over 4765.00 frames. 
], tot_loss[loss=0.1779, simple_loss=0.2474, pruned_loss=0.0542, over 957350.47 frames. ], batch size: 28, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:05:49,119 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6357, 2.3247, 1.9457, 2.6822, 2.5620, 2.2592, 3.0734, 2.4978], + device='cuda:5'), covar=tensor([0.1337, 0.2646, 0.3309, 0.2742, 0.2612, 0.1707, 0.2873, 0.1958], + device='cuda:5'), in_proj_covar=tensor([0.0182, 0.0186, 0.0232, 0.0251, 0.0243, 0.0201, 0.0212, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:05:52,401 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=100942.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:05:59,322 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.557e+01 1.553e+02 1.835e+02 2.348e+02 4.609e+02, threshold=3.670e+02, percent-clipped=4.0 +2023-03-26 22:06:07,730 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-03-26 22:06:12,134 INFO [finetune.py:976] (5/7) Epoch 18, batch 3600, loss[loss=0.1706, simple_loss=0.2461, pruned_loss=0.04754, over 4710.00 frames. ], tot_loss[loss=0.1766, simple_loss=0.2456, pruned_loss=0.05381, over 955742.25 frames. ], batch size: 23, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:06:25,181 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.68 vs. limit=2.0 +2023-03-26 22:06:41,390 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.3670, 3.8298, 3.9767, 4.2143, 4.1362, 3.8317, 4.4607, 1.3875], + device='cuda:5'), covar=tensor([0.0848, 0.0831, 0.0804, 0.1004, 0.1116, 0.1665, 0.0636, 0.5760], + device='cuda:5'), in_proj_covar=tensor([0.0349, 0.0244, 0.0278, 0.0291, 0.0334, 0.0282, 0.0301, 0.0296], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:06:46,077 INFO [finetune.py:976] (5/7) Epoch 18, batch 3650, loss[loss=0.1773, simple_loss=0.2427, pruned_loss=0.0559, over 4803.00 frames. ], tot_loss[loss=0.1792, simple_loss=0.2484, pruned_loss=0.05501, over 953098.37 frames. ], batch size: 25, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:07:06,792 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.062e+02 1.559e+02 1.860e+02 2.177e+02 4.070e+02, threshold=3.719e+02, percent-clipped=1.0 +2023-03-26 22:07:11,005 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=101058.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:07:15,163 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-03-26 22:07:18,909 INFO [finetune.py:976] (5/7) Epoch 18, batch 3700, loss[loss=0.1722, simple_loss=0.2559, pruned_loss=0.04424, over 4911.00 frames. ], tot_loss[loss=0.1818, simple_loss=0.2516, pruned_loss=0.05601, over 950738.26 frames. 
], batch size: 43, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:07:28,513 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=101086.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 22:07:42,150 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=101106.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:07:43,259 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=101107.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:07:52,639 INFO [finetune.py:976] (5/7) Epoch 18, batch 3750, loss[loss=0.1647, simple_loss=0.2494, pruned_loss=0.04003, over 4866.00 frames. ], tot_loss[loss=0.1818, simple_loss=0.252, pruned_loss=0.0558, over 950074.38 frames. ], batch size: 31, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:07:54,579 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0910, 1.2795, 1.2517, 1.2356, 1.3798, 2.4512, 1.2145, 1.3941], + device='cuda:5'), covar=tensor([0.1078, 0.2009, 0.1130, 0.1039, 0.1687, 0.0353, 0.1570, 0.1886], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0077, 0.0091, 0.0080, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 22:07:54,593 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4356, 1.3779, 1.4562, 0.6860, 1.4889, 1.4399, 1.4336, 1.2730], + device='cuda:5'), covar=tensor([0.0667, 0.0806, 0.0777, 0.1078, 0.0896, 0.0742, 0.0687, 0.1339], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0135, 0.0140, 0.0120, 0.0123, 0.0138, 0.0139, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:07:59,882 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4869, 1.4378, 1.7748, 1.7466, 1.6245, 3.1763, 1.4260, 1.5647], + device='cuda:5'), covar=tensor([0.0970, 0.1780, 0.1203, 0.0934, 0.1456, 0.0246, 0.1426, 0.1680], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0077, 0.0091, 0.0080, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 22:08:12,839 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.051e+02 1.641e+02 1.814e+02 2.131e+02 4.110e+02, threshold=3.627e+02, percent-clipped=2.0 +2023-03-26 22:08:24,457 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=101168.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:08:26,160 INFO [finetune.py:976] (5/7) Epoch 18, batch 3800, loss[loss=0.1816, simple_loss=0.2695, pruned_loss=0.04685, over 4799.00 frames. ], tot_loss[loss=0.1831, simple_loss=0.254, pruned_loss=0.05616, over 951643.20 frames. ], batch size: 51, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:08:41,953 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.71 vs. limit=2.0 +2023-03-26 22:08:59,900 INFO [finetune.py:976] (5/7) Epoch 18, batch 3850, loss[loss=0.1733, simple_loss=0.2473, pruned_loss=0.04969, over 4902.00 frames. ], tot_loss[loss=0.1818, simple_loss=0.2528, pruned_loss=0.05544, over 953670.03 frames. ], batch size: 36, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:09:02,694 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.54 vs. limit=5.0 +2023-03-26 22:09:28,205 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. 
limit=2.0 +2023-03-26 22:09:28,676 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3735, 2.2332, 1.8362, 2.1542, 2.3166, 2.0177, 2.5282, 2.3346], + device='cuda:5'), covar=tensor([0.1250, 0.1917, 0.2797, 0.2417, 0.2359, 0.1600, 0.3024, 0.1806], + device='cuda:5'), in_proj_covar=tensor([0.0184, 0.0189, 0.0236, 0.0254, 0.0246, 0.0204, 0.0215, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:09:30,743 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.43 vs. limit=5.0 +2023-03-26 22:09:30,997 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.034e+02 1.525e+02 1.862e+02 2.180e+02 4.556e+02, threshold=3.724e+02, percent-clipped=2.0 +2023-03-26 22:09:57,716 INFO [finetune.py:976] (5/7) Epoch 18, batch 3900, loss[loss=0.2001, simple_loss=0.262, pruned_loss=0.06915, over 4718.00 frames. ], tot_loss[loss=0.1795, simple_loss=0.2499, pruned_loss=0.05462, over 955151.21 frames. ], batch size: 23, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:10:41,970 INFO [finetune.py:976] (5/7) Epoch 18, batch 3950, loss[loss=0.1337, simple_loss=0.2061, pruned_loss=0.03059, over 4862.00 frames. ], tot_loss[loss=0.1761, simple_loss=0.246, pruned_loss=0.05313, over 953755.99 frames. ], batch size: 44, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:11:02,252 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.024e+02 1.470e+02 1.754e+02 2.083e+02 3.090e+02, threshold=3.508e+02, percent-clipped=0.0 +2023-03-26 22:11:15,360 INFO [finetune.py:976] (5/7) Epoch 18, batch 4000, loss[loss=0.1779, simple_loss=0.2329, pruned_loss=0.06145, over 4133.00 frames. ], tot_loss[loss=0.1748, simple_loss=0.2443, pruned_loss=0.05264, over 953441.03 frames. ], batch size: 18, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:11:25,996 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=101386.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 22:11:49,438 INFO [finetune.py:976] (5/7) Epoch 18, batch 4050, loss[loss=0.1885, simple_loss=0.2583, pruned_loss=0.05939, over 4876.00 frames. ], tot_loss[loss=0.181, simple_loss=0.2502, pruned_loss=0.05585, over 949338.89 frames. ], batch size: 34, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:11:58,805 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=101434.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 22:12:10,021 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.148e+02 1.670e+02 2.040e+02 2.363e+02 9.256e+02, threshold=4.080e+02, percent-clipped=2.0 +2023-03-26 22:12:17,269 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=101463.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:12:19,144 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2179, 2.3354, 1.9062, 1.9485, 2.7254, 2.7174, 2.2576, 2.2061], + device='cuda:5'), covar=tensor([0.0337, 0.0314, 0.0510, 0.0348, 0.0231, 0.0500, 0.0375, 0.0370], + device='cuda:5'), in_proj_covar=tensor([0.0095, 0.0106, 0.0142, 0.0109, 0.0099, 0.0107, 0.0097, 0.0109], + device='cuda:5'), out_proj_covar=tensor([7.3477e-05, 8.1663e-05, 1.1151e-04, 8.3977e-05, 7.7286e-05, 7.9393e-05, + 7.2778e-05, 8.3095e-05], device='cuda:5') +2023-03-26 22:12:22,997 INFO [finetune.py:976] (5/7) Epoch 18, batch 4100, loss[loss=0.2009, simple_loss=0.2788, pruned_loss=0.06148, over 4828.00 frames. 
], tot_loss[loss=0.1833, simple_loss=0.2531, pruned_loss=0.05678, over 951968.83 frames. ], batch size: 49, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:12:23,141 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0357, 2.0078, 1.6640, 1.7862, 1.8384, 1.7828, 1.9691, 2.5747], + device='cuda:5'), covar=tensor([0.3343, 0.3620, 0.2968, 0.3692, 0.3654, 0.2355, 0.3351, 0.1477], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0262, 0.0228, 0.0274, 0.0251, 0.0219, 0.0251, 0.0232], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:12:56,256 INFO [finetune.py:976] (5/7) Epoch 18, batch 4150, loss[loss=0.1538, simple_loss=0.2266, pruned_loss=0.04048, over 4811.00 frames. ], tot_loss[loss=0.1839, simple_loss=0.2537, pruned_loss=0.05702, over 951580.75 frames. ], batch size: 25, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:13:16,878 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.507e+01 1.508e+02 1.851e+02 2.208e+02 3.984e+02, threshold=3.702e+02, percent-clipped=0.0 +2023-03-26 22:13:29,468 INFO [finetune.py:976] (5/7) Epoch 18, batch 4200, loss[loss=0.1907, simple_loss=0.2629, pruned_loss=0.05927, over 4901.00 frames. ], tot_loss[loss=0.1839, simple_loss=0.2539, pruned_loss=0.057, over 950876.10 frames. ], batch size: 36, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:13:51,339 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1642, 2.1387, 1.7337, 2.1332, 1.9899, 2.0319, 2.0348, 2.8772], + device='cuda:5'), covar=tensor([0.3611, 0.4598, 0.3606, 0.4300, 0.5005, 0.2363, 0.4661, 0.1787], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0261, 0.0228, 0.0273, 0.0251, 0.0219, 0.0250, 0.0232], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:14:03,051 INFO [finetune.py:976] (5/7) Epoch 18, batch 4250, loss[loss=0.1728, simple_loss=0.2431, pruned_loss=0.05126, over 4893.00 frames. ], tot_loss[loss=0.1824, simple_loss=0.252, pruned_loss=0.05641, over 952100.65 frames. ], batch size: 43, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:14:04,387 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6089, 1.6084, 1.3830, 1.4913, 1.8927, 1.8103, 1.6530, 1.3730], + device='cuda:5'), covar=tensor([0.0269, 0.0277, 0.0546, 0.0285, 0.0202, 0.0378, 0.0227, 0.0376], + device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0105, 0.0141, 0.0109, 0.0099, 0.0107, 0.0097, 0.0108], + device='cuda:5'), out_proj_covar=tensor([7.2863e-05, 8.1284e-05, 1.1106e-04, 8.3572e-05, 7.6940e-05, 7.8845e-05, + 7.2287e-05, 8.2474e-05], device='cuda:5') +2023-03-26 22:14:24,254 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.150e+02 1.580e+02 1.774e+02 2.219e+02 3.425e+02, threshold=3.547e+02, percent-clipped=0.0 +2023-03-26 22:14:38,481 INFO [finetune.py:976] (5/7) Epoch 18, batch 4300, loss[loss=0.1849, simple_loss=0.2579, pruned_loss=0.05592, over 4839.00 frames. ], tot_loss[loss=0.1802, simple_loss=0.2495, pruned_loss=0.05543, over 953057.06 frames. ], batch size: 33, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:14:53,982 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.30 vs. 
limit=5.0 +2023-03-26 22:14:54,351 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=101684.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:15:33,153 INFO [finetune.py:976] (5/7) Epoch 18, batch 4350, loss[loss=0.1771, simple_loss=0.2424, pruned_loss=0.05589, over 4817.00 frames. ], tot_loss[loss=0.1764, simple_loss=0.2453, pruned_loss=0.05378, over 952442.40 frames. ], batch size: 25, lr: 3.33e-03, grad_scale: 16.0 +2023-03-26 22:16:05,152 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=101745.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:16:10,238 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.272e+01 1.483e+02 1.686e+02 2.087e+02 3.591e+02, threshold=3.373e+02, percent-clipped=1.0 +2023-03-26 22:16:16,970 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=101763.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:16:21,723 INFO [finetune.py:976] (5/7) Epoch 18, batch 4400, loss[loss=0.237, simple_loss=0.2889, pruned_loss=0.09251, over 4871.00 frames. ], tot_loss[loss=0.1778, simple_loss=0.2465, pruned_loss=0.05458, over 952877.71 frames. ], batch size: 34, lr: 3.32e-03, grad_scale: 16.0 +2023-03-26 22:16:27,078 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.0336, 3.6103, 3.8154, 3.7386, 3.5853, 3.4964, 4.2170, 1.4240], + device='cuda:5'), covar=tensor([0.1430, 0.1645, 0.1396, 0.1911, 0.2198, 0.2320, 0.1105, 0.7420], + device='cuda:5'), in_proj_covar=tensor([0.0349, 0.0243, 0.0277, 0.0291, 0.0332, 0.0281, 0.0301, 0.0295], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:16:49,625 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=101811.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:16:55,630 INFO [finetune.py:976] (5/7) Epoch 18, batch 4450, loss[loss=0.19, simple_loss=0.2707, pruned_loss=0.05467, over 4808.00 frames. ], tot_loss[loss=0.182, simple_loss=0.2514, pruned_loss=0.05632, over 952447.64 frames. ], batch size: 45, lr: 3.32e-03, grad_scale: 16.0 +2023-03-26 22:17:16,751 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.107e+02 1.606e+02 1.893e+02 2.313e+02 4.401e+02, threshold=3.785e+02, percent-clipped=7.0 +2023-03-26 22:17:29,383 INFO [finetune.py:976] (5/7) Epoch 18, batch 4500, loss[loss=0.1834, simple_loss=0.25, pruned_loss=0.05839, over 4894.00 frames. ], tot_loss[loss=0.1831, simple_loss=0.2528, pruned_loss=0.05669, over 952334.70 frames. ], batch size: 37, lr: 3.32e-03, grad_scale: 16.0 +2023-03-26 22:17:40,220 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5633, 1.5185, 1.3881, 1.7950, 1.9547, 1.8233, 1.2018, 1.3587], + device='cuda:5'), covar=tensor([0.2267, 0.2060, 0.1913, 0.1523, 0.1657, 0.1268, 0.2661, 0.1917], + device='cuda:5'), in_proj_covar=tensor([0.0243, 0.0209, 0.0213, 0.0193, 0.0241, 0.0187, 0.0215, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:18:02,640 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=101920.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 22:18:03,131 INFO [finetune.py:976] (5/7) Epoch 18, batch 4550, loss[loss=0.1942, simple_loss=0.2763, pruned_loss=0.05605, over 4905.00 frames. ], tot_loss[loss=0.1842, simple_loss=0.2544, pruned_loss=0.05695, over 953371.20 frames. 
], batch size: 36, lr: 3.32e-03, grad_scale: 16.0 +2023-03-26 22:18:24,137 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.068e+02 1.522e+02 1.794e+02 2.336e+02 4.256e+02, threshold=3.587e+02, percent-clipped=2.0 +2023-03-26 22:18:36,747 INFO [finetune.py:976] (5/7) Epoch 18, batch 4600, loss[loss=0.1841, simple_loss=0.2543, pruned_loss=0.05692, over 4822.00 frames. ], tot_loss[loss=0.1838, simple_loss=0.2538, pruned_loss=0.05695, over 954691.33 frames. ], batch size: 39, lr: 3.32e-03, grad_scale: 16.0 +2023-03-26 22:18:42,885 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=101981.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 22:18:46,880 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6432, 1.5477, 1.4699, 1.5622, 1.8973, 1.8388, 1.6018, 1.3900], + device='cuda:5'), covar=tensor([0.0318, 0.0314, 0.0572, 0.0327, 0.0200, 0.0451, 0.0334, 0.0430], + device='cuda:5'), in_proj_covar=tensor([0.0094, 0.0106, 0.0141, 0.0109, 0.0099, 0.0107, 0.0097, 0.0109], + device='cuda:5'), out_proj_covar=tensor([7.2895e-05, 8.1440e-05, 1.1104e-04, 8.3614e-05, 7.7019e-05, 7.8868e-05, + 7.2311e-05, 8.2947e-05], device='cuda:5') +2023-03-26 22:18:47,466 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=101987.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:18:54,944 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-03-26 22:19:11,219 INFO [finetune.py:976] (5/7) Epoch 18, batch 4650, loss[loss=0.1582, simple_loss=0.2257, pruned_loss=0.0453, over 4934.00 frames. ], tot_loss[loss=0.1814, simple_loss=0.2506, pruned_loss=0.0561, over 956988.89 frames. ], batch size: 38, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:19:12,852 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.58 vs. limit=5.0 +2023-03-26 22:19:23,877 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=102040.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:19:29,230 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=102048.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:19:31,548 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.060e+02 1.564e+02 1.867e+02 2.217e+02 4.281e+02, threshold=3.734e+02, percent-clipped=4.0 +2023-03-26 22:19:32,245 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.4840, 3.8482, 4.0087, 4.3051, 4.2439, 3.8955, 4.5899, 1.4276], + device='cuda:5'), covar=tensor([0.0826, 0.0921, 0.0888, 0.0957, 0.1254, 0.1654, 0.0628, 0.6001], + device='cuda:5'), in_proj_covar=tensor([0.0349, 0.0243, 0.0279, 0.0291, 0.0333, 0.0282, 0.0302, 0.0296], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:19:45,056 INFO [finetune.py:976] (5/7) Epoch 18, batch 4700, loss[loss=0.1691, simple_loss=0.2281, pruned_loss=0.05506, over 4826.00 frames. ], tot_loss[loss=0.1785, simple_loss=0.247, pruned_loss=0.05498, over 956975.75 frames. ], batch size: 30, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:19:53,779 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. 
limit=2.0 +2023-03-26 22:20:28,610 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.7262, 4.1073, 4.3147, 4.5124, 4.4728, 4.1777, 4.8338, 1.5632], + device='cuda:5'), covar=tensor([0.0870, 0.0816, 0.0744, 0.1070, 0.1200, 0.1555, 0.0552, 0.5935], + device='cuda:5'), in_proj_covar=tensor([0.0349, 0.0243, 0.0278, 0.0291, 0.0332, 0.0282, 0.0302, 0.0296], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:20:31,468 INFO [finetune.py:976] (5/7) Epoch 18, batch 4750, loss[loss=0.1538, simple_loss=0.2361, pruned_loss=0.03576, over 4777.00 frames. ], tot_loss[loss=0.1771, simple_loss=0.2455, pruned_loss=0.05433, over 957121.16 frames. ], batch size: 26, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:20:35,368 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.22 vs. limit=5.0 +2023-03-26 22:20:55,661 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4227, 1.4552, 1.6014, 1.6433, 1.6060, 3.0194, 1.3912, 1.5231], + device='cuda:5'), covar=tensor([0.1037, 0.1909, 0.1088, 0.0961, 0.1577, 0.0275, 0.1467, 0.1754], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0077, 0.0090, 0.0080, 0.0084, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 22:20:56,163 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.053e+02 1.538e+02 1.904e+02 2.326e+02 4.380e+02, threshold=3.807e+02, percent-clipped=2.0 +2023-03-26 22:21:23,319 INFO [finetune.py:976] (5/7) Epoch 18, batch 4800, loss[loss=0.224, simple_loss=0.2975, pruned_loss=0.07526, over 4852.00 frames. ], tot_loss[loss=0.1794, simple_loss=0.2485, pruned_loss=0.05517, over 957782.97 frames. ], batch size: 47, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:21:24,592 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.0286, 3.4681, 3.6754, 3.9057, 3.8063, 3.4671, 4.1099, 1.4142], + device='cuda:5'), covar=tensor([0.0889, 0.0918, 0.0905, 0.1053, 0.1292, 0.1796, 0.0774, 0.5677], + device='cuda:5'), in_proj_covar=tensor([0.0348, 0.0242, 0.0277, 0.0289, 0.0330, 0.0280, 0.0300, 0.0295], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:22:00,412 INFO [finetune.py:976] (5/7) Epoch 18, batch 4850, loss[loss=0.1686, simple_loss=0.2274, pruned_loss=0.05485, over 4750.00 frames. ], tot_loss[loss=0.1818, simple_loss=0.2516, pruned_loss=0.05603, over 958788.11 frames. ], batch size: 27, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:22:13,797 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.5354, 1.5771, 1.5256, 0.8544, 1.6586, 1.8514, 1.8401, 1.4339], + device='cuda:5'), covar=tensor([0.1069, 0.0591, 0.0498, 0.0543, 0.0423, 0.0539, 0.0367, 0.0661], + device='cuda:5'), in_proj_covar=tensor([0.0125, 0.0152, 0.0124, 0.0127, 0.0131, 0.0129, 0.0142, 0.0148], + device='cuda:5'), out_proj_covar=tensor([9.1245e-05, 1.0990e-04, 8.8467e-05, 9.0174e-05, 9.2581e-05, 9.2666e-05, + 1.0224e-04, 1.0638e-04], device='cuda:5') +2023-03-26 22:22:20,214 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.897e+01 1.478e+02 1.803e+02 2.234e+02 3.533e+02, threshold=3.606e+02, percent-clipped=0.0 +2023-03-26 22:22:33,608 INFO [finetune.py:976] (5/7) Epoch 18, batch 4900, loss[loss=0.1673, simple_loss=0.2375, pruned_loss=0.04859, over 4739.00 frames. 
], tot_loss[loss=0.1822, simple_loss=0.2522, pruned_loss=0.0561, over 958335.91 frames. ], batch size: 54, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:22:36,471 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7873, 1.6424, 2.1437, 2.0969, 1.8647, 4.3414, 1.7273, 1.8355], + device='cuda:5'), covar=tensor([0.0969, 0.1885, 0.1188, 0.0931, 0.1629, 0.0166, 0.1418, 0.1827], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0074, 0.0077, 0.0091, 0.0080, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 22:22:37,664 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=102276.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 22:22:56,978 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.4671, 3.8476, 4.0754, 4.3257, 4.1973, 3.8869, 4.5729, 1.4151], + device='cuda:5'), covar=tensor([0.0755, 0.0866, 0.0834, 0.1042, 0.1191, 0.1700, 0.0646, 0.6039], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0244, 0.0279, 0.0292, 0.0334, 0.0283, 0.0302, 0.0297], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:23:06,928 INFO [finetune.py:976] (5/7) Epoch 18, batch 4950, loss[loss=0.1745, simple_loss=0.2542, pruned_loss=0.04734, over 4802.00 frames. ], tot_loss[loss=0.1826, simple_loss=0.2532, pruned_loss=0.05604, over 957811.87 frames. ], batch size: 40, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:23:19,537 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=102340.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:23:21,351 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=102343.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:23:26,708 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.970e+01 1.449e+02 1.847e+02 2.188e+02 4.191e+02, threshold=3.694e+02, percent-clipped=1.0 +2023-03-26 22:23:40,084 INFO [finetune.py:976] (5/7) Epoch 18, batch 5000, loss[loss=0.191, simple_loss=0.251, pruned_loss=0.06545, over 4812.00 frames. ], tot_loss[loss=0.1818, simple_loss=0.2518, pruned_loss=0.05587, over 957622.48 frames. ], batch size: 39, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:23:51,810 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=102388.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:24:13,297 INFO [finetune.py:976] (5/7) Epoch 18, batch 5050, loss[loss=0.1621, simple_loss=0.2403, pruned_loss=0.04197, over 4758.00 frames. ], tot_loss[loss=0.18, simple_loss=0.2489, pruned_loss=0.05549, over 955960.58 frames. 
], batch size: 28, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:24:21,523 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3805, 2.9273, 2.7996, 1.3309, 3.0282, 2.3473, 0.7143, 1.9969], + device='cuda:5'), covar=tensor([0.2204, 0.2314, 0.1838, 0.3463, 0.1652, 0.1163, 0.4256, 0.1764], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0179, 0.0162, 0.0130, 0.0163, 0.0125, 0.0150, 0.0125], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 22:24:33,971 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.223e+01 1.582e+02 1.966e+02 2.354e+02 3.513e+02, threshold=3.932e+02, percent-clipped=0.0 +2023-03-26 22:24:46,889 INFO [finetune.py:976] (5/7) Epoch 18, batch 5100, loss[loss=0.1652, simple_loss=0.2356, pruned_loss=0.04737, over 4730.00 frames. ], tot_loss[loss=0.1778, simple_loss=0.2461, pruned_loss=0.05476, over 955776.79 frames. ], batch size: 59, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:24:56,975 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8281, 4.1455, 3.9705, 2.2259, 4.3018, 3.1894, 0.8352, 2.9105], + device='cuda:5'), covar=tensor([0.2279, 0.2360, 0.1533, 0.3333, 0.1089, 0.1055, 0.4996, 0.1645], + device='cuda:5'), in_proj_covar=tensor([0.0154, 0.0180, 0.0163, 0.0131, 0.0164, 0.0126, 0.0151, 0.0126], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 22:25:06,481 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0291, 1.9662, 2.0645, 1.9660, 1.5667, 4.7108, 1.7838, 2.2940], + device='cuda:5'), covar=tensor([0.3100, 0.2263, 0.1782, 0.2170, 0.1500, 0.0129, 0.2358, 0.1150], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0114, 0.0119, 0.0123, 0.0113, 0.0095, 0.0096, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 22:25:11,401 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9303, 1.0912, 1.9236, 1.8414, 1.6871, 1.6434, 1.7314, 1.7795], + device='cuda:5'), covar=tensor([0.3481, 0.3647, 0.3103, 0.3158, 0.4511, 0.3469, 0.4058, 0.2847], + device='cuda:5'), in_proj_covar=tensor([0.0248, 0.0240, 0.0258, 0.0275, 0.0274, 0.0247, 0.0282, 0.0239], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:25:20,569 INFO [finetune.py:976] (5/7) Epoch 18, batch 5150, loss[loss=0.1733, simple_loss=0.2416, pruned_loss=0.05251, over 4763.00 frames. ], tot_loss[loss=0.1772, simple_loss=0.2458, pruned_loss=0.05429, over 954420.79 frames. ], batch size: 28, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:25:53,893 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.053e+02 1.631e+02 1.989e+02 2.441e+02 4.766e+02, threshold=3.977e+02, percent-clipped=3.0 +2023-03-26 22:26:14,502 INFO [finetune.py:976] (5/7) Epoch 18, batch 5200, loss[loss=0.1969, simple_loss=0.2741, pruned_loss=0.05991, over 4915.00 frames. ], tot_loss[loss=0.1803, simple_loss=0.2499, pruned_loss=0.05538, over 954992.23 frames. ], batch size: 43, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:26:22,141 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=102576.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 22:26:53,515 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.49 vs. 
limit=5.0 +2023-03-26 22:26:56,628 INFO [finetune.py:976] (5/7) Epoch 18, batch 5250, loss[loss=0.1754, simple_loss=0.2297, pruned_loss=0.06057, over 3758.00 frames. ], tot_loss[loss=0.1826, simple_loss=0.253, pruned_loss=0.05608, over 954432.63 frames. ], batch size: 16, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:26:56,754 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2310, 2.2527, 2.8128, 1.5635, 2.4710, 2.5535, 2.0665, 2.7777], + device='cuda:5'), covar=tensor([0.1599, 0.1935, 0.1527, 0.2544, 0.1059, 0.1891, 0.2838, 0.1041], + device='cuda:5'), in_proj_covar=tensor([0.0193, 0.0204, 0.0190, 0.0190, 0.0176, 0.0214, 0.0218, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:26:58,553 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=102624.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 22:27:11,989 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=102643.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:27:17,764 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.182e+02 1.670e+02 1.981e+02 2.217e+02 4.217e+02, threshold=3.962e+02, percent-clipped=1.0 +2023-03-26 22:27:27,037 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0118, 2.1381, 1.7214, 1.8742, 2.4391, 2.5171, 2.0955, 1.9916], + device='cuda:5'), covar=tensor([0.0373, 0.0344, 0.0598, 0.0344, 0.0275, 0.0503, 0.0312, 0.0384], + device='cuda:5'), in_proj_covar=tensor([0.0095, 0.0107, 0.0143, 0.0110, 0.0099, 0.0108, 0.0098, 0.0110], + device='cuda:5'), out_proj_covar=tensor([7.3927e-05, 8.2310e-05, 1.1271e-04, 8.4591e-05, 7.7309e-05, 7.9578e-05, + 7.3443e-05, 8.3837e-05], device='cuda:5') +2023-03-26 22:27:29,391 INFO [finetune.py:976] (5/7) Epoch 18, batch 5300, loss[loss=0.1666, simple_loss=0.2362, pruned_loss=0.04852, over 4821.00 frames. ], tot_loss[loss=0.1827, simple_loss=0.2536, pruned_loss=0.05593, over 951752.94 frames. ], batch size: 30, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:27:44,023 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=102691.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:28:03,085 INFO [finetune.py:976] (5/7) Epoch 18, batch 5350, loss[loss=0.1542, simple_loss=0.2128, pruned_loss=0.04785, over 4804.00 frames. ], tot_loss[loss=0.1814, simple_loss=0.2528, pruned_loss=0.05498, over 954081.82 frames. 
], batch size: 25, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:28:10,280 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2431, 3.6595, 3.8458, 4.0958, 3.9995, 3.6936, 4.3253, 1.3506], + device='cuda:5'), covar=tensor([0.0842, 0.0846, 0.0827, 0.1021, 0.1158, 0.1695, 0.0711, 0.5659], + device='cuda:5'), in_proj_covar=tensor([0.0355, 0.0246, 0.0281, 0.0293, 0.0335, 0.0285, 0.0305, 0.0299], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:28:12,005 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8773, 1.6871, 1.4969, 1.2471, 1.6490, 1.6213, 1.5806, 2.1923], + device='cuda:5'), covar=tensor([0.3796, 0.3910, 0.3150, 0.3680, 0.3907, 0.2282, 0.3679, 0.1705], + device='cuda:5'), in_proj_covar=tensor([0.0288, 0.0261, 0.0229, 0.0275, 0.0251, 0.0220, 0.0251, 0.0233], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:28:19,658 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4682, 1.3674, 1.5207, 0.7821, 1.5474, 1.4843, 1.4689, 1.3298], + device='cuda:5'), covar=tensor([0.0640, 0.0860, 0.0669, 0.0960, 0.0805, 0.0750, 0.0687, 0.1269], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0134, 0.0140, 0.0120, 0.0124, 0.0138, 0.0140, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:28:25,304 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.195e+02 1.536e+02 1.872e+02 2.228e+02 4.473e+02, threshold=3.745e+02, percent-clipped=1.0 +2023-03-26 22:28:36,910 INFO [finetune.py:976] (5/7) Epoch 18, batch 5400, loss[loss=0.1484, simple_loss=0.2024, pruned_loss=0.04719, over 4164.00 frames. ], tot_loss[loss=0.1794, simple_loss=0.2499, pruned_loss=0.05444, over 952992.87 frames. ], batch size: 18, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:29:10,820 INFO [finetune.py:976] (5/7) Epoch 18, batch 5450, loss[loss=0.1576, simple_loss=0.2339, pruned_loss=0.0406, over 4693.00 frames. ], tot_loss[loss=0.1779, simple_loss=0.2477, pruned_loss=0.054, over 953671.79 frames. ], batch size: 23, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:29:30,969 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=102851.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:29:31,449 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.551e+01 1.511e+02 1.793e+02 2.102e+02 5.113e+02, threshold=3.586e+02, percent-clipped=1.0 +2023-03-26 22:29:44,411 INFO [finetune.py:976] (5/7) Epoch 18, batch 5500, loss[loss=0.166, simple_loss=0.2467, pruned_loss=0.0426, over 4778.00 frames. ], tot_loss[loss=0.1755, simple_loss=0.2449, pruned_loss=0.05306, over 954376.04 frames. 
], batch size: 29, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:29:54,092 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=102887.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:30:05,834 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5347, 1.4840, 2.1497, 1.7837, 1.7912, 3.9316, 1.4522, 1.6515], + device='cuda:5'), covar=tensor([0.0947, 0.1751, 0.1237, 0.0981, 0.1493, 0.0209, 0.1433, 0.1757], + device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0081, 0.0073, 0.0077, 0.0090, 0.0079, 0.0084, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 22:30:12,684 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=102912.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:30:18,101 INFO [finetune.py:976] (5/7) Epoch 18, batch 5550, loss[loss=0.1398, simple_loss=0.2164, pruned_loss=0.03163, over 4808.00 frames. ], tot_loss[loss=0.1768, simple_loss=0.2463, pruned_loss=0.05364, over 953748.56 frames. ], batch size: 25, lr: 3.32e-03, grad_scale: 32.0 +2023-03-26 22:30:36,205 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=102948.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:30:39,503 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.093e+02 1.559e+02 1.902e+02 2.213e+02 4.520e+02, threshold=3.805e+02, percent-clipped=3.0 +2023-03-26 22:30:40,224 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6593, 1.6544, 1.4704, 1.8606, 2.2896, 1.8613, 1.6993, 1.3383], + device='cuda:5'), covar=tensor([0.2158, 0.1974, 0.1926, 0.1486, 0.1756, 0.1235, 0.2307, 0.1938], + device='cuda:5'), in_proj_covar=tensor([0.0240, 0.0207, 0.0212, 0.0191, 0.0240, 0.0185, 0.0214, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:30:50,053 INFO [finetune.py:976] (5/7) Epoch 18, batch 5600, loss[loss=0.1798, simple_loss=0.2469, pruned_loss=0.0563, over 4812.00 frames. ], tot_loss[loss=0.1798, simple_loss=0.2503, pruned_loss=0.05469, over 952003.48 frames. ], batch size: 51, lr: 3.32e-03, grad_scale: 16.0 +2023-03-26 22:31:06,898 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2638, 2.0121, 1.4668, 0.6236, 1.7033, 1.8294, 1.7101, 1.8169], + device='cuda:5'), covar=tensor([0.0793, 0.0713, 0.1290, 0.1890, 0.1292, 0.2215, 0.2060, 0.0787], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0194, 0.0200, 0.0183, 0.0211, 0.0208, 0.0223, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:31:42,262 INFO [finetune.py:976] (5/7) Epoch 18, batch 5650, loss[loss=0.1518, simple_loss=0.2258, pruned_loss=0.03886, over 4790.00 frames. ], tot_loss[loss=0.1801, simple_loss=0.2514, pruned_loss=0.05444, over 951841.93 frames. 
], batch size: 29, lr: 3.31e-03, grad_scale: 16.0 +2023-03-26 22:31:44,118 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5049, 3.3140, 3.1783, 1.6566, 3.4246, 2.6361, 1.1749, 2.3016], + device='cuda:5'), covar=tensor([0.2423, 0.2202, 0.1599, 0.3214, 0.1259, 0.1043, 0.3643, 0.1644], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0175, 0.0158, 0.0128, 0.0159, 0.0122, 0.0147, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 22:31:45,373 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2948, 2.8478, 2.7446, 1.4452, 2.9903, 2.2531, 0.9754, 1.8725], + device='cuda:5'), covar=tensor([0.2664, 0.2236, 0.1707, 0.3150, 0.1398, 0.1029, 0.3610, 0.1669], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0175, 0.0159, 0.0128, 0.0159, 0.0122, 0.0147, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 22:32:09,153 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.800e+01 1.506e+02 1.835e+02 2.191e+02 3.638e+02, threshold=3.670e+02, percent-clipped=0.0 +2023-03-26 22:32:19,852 INFO [finetune.py:976] (5/7) Epoch 18, batch 5700, loss[loss=0.1306, simple_loss=0.1889, pruned_loss=0.03617, over 4119.00 frames. ], tot_loss[loss=0.1778, simple_loss=0.2473, pruned_loss=0.05419, over 932142.73 frames. ], batch size: 18, lr: 3.31e-03, grad_scale: 16.0 +2023-03-26 22:32:48,077 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103098.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:32:48,576 INFO [finetune.py:976] (5/7) Epoch 19, batch 0, loss[loss=0.2339, simple_loss=0.2967, pruned_loss=0.08555, over 4843.00 frames. ], tot_loss[loss=0.2339, simple_loss=0.2967, pruned_loss=0.08555, over 4843.00 frames. ], batch size: 44, lr: 3.31e-03, grad_scale: 16.0 +2023-03-26 22:32:48,576 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 22:33:03,099 INFO [finetune.py:1010] (5/7) Epoch 19, validation: loss=0.1586, simple_loss=0.2282, pruned_loss=0.04454, over 2265189.00 frames. +2023-03-26 22:33:03,100 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 22:33:25,837 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103132.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:33:33,043 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-03-26 22:33:38,061 INFO [finetune.py:976] (5/7) Epoch 19, batch 50, loss[loss=0.1735, simple_loss=0.2459, pruned_loss=0.05054, over 4849.00 frames. ], tot_loss[loss=0.1903, simple_loss=0.2591, pruned_loss=0.06071, over 216707.92 frames. ], batch size: 44, lr: 3.31e-03, grad_scale: 16.0 +2023-03-26 22:33:40,495 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.779e+01 1.456e+02 1.782e+02 2.150e+02 3.860e+02, threshold=3.565e+02, percent-clipped=1.0 +2023-03-26 22:33:42,016 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-03-26 22:33:44,764 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103159.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:33:58,666 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-03-26 22:34:07,711 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103193.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:34:11,663 INFO [finetune.py:976] (5/7) Epoch 19, batch 100, loss[loss=0.1966, simple_loss=0.2486, pruned_loss=0.07228, over 4266.00 frames. ], tot_loss[loss=0.1825, simple_loss=0.2509, pruned_loss=0.05708, over 379645.11 frames. ], batch size: 65, lr: 3.31e-03, grad_scale: 16.0 +2023-03-26 22:34:17,535 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103207.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:34:41,200 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103243.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:34:42,254 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-03-26 22:34:44,090 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.2901, 2.8957, 3.0204, 3.2370, 3.0565, 2.8723, 3.3342, 0.9302], + device='cuda:5'), covar=tensor([0.1198, 0.1106, 0.1126, 0.1207, 0.1722, 0.1821, 0.1196, 0.5632], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0245, 0.0281, 0.0292, 0.0334, 0.0284, 0.0304, 0.0297], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:34:45,882 INFO [finetune.py:976] (5/7) Epoch 19, batch 150, loss[loss=0.1622, simple_loss=0.2248, pruned_loss=0.04978, over 4824.00 frames. ], tot_loss[loss=0.1766, simple_loss=0.2446, pruned_loss=0.0543, over 508922.72 frames. ], batch size: 38, lr: 3.31e-03, grad_scale: 16.0 +2023-03-26 22:34:48,708 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.032e+02 1.463e+02 1.787e+02 2.269e+02 3.542e+02, threshold=3.573e+02, percent-clipped=0.0 +2023-03-26 22:35:03,450 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1289, 1.8513, 2.1691, 2.1167, 1.8510, 1.8749, 2.0593, 1.9857], + device='cuda:5'), covar=tensor([0.3971, 0.4259, 0.3045, 0.4122, 0.5202, 0.4117, 0.4830, 0.3136], + device='cuda:5'), in_proj_covar=tensor([0.0250, 0.0241, 0.0260, 0.0276, 0.0274, 0.0249, 0.0284, 0.0241], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:35:19,727 INFO [finetune.py:976] (5/7) Epoch 19, batch 200, loss[loss=0.1661, simple_loss=0.2414, pruned_loss=0.04543, over 4849.00 frames. ], tot_loss[loss=0.1749, simple_loss=0.2431, pruned_loss=0.0533, over 609223.99 frames. ], batch size: 47, lr: 3.31e-03, grad_scale: 16.0 +2023-03-26 22:35:34,078 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7123, 3.4067, 3.2101, 1.7227, 3.5573, 2.6097, 0.8603, 2.3222], + device='cuda:5'), covar=tensor([0.2251, 0.2231, 0.1564, 0.3206, 0.1078, 0.1056, 0.4351, 0.1626], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0175, 0.0159, 0.0128, 0.0159, 0.0123, 0.0147, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 22:35:53,179 INFO [finetune.py:976] (5/7) Epoch 19, batch 250, loss[loss=0.1874, simple_loss=0.2587, pruned_loss=0.05805, over 4867.00 frames. ], tot_loss[loss=0.1802, simple_loss=0.2492, pruned_loss=0.05556, over 685749.09 frames. 
], batch size: 34, lr: 3.31e-03, grad_scale: 16.0 +2023-03-26 22:35:56,517 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.389e+01 1.572e+02 1.886e+02 2.263e+02 4.128e+02, threshold=3.772e+02, percent-clipped=1.0 +2023-03-26 22:36:05,020 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103366.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 22:36:10,001 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.61 vs. limit=5.0 +2023-03-26 22:36:25,725 INFO [finetune.py:976] (5/7) Epoch 19, batch 300, loss[loss=0.1648, simple_loss=0.2441, pruned_loss=0.04272, over 4908.00 frames. ], tot_loss[loss=0.1827, simple_loss=0.2526, pruned_loss=0.05639, over 746635.32 frames. ], batch size: 35, lr: 3.31e-03, grad_scale: 16.0 +2023-03-26 22:36:39,105 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0 +2023-03-26 22:37:01,223 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-03-26 22:37:02,195 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103427.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 22:37:12,457 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-03-26 22:37:21,696 INFO [finetune.py:976] (5/7) Epoch 19, batch 350, loss[loss=0.2151, simple_loss=0.2838, pruned_loss=0.07317, over 4912.00 frames. ], tot_loss[loss=0.184, simple_loss=0.2542, pruned_loss=0.0569, over 792602.47 frames. ], batch size: 46, lr: 3.31e-03, grad_scale: 16.0 +2023-03-26 22:37:28,082 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.109e+02 1.554e+02 1.898e+02 2.403e+02 5.343e+02, threshold=3.796e+02, percent-clipped=4.0 +2023-03-26 22:37:29,795 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103454.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:38:03,043 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103488.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:38:10,178 INFO [finetune.py:976] (5/7) Epoch 19, batch 400, loss[loss=0.2124, simple_loss=0.2819, pruned_loss=0.07144, over 4896.00 frames. ], tot_loss[loss=0.1836, simple_loss=0.254, pruned_loss=0.05663, over 828725.82 frames. ], batch size: 35, lr: 3.31e-03, grad_scale: 16.0 +2023-03-26 22:38:13,990 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. 
limit=2.0 +2023-03-26 22:38:16,109 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103507.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:38:22,601 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2475, 1.2896, 1.5179, 1.0250, 1.3759, 1.3995, 1.3050, 1.5437], + device='cuda:5'), covar=tensor([0.1077, 0.1895, 0.1156, 0.1365, 0.0778, 0.1219, 0.2535, 0.0820], + device='cuda:5'), in_proj_covar=tensor([0.0191, 0.0202, 0.0189, 0.0188, 0.0173, 0.0212, 0.0216, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:38:24,879 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5945, 1.4921, 2.1750, 3.3934, 2.2537, 2.4541, 1.0180, 2.8591], + device='cuda:5'), covar=tensor([0.1717, 0.1442, 0.1299, 0.0624, 0.0776, 0.1351, 0.1793, 0.0468], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0115, 0.0132, 0.0163, 0.0099, 0.0134, 0.0123, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 22:38:39,517 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103543.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:38:43,097 INFO [finetune.py:976] (5/7) Epoch 19, batch 450, loss[loss=0.1723, simple_loss=0.2506, pruned_loss=0.04695, over 4836.00 frames. ], tot_loss[loss=0.1813, simple_loss=0.2519, pruned_loss=0.05542, over 858446.14 frames. ], batch size: 39, lr: 3.31e-03, grad_scale: 16.0 +2023-03-26 22:38:45,991 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.312e+01 1.502e+02 1.696e+02 2.061e+02 2.854e+02, threshold=3.392e+02, percent-clipped=0.0 +2023-03-26 22:38:47,238 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=103555.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:38:51,356 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-03-26 22:39:04,050 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-03-26 22:39:19,609 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=103591.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:39:24,373 INFO [finetune.py:976] (5/7) Epoch 19, batch 500, loss[loss=0.2145, simple_loss=0.2706, pruned_loss=0.07916, over 4922.00 frames. ], tot_loss[loss=0.1807, simple_loss=0.2502, pruned_loss=0.05561, over 880648.90 frames. ], batch size: 37, lr: 3.31e-03, grad_scale: 16.0 +2023-03-26 22:39:57,691 INFO [finetune.py:976] (5/7) Epoch 19, batch 550, loss[loss=0.1488, simple_loss=0.2242, pruned_loss=0.03669, over 4832.00 frames. ], tot_loss[loss=0.1785, simple_loss=0.2473, pruned_loss=0.05485, over 897317.49 frames. 
], batch size: 25, lr: 3.31e-03, grad_scale: 16.0 +2023-03-26 22:40:00,581 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.093e+02 1.543e+02 1.834e+02 2.179e+02 4.966e+02, threshold=3.668e+02, percent-clipped=2.0 +2023-03-26 22:40:29,604 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6623, 1.5541, 1.0946, 0.2973, 1.2787, 1.5059, 1.5251, 1.4881], + device='cuda:5'), covar=tensor([0.1015, 0.0876, 0.1354, 0.1983, 0.1441, 0.2357, 0.2226, 0.0845], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0191, 0.0198, 0.0181, 0.0209, 0.0205, 0.0219, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:40:31,341 INFO [finetune.py:976] (5/7) Epoch 19, batch 600, loss[loss=0.1631, simple_loss=0.242, pruned_loss=0.0421, over 4932.00 frames. ], tot_loss[loss=0.1792, simple_loss=0.2477, pruned_loss=0.0553, over 909072.48 frames. ], batch size: 38, lr: 3.31e-03, grad_scale: 16.0 +2023-03-26 22:40:47,251 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103722.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 22:40:58,724 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103740.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:41:04,044 INFO [finetune.py:976] (5/7) Epoch 19, batch 650, loss[loss=0.2112, simple_loss=0.2809, pruned_loss=0.07076, over 4924.00 frames. ], tot_loss[loss=0.1792, simple_loss=0.2492, pruned_loss=0.05461, over 921998.88 frames. ], batch size: 42, lr: 3.31e-03, grad_scale: 16.0 +2023-03-26 22:41:06,467 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.539e+02 1.795e+02 2.169e+02 3.837e+02, threshold=3.591e+02, percent-clipped=1.0 +2023-03-26 22:41:07,211 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103754.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:41:15,347 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103765.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:41:20,076 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.84 vs. limit=5.0 +2023-03-26 22:41:30,853 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103788.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:41:37,408 INFO [finetune.py:976] (5/7) Epoch 19, batch 700, loss[loss=0.1802, simple_loss=0.2631, pruned_loss=0.04867, over 4864.00 frames. ], tot_loss[loss=0.1806, simple_loss=0.251, pruned_loss=0.05508, over 930873.99 frames. 
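[editorial sketch] In every optim.py record above, the clipping threshold equals Clipping_scale times the logged median grad-norm (e.g. 2.0 * 1.834e+02 = 3.668e+02 in the batch-550 record). A minimal sketch of deriving such a report from a window of recent gradient norms; the function below is illustrative, not icefall's actual optim.py.

```python
# Hedged sketch of the "Clipping_scale=2.0, grad-norm quartiles ...
# threshold=... percent-clipped=..." records: threshold = scale * median.
import torch

def clipping_report(grad_norms: torch.Tensor, clipping_scale: float = 2.0):
    # Quartiles (min, 25%, median, 75%, max) of recent gradient norms.
    q = torch.quantile(grad_norms, torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]))
    threshold = clipping_scale * q[2]                  # scale * median
    percent_clipped = 100.0 * (grad_norms > threshold).float().mean()
    return q, threshold, percent_clipped

# Uses the quartiles from the batch-550 record; with only these five
# sample norms, the lone value above 366.8 gives percent_clipped=20.
q, thr, pct = clipping_report(torch.tensor([109.3, 154.3, 183.4, 217.9, 496.6]))
print(thr.item(), pct.item())
```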
], batch size: 34, lr: 3.31e-03, grad_scale: 16.0 +2023-03-26 22:41:38,897 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103801.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:41:39,437 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=103802.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:42:02,018 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103826.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:42:03,720 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9085, 1.9044, 1.9812, 1.2272, 1.9532, 1.9158, 2.0066, 1.6324], + device='cuda:5'), covar=tensor([0.0641, 0.0666, 0.0625, 0.0896, 0.0655, 0.0674, 0.0613, 0.1120], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0134, 0.0140, 0.0120, 0.0124, 0.0137, 0.0139, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:42:13,880 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=103836.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:42:25,801 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5783, 1.4921, 2.0516, 3.0787, 1.9989, 2.2920, 1.0012, 2.6220], + device='cuda:5'), covar=tensor([0.1633, 0.1467, 0.1258, 0.0644, 0.0841, 0.1441, 0.1808, 0.0528], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0117, 0.0134, 0.0165, 0.0100, 0.0137, 0.0125, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 22:42:26,321 INFO [finetune.py:976] (5/7) Epoch 19, batch 750, loss[loss=0.1951, simple_loss=0.2639, pruned_loss=0.06313, over 4845.00 frames. ], tot_loss[loss=0.1814, simple_loss=0.2516, pruned_loss=0.05563, over 935548.16 frames. ], batch size: 49, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:42:33,271 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.068e+02 1.573e+02 1.871e+02 2.192e+02 5.260e+02, threshold=3.742e+02, percent-clipped=2.0 +2023-03-26 22:42:50,350 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6444, 1.1718, 0.8171, 1.5396, 2.0886, 0.9921, 1.3999, 1.4456], + device='cuda:5'), covar=tensor([0.1544, 0.2147, 0.1919, 0.1180, 0.1836, 0.1978, 0.1497, 0.2075], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0110, 0.0092, 0.0120, 0.0093, 0.0099, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 22:43:03,210 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103876.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:43:15,898 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.71 vs. limit=2.0 +2023-03-26 22:43:17,429 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0480, 1.9135, 1.6446, 1.7657, 1.7868, 1.7567, 1.8278, 2.4451], + device='cuda:5'), covar=tensor([0.4100, 0.4225, 0.3681, 0.3777, 0.4136, 0.2788, 0.3825, 0.1900], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0261, 0.0230, 0.0276, 0.0252, 0.0220, 0.0252, 0.0232], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:43:22,083 INFO [finetune.py:976] (5/7) Epoch 19, batch 800, loss[loss=0.188, simple_loss=0.2598, pruned_loss=0.05804, over 4817.00 frames. 
], tot_loss[loss=0.1808, simple_loss=0.2511, pruned_loss=0.05529, over 939636.39 frames. ], batch size: 33, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:43:48,638 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103937.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 22:43:55,832 INFO [finetune.py:976] (5/7) Epoch 19, batch 850, loss[loss=0.1866, simple_loss=0.2651, pruned_loss=0.05403, over 4820.00 frames. ], tot_loss[loss=0.1787, simple_loss=0.2488, pruned_loss=0.05429, over 944389.32 frames. ], batch size: 38, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:43:58,240 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.499e+02 1.798e+02 2.103e+02 3.961e+02, threshold=3.597e+02, percent-clipped=1.0 +2023-03-26 22:44:25,966 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103990.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:44:30,242 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103997.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:44:31,320 INFO [finetune.py:976] (5/7) Epoch 19, batch 900, loss[loss=0.2517, simple_loss=0.2963, pruned_loss=0.1036, over 4234.00 frames. ], tot_loss[loss=0.1784, simple_loss=0.2477, pruned_loss=0.05456, over 945978.96 frames. ], batch size: 65, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:44:46,733 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104022.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 22:44:53,451 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-03-26 22:45:05,988 INFO [finetune.py:976] (5/7) Epoch 19, batch 950, loss[loss=0.2231, simple_loss=0.2747, pruned_loss=0.08572, over 4869.00 frames. ], tot_loss[loss=0.1773, simple_loss=0.2465, pruned_loss=0.05402, over 949172.40 frames. ], batch size: 34, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:45:06,104 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104049.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:45:07,330 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104051.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:45:08,386 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.542e+01 1.532e+02 1.748e+02 2.079e+02 4.067e+02, threshold=3.497e+02, percent-clipped=1.0 +2023-03-26 22:45:11,552 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104058.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:45:18,740 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=104070.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 22:45:36,943 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=104096.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:45:38,694 INFO [finetune.py:976] (5/7) Epoch 19, batch 1000, loss[loss=0.2358, simple_loss=0.3045, pruned_loss=0.08351, over 4902.00 frames. ], tot_loss[loss=0.1787, simple_loss=0.2482, pruned_loss=0.05461, over 946909.68 frames. 
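[editorial sketch] The zipformer.py:1188 records (warmup_begin, warmup_end, batch_count, num_to_drop, layers_to_drop) track a stochastic layer-drop schedule: early in a stack's warmup window whole layers are skipped at random, while at batch_count around 1e5 most records show num_to_drop=0 with an occasional single drop. A minimal sketch assuming a linear decay to a small floor probability; the function name, decay rule, and probabilities are guesses, not the actual zipformer code.

```python
# Hedged sketch of a warmup-dependent layer-drop schedule consistent with
# the logged records; all constants here are illustrative assumptions.
import random

def pick_layers_to_drop(batch_count: float, warmup_begin: float,
                        warmup_end: float, num_layers: int,
                        max_drop_prob: float = 0.5,
                        min_drop_prob: float = 0.05) -> set:
    # Fraction of the warmup window still ahead, clamped to [0, 1]:
    # 1.0 before warmup_begin, 0.0 once batch_count passes warmup_end.
    frac = (warmup_end - batch_count) / (warmup_end - warmup_begin)
    frac = min(1.0, max(0.0, frac))
    drop_prob = min_drop_prob + (max_drop_prob - min_drop_prob) * frac
    return {i for i in range(num_layers) if random.random() < drop_prob}

# Early in warmup, drops are common; late in training the probability has
# decayed to the small floor, so the set is usually (not always) empty.
print(pick_layers_to_drop(5.0, 3333.3, 4000.0, num_layers=4))
print(pick_layers_to_drop(103937.0, 666.7, 1333.3, num_layers=4))
```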
], batch size: 43, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:45:45,440 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104110.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:45:52,086 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=104121.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:46:12,347 INFO [finetune.py:976] (5/7) Epoch 19, batch 1050, loss[loss=0.1514, simple_loss=0.2281, pruned_loss=0.03737, over 4734.00 frames. ], tot_loss[loss=0.1806, simple_loss=0.2512, pruned_loss=0.05498, over 950575.05 frames. ], batch size: 27, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:46:14,763 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.111e+02 1.591e+02 1.940e+02 2.273e+02 3.456e+02, threshold=3.881e+02, percent-clipped=0.0 +2023-03-26 22:46:45,924 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6809, 1.5812, 1.4092, 1.7666, 2.0676, 1.7926, 1.2791, 1.4047], + device='cuda:5'), covar=tensor([0.2160, 0.2035, 0.1959, 0.1587, 0.1587, 0.1196, 0.2386, 0.2035], + device='cuda:5'), in_proj_covar=tensor([0.0243, 0.0209, 0.0214, 0.0193, 0.0242, 0.0187, 0.0216, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:46:53,937 INFO [finetune.py:976] (5/7) Epoch 19, batch 1100, loss[loss=0.2166, simple_loss=0.287, pruned_loss=0.0731, over 4887.00 frames. ], tot_loss[loss=0.182, simple_loss=0.2529, pruned_loss=0.05553, over 952193.48 frames. ], batch size: 35, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:47:02,609 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-26 22:47:16,923 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=104232.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 22:47:39,152 INFO [finetune.py:976] (5/7) Epoch 19, batch 1150, loss[loss=0.1631, simple_loss=0.2456, pruned_loss=0.04029, over 4819.00 frames. ], tot_loss[loss=0.1828, simple_loss=0.2537, pruned_loss=0.05593, over 953099.29 frames. ], batch size: 33, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:47:47,026 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.055e+02 1.710e+02 1.992e+02 2.366e+02 4.129e+02, threshold=3.984e+02, percent-clipped=1.0 +2023-03-26 22:47:50,720 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-03-26 22:48:10,446 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-03-26 22:48:22,454 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-03-26 22:48:25,526 INFO [finetune.py:976] (5/7) Epoch 19, batch 1200, loss[loss=0.1747, simple_loss=0.2409, pruned_loss=0.05424, over 4805.00 frames. ], tot_loss[loss=0.1817, simple_loss=0.252, pruned_loss=0.05567, over 952793.44 frames. ], batch size: 40, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:49:05,634 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=104346.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:49:07,373 INFO [finetune.py:976] (5/7) Epoch 19, batch 1250, loss[loss=0.1644, simple_loss=0.2383, pruned_loss=0.04525, over 4834.00 frames. ], tot_loss[loss=0.18, simple_loss=0.2496, pruned_loss=0.05521, over 951747.82 frames. 
], batch size: 47, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:49:10,326 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.979e+01 1.472e+02 1.754e+02 2.218e+02 4.171e+02, threshold=3.509e+02, percent-clipped=1.0 +2023-03-26 22:49:10,411 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=104353.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:49:12,809 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0391, 0.9763, 0.9987, 0.4545, 0.9691, 1.1614, 1.2696, 0.9617], + device='cuda:5'), covar=tensor([0.0916, 0.0654, 0.0651, 0.0581, 0.0648, 0.0743, 0.0519, 0.0828], + device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0151, 0.0124, 0.0126, 0.0130, 0.0128, 0.0142, 0.0148], + device='cuda:5'), out_proj_covar=tensor([9.1021e-05, 1.0925e-04, 8.8856e-05, 8.9371e-05, 9.1788e-05, 9.1947e-05, + 1.0164e-04, 1.0630e-04], device='cuda:5') +2023-03-26 22:49:20,631 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8124, 1.7259, 1.5513, 1.3993, 1.9176, 1.6204, 1.7767, 1.8272], + device='cuda:5'), covar=tensor([0.1426, 0.1962, 0.2927, 0.2455, 0.2534, 0.1625, 0.2796, 0.1728], + device='cuda:5'), in_proj_covar=tensor([0.0185, 0.0188, 0.0235, 0.0253, 0.0245, 0.0203, 0.0214, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:49:39,162 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104396.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:49:40,605 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-03-26 22:49:41,416 INFO [finetune.py:976] (5/7) Epoch 19, batch 1300, loss[loss=0.1952, simple_loss=0.2642, pruned_loss=0.06314, over 4825.00 frames. ], tot_loss[loss=0.1779, simple_loss=0.2472, pruned_loss=0.05431, over 954610.05 frames. ], batch size: 40, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:49:45,744 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=104405.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:49:56,434 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104421.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:50:07,990 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7977, 1.6282, 1.4378, 1.3032, 1.5786, 1.5351, 1.5775, 2.0960], + device='cuda:5'), covar=tensor([0.3538, 0.3361, 0.2774, 0.3261, 0.3403, 0.2021, 0.3222, 0.1686], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0261, 0.0230, 0.0275, 0.0251, 0.0220, 0.0252, 0.0232], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:50:08,521 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1477, 3.6300, 3.7769, 3.9781, 3.9303, 3.7177, 4.2587, 1.3547], + device='cuda:5'), covar=tensor([0.0846, 0.0837, 0.0887, 0.1093, 0.1243, 0.1549, 0.0739, 0.5867], + device='cuda:5'), in_proj_covar=tensor([0.0347, 0.0241, 0.0278, 0.0288, 0.0329, 0.0280, 0.0300, 0.0293], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:50:10,348 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=104444.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:50:14,762 INFO [finetune.py:976] (5/7) Epoch 19, batch 1350, loss[loss=0.2484, simple_loss=0.3182, pruned_loss=0.08934, over 4926.00 frames. 
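[editorial sketch] The recurring "attn_weights_entropy = tensor([...])" diagnostics, eight columns per row, are plausibly per-head entropies of the softmaxed attention weights: low values flag heads that have collapsed onto a few keys, high values diffuse heads. A hedged sketch of such a diagnostic; the exact reduction used by zipformer.py is an assumption here.

```python
# Hedged sketch: per-head mean entropy of attention distributions.
import torch

def attn_weights_entropy(attn_weights: torch.Tensor) -> torch.Tensor:
    # attn_weights: (num_heads, num_queries, num_keys), rows sum to 1.
    p = attn_weights.clamp_min(1e-20)
    entropy = -(p * p.log()).sum(dim=-1)   # one value per (head, query)
    return entropy.mean(dim=-1)            # average over queries: per head

weights = torch.softmax(torch.randn(8, 50, 50), dim=-1)
print(attn_weights_entropy(weights))       # eight per-head entropies
```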
], tot_loss[loss=0.1795, simple_loss=0.2484, pruned_loss=0.05534, over 956438.69 frames. ], batch size: 38, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:50:17,644 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.092e+02 1.617e+02 1.931e+02 2.310e+02 3.973e+02, threshold=3.863e+02, percent-clipped=4.0 +2023-03-26 22:50:25,181 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-03-26 22:50:29,050 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=104469.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:50:38,096 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9600, 2.5288, 3.2081, 2.1607, 2.9248, 3.2986, 2.3921, 3.4334], + device='cuda:5'), covar=tensor([0.1016, 0.1726, 0.1216, 0.1879, 0.0732, 0.1119, 0.2268, 0.0600], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0203, 0.0190, 0.0188, 0.0175, 0.0213, 0.0217, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:50:48,509 INFO [finetune.py:976] (5/7) Epoch 19, batch 1400, loss[loss=0.1832, simple_loss=0.265, pruned_loss=0.05072, over 4916.00 frames. ], tot_loss[loss=0.1827, simple_loss=0.2523, pruned_loss=0.05656, over 958426.35 frames. ], batch size: 38, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:51:05,772 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.60 vs. limit=2.0 +2023-03-26 22:51:10,567 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104532.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 22:51:13,690 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.62 vs. limit=2.0 +2023-03-26 22:51:18,418 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7422, 3.7067, 3.5253, 1.7913, 3.8740, 2.8477, 0.8493, 2.5975], + device='cuda:5'), covar=tensor([0.2228, 0.1850, 0.1536, 0.3340, 0.0932, 0.1008, 0.4455, 0.1419], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0177, 0.0161, 0.0129, 0.0160, 0.0123, 0.0148, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 22:51:21,266 INFO [finetune.py:976] (5/7) Epoch 19, batch 1450, loss[loss=0.202, simple_loss=0.2727, pruned_loss=0.06565, over 4869.00 frames. ], tot_loss[loss=0.1832, simple_loss=0.2533, pruned_loss=0.0565, over 954907.72 frames. 
], batch size: 31, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:51:24,644 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.038e+02 1.650e+02 1.913e+02 2.290e+02 4.485e+02, threshold=3.826e+02, percent-clipped=3.0 +2023-03-26 22:51:24,825 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8815, 1.3838, 1.9593, 1.8988, 1.7055, 1.6391, 1.8447, 1.7920], + device='cuda:5'), covar=tensor([0.4051, 0.4030, 0.3124, 0.3551, 0.4581, 0.3706, 0.4221, 0.3096], + device='cuda:5'), in_proj_covar=tensor([0.0251, 0.0242, 0.0260, 0.0278, 0.0276, 0.0250, 0.0285, 0.0242], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:51:35,738 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([5.3529, 4.6532, 4.9323, 5.2419, 5.0951, 4.8640, 5.4993, 1.8967], + device='cuda:5'), covar=tensor([0.0697, 0.0934, 0.0836, 0.0907, 0.1103, 0.1470, 0.0525, 0.5804], + device='cuda:5'), in_proj_covar=tensor([0.0347, 0.0242, 0.0279, 0.0288, 0.0331, 0.0280, 0.0301, 0.0294], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:51:42,883 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=104580.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:52:02,825 INFO [finetune.py:976] (5/7) Epoch 19, batch 1500, loss[loss=0.1863, simple_loss=0.2498, pruned_loss=0.06135, over 4059.00 frames. ], tot_loss[loss=0.1836, simple_loss=0.2537, pruned_loss=0.0567, over 954045.09 frames. ], batch size: 65, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:52:03,240 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-03-26 22:52:33,836 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104646.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:52:35,529 INFO [finetune.py:976] (5/7) Epoch 19, batch 1550, loss[loss=0.1543, simple_loss=0.2102, pruned_loss=0.04919, over 4835.00 frames. ], tot_loss[loss=0.1827, simple_loss=0.2529, pruned_loss=0.0562, over 955620.75 frames. ], batch size: 30, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:52:40,199 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.878e+01 1.494e+02 1.864e+02 2.283e+02 3.386e+02, threshold=3.728e+02, percent-clipped=0.0 +2023-03-26 22:52:40,321 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104653.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:52:42,726 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-03-26 22:53:26,187 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=104694.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:53:29,192 INFO [finetune.py:976] (5/7) Epoch 19, batch 1600, loss[loss=0.1676, simple_loss=0.2356, pruned_loss=0.04979, over 4818.00 frames. ], tot_loss[loss=0.1793, simple_loss=0.2495, pruned_loss=0.05452, over 954934.36 frames. 
], batch size: 38, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:53:35,259 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=104701.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:53:38,279 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104705.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:53:51,377 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6934, 3.5574, 3.3747, 1.6581, 3.6635, 2.8192, 1.0440, 2.6995], + device='cuda:5'), covar=tensor([0.2289, 0.1869, 0.1545, 0.3411, 0.1121, 0.0978, 0.4218, 0.1368], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0176, 0.0160, 0.0128, 0.0160, 0.0123, 0.0148, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 22:53:58,690 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.38 vs. limit=5.0 +2023-03-26 22:54:01,667 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0170, 1.8325, 2.2574, 3.7647, 2.6621, 2.6425, 0.9784, 3.0917], + device='cuda:5'), covar=tensor([0.1535, 0.1336, 0.1355, 0.0555, 0.0651, 0.1874, 0.1788, 0.0413], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0115, 0.0131, 0.0162, 0.0097, 0.0134, 0.0122, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 22:54:02,331 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0511, 1.8139, 1.6688, 1.5796, 1.7961, 1.7703, 1.8066, 2.4473], + device='cuda:5'), covar=tensor([0.3587, 0.4277, 0.3105, 0.3706, 0.3993, 0.2462, 0.3682, 0.1758], + device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0261, 0.0230, 0.0275, 0.0251, 0.0221, 0.0251, 0.0231], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:54:09,602 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.7079, 1.8184, 1.8987, 0.9672, 1.9490, 2.1741, 2.0335, 1.6936], + device='cuda:5'), covar=tensor([0.0956, 0.0625, 0.0491, 0.0662, 0.0472, 0.0651, 0.0404, 0.0705], + device='cuda:5'), in_proj_covar=tensor([0.0125, 0.0152, 0.0125, 0.0126, 0.0132, 0.0129, 0.0142, 0.0149], + device='cuda:5'), out_proj_covar=tensor([9.1676e-05, 1.0994e-04, 8.9412e-05, 8.9737e-05, 9.3005e-05, 9.2714e-05, + 1.0188e-04, 1.0683e-04], device='cuda:5') +2023-03-26 22:54:11,341 INFO [finetune.py:976] (5/7) Epoch 19, batch 1650, loss[loss=0.141, simple_loss=0.2156, pruned_loss=0.03323, over 4865.00 frames. ], tot_loss[loss=0.1781, simple_loss=0.2473, pruned_loss=0.05446, over 952462.50 frames. 
], batch size: 34, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:54:13,774 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.017e+02 1.513e+02 1.754e+02 2.121e+02 3.523e+02, threshold=3.508e+02, percent-clipped=0.0 +2023-03-26 22:54:13,852 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=104753.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 22:54:15,151 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0178, 1.9315, 1.6204, 1.7677, 1.9765, 1.7427, 2.1644, 2.0182], + device='cuda:5'), covar=tensor([0.1285, 0.1876, 0.2889, 0.2451, 0.2607, 0.1575, 0.3131, 0.1641], + device='cuda:5'), in_proj_covar=tensor([0.0185, 0.0188, 0.0235, 0.0254, 0.0246, 0.0203, 0.0214, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:54:20,496 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9425, 1.3845, 1.9814, 1.9325, 1.7429, 1.6802, 1.8206, 1.7998], + device='cuda:5'), covar=tensor([0.3876, 0.3769, 0.2925, 0.3268, 0.4243, 0.3516, 0.4076, 0.2886], + device='cuda:5'), in_proj_covar=tensor([0.0251, 0.0241, 0.0260, 0.0278, 0.0276, 0.0250, 0.0286, 0.0242], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:54:28,677 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8137, 3.6350, 3.5638, 2.0752, 3.7947, 2.9251, 1.0231, 2.7496], + device='cuda:5'), covar=tensor([0.2601, 0.2239, 0.1392, 0.3023, 0.1092, 0.1036, 0.4245, 0.1587], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0176, 0.0160, 0.0128, 0.0160, 0.0123, 0.0147, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 22:54:29,927 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.6258, 1.6418, 1.6459, 0.8710, 1.7205, 1.9525, 1.9435, 1.4614], + device='cuda:5'), covar=tensor([0.1112, 0.0751, 0.0572, 0.0676, 0.0578, 0.0676, 0.0343, 0.0945], + device='cuda:5'), in_proj_covar=tensor([0.0125, 0.0151, 0.0125, 0.0126, 0.0132, 0.0129, 0.0141, 0.0148], + device='cuda:5'), out_proj_covar=tensor([9.1477e-05, 1.0959e-04, 8.9192e-05, 8.9388e-05, 9.2847e-05, 9.2401e-05, + 1.0155e-04, 1.0663e-04], device='cuda:5') +2023-03-26 22:54:44,701 INFO [finetune.py:976] (5/7) Epoch 19, batch 1700, loss[loss=0.144, simple_loss=0.2078, pruned_loss=0.04011, over 4910.00 frames. ], tot_loss[loss=0.1773, simple_loss=0.2458, pruned_loss=0.05435, over 953510.15 frames. ], batch size: 36, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:55:17,898 INFO [finetune.py:976] (5/7) Epoch 19, batch 1750, loss[loss=0.217, simple_loss=0.2884, pruned_loss=0.07282, over 4761.00 frames. ], tot_loss[loss=0.1786, simple_loss=0.2472, pruned_loss=0.05497, over 954710.40 frames. 
], batch size: 54, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:55:20,304 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.085e+02 1.630e+02 1.888e+02 2.368e+02 5.925e+02, threshold=3.776e+02, percent-clipped=5.0 +2023-03-26 22:55:39,827 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5252, 1.4067, 1.9451, 1.8685, 1.6283, 3.6081, 1.3977, 1.6049], + device='cuda:5'), covar=tensor([0.1216, 0.2358, 0.1214, 0.1136, 0.1852, 0.0266, 0.2024, 0.2329], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0077, 0.0091, 0.0080, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 22:55:51,603 INFO [finetune.py:976] (5/7) Epoch 19, batch 1800, loss[loss=0.2037, simple_loss=0.2741, pruned_loss=0.06667, over 4908.00 frames. ], tot_loss[loss=0.182, simple_loss=0.2517, pruned_loss=0.0561, over 957051.80 frames. ], batch size: 37, lr: 3.30e-03, grad_scale: 16.0 +2023-03-26 22:55:56,577 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7826, 1.3434, 0.7753, 1.5941, 2.0255, 1.3634, 1.6084, 1.5977], + device='cuda:5'), covar=tensor([0.1544, 0.2142, 0.2128, 0.1203, 0.2049, 0.2037, 0.1435, 0.2128], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0110, 0.0092, 0.0119, 0.0093, 0.0099, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 22:56:13,532 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4853, 1.4882, 1.2681, 1.5059, 1.7796, 1.7427, 1.5224, 1.3246], + device='cuda:5'), covar=tensor([0.0349, 0.0323, 0.0620, 0.0287, 0.0223, 0.0514, 0.0351, 0.0404], + device='cuda:5'), in_proj_covar=tensor([0.0096, 0.0108, 0.0142, 0.0110, 0.0099, 0.0109, 0.0099, 0.0110], + device='cuda:5'), out_proj_covar=tensor([7.4310e-05, 8.2902e-05, 1.1189e-04, 8.4876e-05, 7.7442e-05, 8.0557e-05, + 7.4147e-05, 8.4326e-05], device='cuda:5') +2023-03-26 22:56:18,867 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.25 vs. limit=5.0 +2023-03-26 22:56:25,142 INFO [finetune.py:976] (5/7) Epoch 19, batch 1850, loss[loss=0.1749, simple_loss=0.2524, pruned_loss=0.04866, over 4774.00 frames. ], tot_loss[loss=0.183, simple_loss=0.2535, pruned_loss=0.05628, over 957939.41 frames. ], batch size: 51, lr: 3.30e-03, grad_scale: 32.0 +2023-03-26 22:56:27,538 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.864e+01 1.560e+02 1.785e+02 2.312e+02 4.235e+02, threshold=3.569e+02, percent-clipped=1.0 +2023-03-26 22:57:00,528 INFO [finetune.py:976] (5/7) Epoch 19, batch 1900, loss[loss=0.1722, simple_loss=0.2571, pruned_loss=0.04367, over 4904.00 frames. ], tot_loss[loss=0.1823, simple_loss=0.2537, pruned_loss=0.05545, over 957352.86 frames. 
], batch size: 36, lr: 3.30e-03, grad_scale: 32.0 +2023-03-26 22:57:03,214 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.3534, 3.8043, 3.9667, 4.2098, 4.1128, 3.8421, 4.4487, 1.4508], + device='cuda:5'), covar=tensor([0.0771, 0.0795, 0.0817, 0.0890, 0.1279, 0.1704, 0.0701, 0.5581], + device='cuda:5'), in_proj_covar=tensor([0.0348, 0.0242, 0.0280, 0.0291, 0.0333, 0.0282, 0.0302, 0.0295], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:57:11,599 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7060, 3.8284, 3.5626, 1.8922, 3.9916, 2.9240, 1.0412, 2.6953], + device='cuda:5'), covar=tensor([0.2369, 0.1832, 0.1407, 0.3048, 0.0898, 0.1027, 0.3912, 0.1313], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0177, 0.0160, 0.0128, 0.0160, 0.0123, 0.0147, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 22:57:27,072 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6841, 1.4716, 1.3883, 1.4941, 1.8954, 1.9642, 1.5812, 1.4169], + device='cuda:5'), covar=tensor([0.0272, 0.0367, 0.0578, 0.0317, 0.0222, 0.0406, 0.0369, 0.0406], + device='cuda:5'), in_proj_covar=tensor([0.0096, 0.0107, 0.0142, 0.0110, 0.0099, 0.0109, 0.0099, 0.0110], + device='cuda:5'), out_proj_covar=tensor([7.4213e-05, 8.2691e-05, 1.1174e-04, 8.4607e-05, 7.7214e-05, 8.0343e-05, + 7.3950e-05, 8.4134e-05], device='cuda:5') +2023-03-26 22:57:32,381 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5471, 1.6398, 2.2308, 1.9709, 1.9483, 4.3277, 1.5983, 1.8914], + device='cuda:5'), covar=tensor([0.0947, 0.1720, 0.1085, 0.0927, 0.1440, 0.0161, 0.1367, 0.1667], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0077, 0.0091, 0.0080, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 22:57:40,549 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1321, 2.1907, 2.3498, 0.9034, 2.6412, 2.8007, 2.4706, 2.0755], + device='cuda:5'), covar=tensor([0.0848, 0.0665, 0.0392, 0.0719, 0.0455, 0.0621, 0.0364, 0.0647], + device='cuda:5'), in_proj_covar=tensor([0.0126, 0.0153, 0.0126, 0.0127, 0.0133, 0.0130, 0.0143, 0.0149], + device='cuda:5'), out_proj_covar=tensor([9.2161e-05, 1.1079e-04, 8.9895e-05, 9.0097e-05, 9.3537e-05, 9.3446e-05, + 1.0233e-04, 1.0749e-04], device='cuda:5') +2023-03-26 22:57:41,794 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2232, 2.0035, 1.8313, 1.9755, 1.9081, 1.9107, 1.9237, 2.6925], + device='cuda:5'), covar=tensor([0.3497, 0.4067, 0.3235, 0.3648, 0.3976, 0.2305, 0.3622, 0.1483], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0261, 0.0230, 0.0276, 0.0252, 0.0221, 0.0252, 0.0232], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:57:42,256 INFO [finetune.py:976] (5/7) Epoch 19, batch 1950, loss[loss=0.1734, simple_loss=0.235, pruned_loss=0.05586, over 4896.00 frames. ], tot_loss[loss=0.1806, simple_loss=0.2519, pruned_loss=0.05469, over 956098.55 frames. 
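[editorial sketch] The tot_loss frame counts above climb toward roughly 9.5e5 frames and then plateau rather than growing without bound, which is consistent with a frame-weighted running average that exponentially forgets old batches. A minimal sketch of such an accumulator; the decay constant is a guess chosen only to reproduce the observed plateau, not the actual finetune.py value.

```python
# Hedged sketch: exponentially-decayed, frame-weighted running loss.
# The frame total saturates near batch_frames / (1 - decay).
class RunningLoss:
    def __init__(self, decay: float = 0.995):
        self.decay = decay
        self.weighted_loss = 0.0
        self.frames = 0.0

    def update(self, batch_loss: float, batch_frames: float) -> None:
        self.weighted_loss = self.decay * self.weighted_loss \
            + batch_loss * batch_frames
        self.frames = self.decay * self.frames + batch_frames

    @property
    def tot_loss(self) -> float:
        return self.weighted_loss / max(self.frames, 1.0)

avg = RunningLoss()
for _ in range(2000):
    avg.update(batch_loss=0.18, batch_frames=4800.0)
print(round(avg.tot_loss, 3), round(avg.frames))   # 0.18, ~9.6e5 frames
```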
], batch size: 36, lr: 3.30e-03, grad_scale: 32.0 +2023-03-26 22:57:44,664 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.372e+01 1.430e+02 1.759e+02 2.099e+02 5.293e+02, threshold=3.517e+02, percent-clipped=3.0 +2023-03-26 22:58:31,188 INFO [finetune.py:976] (5/7) Epoch 19, batch 2000, loss[loss=0.1841, simple_loss=0.2382, pruned_loss=0.06496, over 4834.00 frames. ], tot_loss[loss=0.1783, simple_loss=0.2488, pruned_loss=0.05388, over 954353.70 frames. ], batch size: 30, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 22:58:54,801 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.65 vs. limit=5.0 +2023-03-26 22:59:15,992 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8559, 1.2416, 1.9148, 1.8448, 1.6520, 1.5941, 1.7632, 1.8024], + device='cuda:5'), covar=tensor([0.3696, 0.3681, 0.3095, 0.3556, 0.4521, 0.3627, 0.4167, 0.2864], + device='cuda:5'), in_proj_covar=tensor([0.0251, 0.0241, 0.0260, 0.0278, 0.0277, 0.0251, 0.0286, 0.0243], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 22:59:17,046 INFO [finetune.py:976] (5/7) Epoch 19, batch 2050, loss[loss=0.1894, simple_loss=0.2529, pruned_loss=0.06297, over 4808.00 frames. ], tot_loss[loss=0.1763, simple_loss=0.2462, pruned_loss=0.05319, over 954641.13 frames. ], batch size: 45, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 22:59:19,901 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.050e+02 1.557e+02 1.803e+02 2.317e+02 4.729e+02, threshold=3.605e+02, percent-clipped=3.0 +2023-03-26 22:59:50,002 INFO [finetune.py:976] (5/7) Epoch 19, batch 2100, loss[loss=0.1567, simple_loss=0.2283, pruned_loss=0.04257, over 4750.00 frames. ], tot_loss[loss=0.1759, simple_loss=0.2455, pruned_loss=0.05312, over 953738.88 frames. ], batch size: 27, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:00:23,803 INFO [finetune.py:976] (5/7) Epoch 19, batch 2150, loss[loss=0.1787, simple_loss=0.2548, pruned_loss=0.05134, over 4764.00 frames. ], tot_loss[loss=0.1784, simple_loss=0.2483, pruned_loss=0.05423, over 953999.31 frames. ], batch size: 27, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:00:26,648 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.009e+02 1.573e+02 1.937e+02 2.406e+02 5.182e+02, threshold=3.875e+02, percent-clipped=4.0 +2023-03-26 23:00:57,386 INFO [finetune.py:976] (5/7) Epoch 19, batch 2200, loss[loss=0.2158, simple_loss=0.2807, pruned_loss=0.07543, over 4683.00 frames. ], tot_loss[loss=0.1813, simple_loss=0.2513, pruned_loss=0.05563, over 951764.87 frames. 
], batch size: 59, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:01:08,785 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5444, 1.4005, 1.2656, 1.5098, 1.6443, 1.5525, 1.2531, 1.3268], + device='cuda:5'), covar=tensor([0.1973, 0.1914, 0.1742, 0.1467, 0.1483, 0.1149, 0.2176, 0.1687], + device='cuda:5'), in_proj_covar=tensor([0.0241, 0.0208, 0.0212, 0.0191, 0.0241, 0.0186, 0.0213, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:01:28,456 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1108, 2.1378, 1.8302, 2.2093, 2.0295, 2.0303, 2.0450, 2.7776], + device='cuda:5'), covar=tensor([0.3720, 0.4992, 0.3567, 0.4227, 0.4661, 0.2502, 0.4725, 0.1620], + device='cuda:5'), in_proj_covar=tensor([0.0288, 0.0263, 0.0231, 0.0278, 0.0253, 0.0222, 0.0253, 0.0233], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:01:30,612 INFO [finetune.py:976] (5/7) Epoch 19, batch 2250, loss[loss=0.2071, simple_loss=0.2779, pruned_loss=0.06815, over 4903.00 frames. ], tot_loss[loss=0.1826, simple_loss=0.253, pruned_loss=0.05603, over 949932.07 frames. ], batch size: 37, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:01:33,465 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.177e+02 1.629e+02 1.918e+02 2.372e+02 6.301e+02, threshold=3.835e+02, percent-clipped=3.0 +2023-03-26 23:01:49,039 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4828, 1.1665, 0.7671, 1.3988, 1.9376, 0.6756, 1.3120, 1.3837], + device='cuda:5'), covar=tensor([0.1689, 0.2166, 0.1817, 0.1251, 0.2035, 0.2063, 0.1518, 0.2131], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0094, 0.0110, 0.0090, 0.0118, 0.0093, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 23:01:50,275 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5054, 1.4206, 1.3056, 1.5436, 1.5012, 1.6093, 0.9712, 1.2910], + device='cuda:5'), covar=tensor([0.2346, 0.2130, 0.2063, 0.1728, 0.1826, 0.1255, 0.2686, 0.1991], + device='cuda:5'), in_proj_covar=tensor([0.0241, 0.0208, 0.0211, 0.0191, 0.0241, 0.0185, 0.0213, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:01:56,184 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=105388.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:02:03,219 INFO [finetune.py:976] (5/7) Epoch 19, batch 2300, loss[loss=0.1811, simple_loss=0.2436, pruned_loss=0.05934, over 4881.00 frames. ], tot_loss[loss=0.1822, simple_loss=0.2534, pruned_loss=0.05557, over 953097.26 frames. ], batch size: 32, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:02:25,632 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.32 vs. 
limit=5.0 +2023-03-26 23:02:34,839 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4912, 3.3858, 3.1602, 1.5775, 3.5053, 2.4402, 0.7900, 2.2925], + device='cuda:5'), covar=tensor([0.2417, 0.1994, 0.1637, 0.3415, 0.1129, 0.1201, 0.4363, 0.1587], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0177, 0.0160, 0.0129, 0.0160, 0.0123, 0.0147, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 23:02:45,814 INFO [finetune.py:976] (5/7) Epoch 19, batch 2350, loss[loss=0.1947, simple_loss=0.25, pruned_loss=0.06969, over 4880.00 frames. ], tot_loss[loss=0.1796, simple_loss=0.2501, pruned_loss=0.05462, over 950593.73 frames. ], batch size: 34, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:02:45,935 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=105449.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 23:02:48,225 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.033e+02 1.493e+02 1.723e+02 2.054e+02 4.367e+02, threshold=3.447e+02, percent-clipped=1.0 +2023-03-26 23:03:10,221 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=105486.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:03:19,408 INFO [finetune.py:976] (5/7) Epoch 19, batch 2400, loss[loss=0.1516, simple_loss=0.2226, pruned_loss=0.04029, over 4856.00 frames. ], tot_loss[loss=0.1777, simple_loss=0.2473, pruned_loss=0.05402, over 952986.71 frames. ], batch size: 44, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:03:23,349 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=105502.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:03:46,603 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5231, 2.3774, 1.9746, 2.5312, 2.4835, 2.1815, 2.9437, 2.5371], + device='cuda:5'), covar=tensor([0.1225, 0.2105, 0.2791, 0.2526, 0.2297, 0.1482, 0.2851, 0.1731], + device='cuda:5'), in_proj_covar=tensor([0.0185, 0.0188, 0.0236, 0.0253, 0.0247, 0.0203, 0.0215, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:04:14,888 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=105547.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:04:15,986 INFO [finetune.py:976] (5/7) Epoch 19, batch 2450, loss[loss=0.1686, simple_loss=0.2302, pruned_loss=0.05348, over 4900.00 frames. ], tot_loss[loss=0.175, simple_loss=0.2441, pruned_loss=0.05292, over 954441.81 frames. 
], batch size: 36, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:04:18,404 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.415e+01 1.457e+02 1.742e+02 2.171e+02 6.143e+02, threshold=3.484e+02, percent-clipped=3.0 +2023-03-26 23:04:22,678 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9222, 1.4610, 0.9305, 1.8343, 2.2803, 1.4263, 1.8124, 1.6790], + device='cuda:5'), covar=tensor([0.1422, 0.2050, 0.1906, 0.1106, 0.1892, 0.1953, 0.1421, 0.2049], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0094, 0.0110, 0.0091, 0.0118, 0.0092, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 23:04:25,637 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=105563.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:04:28,754 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-03-26 23:04:49,908 INFO [finetune.py:976] (5/7) Epoch 19, batch 2500, loss[loss=0.1717, simple_loss=0.2359, pruned_loss=0.05378, over 4031.00 frames. ], tot_loss[loss=0.1769, simple_loss=0.2459, pruned_loss=0.05395, over 953322.24 frames. ], batch size: 17, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:04:52,032 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8154, 2.5604, 2.3291, 2.7173, 2.6418, 2.4978, 2.9567, 2.6984], + device='cuda:5'), covar=tensor([0.1058, 0.1803, 0.2505, 0.2124, 0.2076, 0.1412, 0.2541, 0.1587], + device='cuda:5'), in_proj_covar=tensor([0.0185, 0.0188, 0.0236, 0.0254, 0.0247, 0.0203, 0.0216, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:05:06,243 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.1337, 1.2466, 1.3427, 0.6552, 1.2603, 1.5085, 1.6154, 1.2339], + device='cuda:5'), covar=tensor([0.0925, 0.0600, 0.0509, 0.0503, 0.0470, 0.0630, 0.0302, 0.0687], + device='cuda:5'), in_proj_covar=tensor([0.0126, 0.0152, 0.0126, 0.0127, 0.0133, 0.0130, 0.0143, 0.0149], + device='cuda:5'), out_proj_covar=tensor([9.1987e-05, 1.1045e-04, 9.0062e-05, 9.0342e-05, 9.3511e-05, 9.3213e-05, + 1.0254e-04, 1.0727e-04], device='cuda:5') +2023-03-26 23:05:23,463 INFO [finetune.py:976] (5/7) Epoch 19, batch 2550, loss[loss=0.1774, simple_loss=0.2409, pruned_loss=0.05699, over 4323.00 frames. ], tot_loss[loss=0.1805, simple_loss=0.25, pruned_loss=0.05546, over 951806.44 frames. ], batch size: 19, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:05:26,387 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.162e+02 1.575e+02 1.924e+02 2.412e+02 4.379e+02, threshold=3.848e+02, percent-clipped=4.0 +2023-03-26 23:05:31,720 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-03-26 23:05:56,905 INFO [finetune.py:976] (5/7) Epoch 19, batch 2600, loss[loss=0.1748, simple_loss=0.2572, pruned_loss=0.0462, over 4829.00 frames. ], tot_loss[loss=0.1811, simple_loss=0.2514, pruned_loss=0.05537, over 949095.03 frames. ], batch size: 49, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:06:17,239 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. 
limit=2.0 +2023-03-26 23:06:26,489 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7548, 3.9384, 3.6690, 1.8976, 4.1201, 2.9864, 0.7693, 2.8136], + device='cuda:5'), covar=tensor([0.2306, 0.1631, 0.1459, 0.3401, 0.0944, 0.1060, 0.4809, 0.1446], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0178, 0.0161, 0.0130, 0.0161, 0.0123, 0.0147, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 23:06:27,125 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105744.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 23:06:30,086 INFO [finetune.py:976] (5/7) Epoch 19, batch 2650, loss[loss=0.1977, simple_loss=0.2757, pruned_loss=0.05988, over 4738.00 frames. ], tot_loss[loss=0.1827, simple_loss=0.2536, pruned_loss=0.05584, over 951871.90 frames. ], batch size: 59, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:06:32,907 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.372e+01 1.545e+02 1.903e+02 2.181e+02 3.189e+02, threshold=3.806e+02, percent-clipped=0.0 +2023-03-26 23:06:59,545 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=105792.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:07:03,680 INFO [finetune.py:976] (5/7) Epoch 19, batch 2700, loss[loss=0.1968, simple_loss=0.2697, pruned_loss=0.06192, over 4896.00 frames. ], tot_loss[loss=0.18, simple_loss=0.2513, pruned_loss=0.05429, over 954081.80 frames. ], batch size: 32, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:07:03,794 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=105799.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:07:05,133 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6588, 1.4423, 1.1436, 1.3048, 1.7719, 1.8131, 1.5622, 1.3282], + device='cuda:5'), covar=tensor([0.0275, 0.0371, 0.0776, 0.0401, 0.0268, 0.0502, 0.0324, 0.0464], + device='cuda:5'), in_proj_covar=tensor([0.0096, 0.0107, 0.0143, 0.0111, 0.0100, 0.0110, 0.0100, 0.0111], + device='cuda:5'), out_proj_covar=tensor([7.4556e-05, 8.2727e-05, 1.1270e-04, 8.4859e-05, 7.7622e-05, 8.1366e-05, + 7.4922e-05, 8.4701e-05], device='cuda:5') +2023-03-26 23:07:32,865 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9259, 1.8243, 2.2690, 1.4714, 2.1309, 2.2701, 1.7184, 2.4086], + device='cuda:5'), covar=tensor([0.1441, 0.2095, 0.1414, 0.2135, 0.1001, 0.1560, 0.2818, 0.0875], + device='cuda:5'), in_proj_covar=tensor([0.0193, 0.0205, 0.0191, 0.0188, 0.0174, 0.0214, 0.0217, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:07:33,450 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105842.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:07:37,642 INFO [finetune.py:976] (5/7) Epoch 19, batch 2750, loss[loss=0.1657, simple_loss=0.2343, pruned_loss=0.04849, over 4812.00 frames. ], tot_loss[loss=0.1781, simple_loss=0.2483, pruned_loss=0.05397, over 955567.31 frames. 
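[editorial sketch] The scaling.py "Whitening: num_groups=..., num_channels=..., metric=M vs. limit=L" records compare a per-group whiteness measure against a limit, with the records suggesting a constraint activates only when the metric exceeds it. One plausible measure is mean(eig^2) / mean(eig)^2 of each group's channel covariance, which equals 1.0 exactly when the covariance is isotropic ("white") and grows with anisotropy. The formula below is an assumption, not the actual icefall implementation.

```python
# Hedged sketch: eigenvalue-dispersion whiteness metric per channel group.
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    # x: (num_frames, num_channels); channels split into equal groups.
    n, c = x.shape
    g = x.reshape(n, num_groups, c // num_groups).transpose(0, 1)
    g = g - g.mean(dim=1, keepdim=True)
    cov = g.transpose(1, 2) @ g / n            # per-group covariance
    eigs = torch.linalg.eigvalsh(cov)          # real eigenvalues, ascending
    # mean(eig^2) / mean(eig)^2 == 1.0 iff all eigenvalues are equal.
    return ((eigs ** 2).mean(dim=1) / eigs.mean(dim=1) ** 2).mean()

x = torch.randn(4000, 96)
print(whitening_metric(x, num_groups=8))       # close to 1.0 for white noise
```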
], batch size: 39, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:07:40,100 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.803e+01 1.395e+02 1.671e+02 1.966e+02 3.086e+02, threshold=3.343e+02, percent-clipped=0.0 +2023-03-26 23:07:40,235 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=105853.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:07:43,750 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105858.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:07:45,036 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=105860.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:07:57,402 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.5827, 1.6003, 1.7391, 0.9342, 1.8312, 1.9713, 1.9368, 1.4852], + device='cuda:5'), covar=tensor([0.0985, 0.0639, 0.0495, 0.0553, 0.0434, 0.0706, 0.0372, 0.0726], + device='cuda:5'), in_proj_covar=tensor([0.0126, 0.0153, 0.0126, 0.0127, 0.0133, 0.0131, 0.0143, 0.0149], + device='cuda:5'), out_proj_covar=tensor([9.2089e-05, 1.1099e-04, 9.0297e-05, 9.0301e-05, 9.4075e-05, 9.3566e-05, + 1.0298e-04, 1.0719e-04], device='cuda:5') +2023-03-26 23:08:14,883 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-03-26 23:08:22,329 INFO [finetune.py:976] (5/7) Epoch 19, batch 2800, loss[loss=0.1922, simple_loss=0.2501, pruned_loss=0.06713, over 4862.00 frames. ], tot_loss[loss=0.1759, simple_loss=0.2455, pruned_loss=0.05315, over 957072.47 frames. ], batch size: 31, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:08:24,295 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2855, 1.9964, 1.7984, 2.0434, 1.8902, 2.0321, 1.9838, 2.7086], + device='cuda:5'), covar=tensor([0.3827, 0.4744, 0.3438, 0.4251, 0.4356, 0.2772, 0.4374, 0.1879], + device='cuda:5'), in_proj_covar=tensor([0.0285, 0.0260, 0.0229, 0.0275, 0.0251, 0.0220, 0.0251, 0.0231], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:09:03,615 INFO [finetune.py:976] (5/7) Epoch 19, batch 2850, loss[loss=0.2092, simple_loss=0.2635, pruned_loss=0.07748, over 4868.00 frames. ], tot_loss[loss=0.1756, simple_loss=0.2444, pruned_loss=0.05338, over 956664.42 frames. ], batch size: 31, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:09:10,700 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.025e+01 1.444e+02 1.775e+02 2.176e+02 4.047e+02, threshold=3.549e+02, percent-clipped=5.0 +2023-03-26 23:09:18,281 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0982, 2.0079, 1.7328, 2.0421, 1.8459, 1.8619, 1.9296, 2.6353], + device='cuda:5'), covar=tensor([0.3677, 0.4073, 0.3178, 0.3712, 0.4079, 0.2416, 0.3756, 0.1589], + device='cuda:5'), in_proj_covar=tensor([0.0285, 0.0259, 0.0229, 0.0274, 0.0251, 0.0220, 0.0251, 0.0231], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:09:36,423 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.77 vs. limit=5.0 +2023-03-26 23:09:49,339 INFO [finetune.py:976] (5/7) Epoch 19, batch 2900, loss[loss=0.251, simple_loss=0.3086, pruned_loss=0.09667, over 4763.00 frames. ], tot_loss[loss=0.178, simple_loss=0.2472, pruned_loss=0.05443, over 951159.61 frames. 
], batch size: 54, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:10:21,005 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106044.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:10:24,380 INFO [finetune.py:976] (5/7) Epoch 19, batch 2950, loss[loss=0.1459, simple_loss=0.2382, pruned_loss=0.02682, over 4816.00 frames. ], tot_loss[loss=0.1798, simple_loss=0.2501, pruned_loss=0.05477, over 949777.38 frames. ], batch size: 40, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:10:27,321 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.008e+02 1.599e+02 1.865e+02 2.251e+02 4.962e+02, threshold=3.729e+02, percent-clipped=1.0 +2023-03-26 23:10:32,859 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1132, 2.0369, 1.6804, 2.0918, 2.0753, 1.7897, 2.3789, 2.1223], + device='cuda:5'), covar=tensor([0.1486, 0.2070, 0.3020, 0.2458, 0.2527, 0.1713, 0.2908, 0.1766], + device='cuda:5'), in_proj_covar=tensor([0.0187, 0.0189, 0.0238, 0.0256, 0.0249, 0.0205, 0.0217, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:10:35,705 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.76 vs. limit=5.0 +2023-03-26 23:10:43,287 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106077.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:10:53,299 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=106092.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:10:57,525 INFO [finetune.py:976] (5/7) Epoch 19, batch 3000, loss[loss=0.1833, simple_loss=0.2572, pruned_loss=0.05467, over 4918.00 frames. ], tot_loss[loss=0.181, simple_loss=0.2514, pruned_loss=0.05527, over 951050.91 frames. ], batch size: 42, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:10:57,525 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 23:11:02,144 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2855, 2.1354, 1.6380, 0.6615, 1.7450, 1.9605, 1.8311, 1.9485], + device='cuda:5'), covar=tensor([0.0901, 0.0733, 0.1219, 0.1855, 0.1208, 0.2094, 0.1982, 0.0711], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0192, 0.0199, 0.0182, 0.0210, 0.0207, 0.0222, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:11:08,360 INFO [finetune.py:1010] (5/7) Epoch 19, validation: loss=0.1576, simple_loss=0.2259, pruned_loss=0.04462, over 2265189.00 frames. +2023-03-26 23:11:08,360 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 23:11:43,856 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106138.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:11:46,254 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106142.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:11:50,370 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106148.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:11:50,871 INFO [finetune.py:976] (5/7) Epoch 19, batch 3050, loss[loss=0.1892, simple_loss=0.2577, pruned_loss=0.0603, over 4798.00 frames. ], tot_loss[loss=0.1805, simple_loss=0.2517, pruned_loss=0.05463, over 951877.30 frames. 
], batch size: 29, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:11:53,804 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.036e+02 1.577e+02 1.927e+02 2.196e+02 3.458e+02, threshold=3.854e+02, percent-clipped=0.0 +2023-03-26 23:11:55,079 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106155.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:11:56,166 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4466, 1.3612, 1.5717, 2.4352, 1.6463, 2.1136, 0.9330, 2.0164], + device='cuda:5'), covar=tensor([0.1575, 0.1234, 0.1011, 0.0639, 0.0805, 0.1261, 0.1334, 0.0602], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0116, 0.0133, 0.0163, 0.0099, 0.0135, 0.0123, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 23:11:57,424 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106158.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:12:18,601 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=106190.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:12:24,004 INFO [finetune.py:976] (5/7) Epoch 19, batch 3100, loss[loss=0.1718, simple_loss=0.2376, pruned_loss=0.05299, over 4717.00 frames. ], tot_loss[loss=0.1775, simple_loss=0.2484, pruned_loss=0.05332, over 952399.40 frames. ], batch size: 59, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:12:29,243 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=106206.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:12:57,682 INFO [finetune.py:976] (5/7) Epoch 19, batch 3150, loss[loss=0.1723, simple_loss=0.236, pruned_loss=0.05433, over 4779.00 frames. ], tot_loss[loss=0.1776, simple_loss=0.2474, pruned_loss=0.05387, over 953732.09 frames. ], batch size: 28, lr: 3.29e-03, grad_scale: 32.0 +2023-03-26 23:13:00,116 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.828e+01 1.675e+02 1.879e+02 2.192e+02 3.916e+02, threshold=3.758e+02, percent-clipped=1.0 +2023-03-26 23:13:20,400 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106268.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:13:41,055 INFO [finetune.py:976] (5/7) Epoch 19, batch 3200, loss[loss=0.1542, simple_loss=0.2205, pruned_loss=0.04395, over 4224.00 frames. ], tot_loss[loss=0.1753, simple_loss=0.2444, pruned_loss=0.05313, over 954024.72 frames. 
], batch size: 18, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:13:58,437 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9569, 1.5359, 0.8961, 1.6550, 2.1666, 1.3834, 1.6340, 1.6343], + device='cuda:5'), covar=tensor([0.1410, 0.1909, 0.1853, 0.1183, 0.1881, 0.1943, 0.1395, 0.2023], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0094, 0.0110, 0.0091, 0.0119, 0.0092, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 23:14:00,211 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6801, 1.5894, 1.5343, 1.7834, 1.9614, 1.7568, 1.3713, 1.4733], + device='cuda:5'), covar=tensor([0.1921, 0.1833, 0.1688, 0.1460, 0.1519, 0.1142, 0.2326, 0.1688], + device='cuda:5'), in_proj_covar=tensor([0.0241, 0.0209, 0.0211, 0.0192, 0.0241, 0.0186, 0.0214, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:14:01,446 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106329.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:14:05,725 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106336.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:14:06,940 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8019, 1.3215, 0.9145, 1.5613, 2.1390, 1.0410, 1.3850, 1.4870], + device='cuda:5'), covar=tensor([0.1167, 0.1708, 0.1547, 0.1007, 0.1510, 0.1837, 0.1314, 0.1690], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0094, 0.0110, 0.0090, 0.0119, 0.0092, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 23:14:06,950 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3970, 1.4168, 1.5376, 1.5951, 1.6447, 2.8477, 1.3707, 1.5898], + device='cuda:5'), covar=tensor([0.0847, 0.1474, 0.0938, 0.0773, 0.1291, 0.0297, 0.1224, 0.1363], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0074, 0.0077, 0.0091, 0.0080, 0.0084, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-26 23:14:16,904 INFO [finetune.py:976] (5/7) Epoch 19, batch 3250, loss[loss=0.1204, simple_loss=0.196, pruned_loss=0.02241, over 4157.00 frames. ], tot_loss[loss=0.1761, simple_loss=0.2449, pruned_loss=0.05367, over 951011.66 frames. ], batch size: 18, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:14:24,771 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.028e+02 1.520e+02 1.839e+02 2.222e+02 4.428e+02, threshold=3.677e+02, percent-clipped=2.0 +2023-03-26 23:15:08,397 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106397.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:15:10,069 INFO [finetune.py:976] (5/7) Epoch 19, batch 3300, loss[loss=0.1741, simple_loss=0.2575, pruned_loss=0.04532, over 4880.00 frames. ], tot_loss[loss=0.1801, simple_loss=0.2495, pruned_loss=0.05538, over 951255.83 frames. 
], batch size: 32, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:15:25,641 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7075, 4.2362, 4.1484, 2.0385, 4.3807, 3.4739, 0.9825, 2.9901], + device='cuda:5'), covar=tensor([0.2779, 0.1667, 0.1202, 0.3100, 0.0765, 0.0747, 0.4063, 0.1321], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0177, 0.0160, 0.0129, 0.0161, 0.0123, 0.0147, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 23:15:32,153 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.1943, 1.2536, 1.2869, 0.7538, 1.2510, 1.4596, 1.5885, 1.2026], + device='cuda:5'), covar=tensor([0.0853, 0.0504, 0.0508, 0.0418, 0.0489, 0.0525, 0.0259, 0.0565], + device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0151, 0.0125, 0.0125, 0.0131, 0.0129, 0.0141, 0.0148], + device='cuda:5'), out_proj_covar=tensor([9.1067e-05, 1.0947e-04, 8.9160e-05, 8.8760e-05, 9.2388e-05, 9.2634e-05, + 1.0089e-04, 1.0602e-04], device='cuda:5') +2023-03-26 23:15:36,590 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-03-26 23:15:41,465 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106433.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:15:51,016 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106448.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:15:51,547 INFO [finetune.py:976] (5/7) Epoch 19, batch 3350, loss[loss=0.1814, simple_loss=0.2553, pruned_loss=0.05376, over 4896.00 frames. ], tot_loss[loss=0.1807, simple_loss=0.2508, pruned_loss=0.05531, over 949788.30 frames. ], batch size: 37, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:15:54,464 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.049e+02 1.616e+02 1.883e+02 2.222e+02 4.657e+02, threshold=3.766e+02, percent-clipped=2.0 +2023-03-26 23:15:55,202 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106454.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:15:56,244 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106455.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:16:31,243 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=106496.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:16:33,552 INFO [finetune.py:976] (5/7) Epoch 19, batch 3400, loss[loss=0.1506, simple_loss=0.2306, pruned_loss=0.03526, over 4788.00 frames. ], tot_loss[loss=0.1812, simple_loss=0.2515, pruned_loss=0.05549, over 949585.28 frames. 
], batch size: 29, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:16:35,503 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5415, 1.6499, 2.1566, 3.5124, 2.3629, 2.4183, 1.0994, 2.8545], + device='cuda:5'), covar=tensor([0.1772, 0.1336, 0.1305, 0.0499, 0.0730, 0.1479, 0.1748, 0.0431], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0116, 0.0133, 0.0164, 0.0100, 0.0136, 0.0124, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 23:16:36,559 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=106503.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:16:39,078 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3056, 1.7501, 1.3588, 1.5355, 1.9262, 1.9157, 1.6675, 1.7335], + device='cuda:5'), covar=tensor([0.0611, 0.0314, 0.0526, 0.0312, 0.0275, 0.0525, 0.0358, 0.0318], + device='cuda:5'), in_proj_covar=tensor([0.0097, 0.0108, 0.0144, 0.0111, 0.0100, 0.0110, 0.0100, 0.0112], + device='cuda:5'), out_proj_covar=tensor([7.5370e-05, 8.3111e-05, 1.1365e-04, 8.5406e-05, 7.7860e-05, 8.1629e-05, + 7.5001e-05, 8.5210e-05], device='cuda:5') +2023-03-26 23:16:44,447 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106515.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:16:46,888 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8763, 1.7413, 1.6369, 1.9638, 2.1191, 1.9783, 1.4253, 1.6195], + device='cuda:5'), covar=tensor([0.2137, 0.1934, 0.1838, 0.1521, 0.1573, 0.1038, 0.2416, 0.1870], + device='cuda:5'), in_proj_covar=tensor([0.0241, 0.0209, 0.0211, 0.0192, 0.0241, 0.0187, 0.0214, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:16:47,447 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6651, 3.8737, 3.6695, 1.7012, 4.0662, 3.1085, 0.9881, 2.7525], + device='cuda:5'), covar=tensor([0.2490, 0.1722, 0.1500, 0.3260, 0.0995, 0.0894, 0.4168, 0.1207], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0177, 0.0160, 0.0129, 0.0160, 0.0122, 0.0146, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 23:17:06,743 INFO [finetune.py:976] (5/7) Epoch 19, batch 3450, loss[loss=0.1961, simple_loss=0.2689, pruned_loss=0.06169, over 4821.00 frames. ], tot_loss[loss=0.1801, simple_loss=0.2505, pruned_loss=0.05483, over 950870.99 frames. 
], batch size: 33, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:17:09,624 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.792e+01 1.525e+02 1.781e+02 2.060e+02 3.433e+02, threshold=3.562e+02, percent-clipped=0.0 +2023-03-26 23:17:09,781 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8703, 1.7719, 1.5441, 1.9184, 2.2656, 1.9141, 1.4883, 1.4857], + device='cuda:5'), covar=tensor([0.2032, 0.1853, 0.1851, 0.1537, 0.1580, 0.1171, 0.2375, 0.1887], + device='cuda:5'), in_proj_covar=tensor([0.0240, 0.0208, 0.0210, 0.0191, 0.0240, 0.0186, 0.0213, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:17:19,206 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5686, 3.8679, 3.6271, 1.7725, 4.0498, 3.1348, 1.0356, 2.8186], + device='cuda:5'), covar=tensor([0.2634, 0.1945, 0.1605, 0.3512, 0.1038, 0.0945, 0.4450, 0.1344], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0177, 0.0160, 0.0129, 0.0160, 0.0123, 0.0146, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 23:17:29,736 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9084, 1.3985, 0.8923, 1.6875, 2.1751, 1.3698, 1.5681, 1.7340], + device='cuda:5'), covar=tensor([0.1415, 0.1886, 0.1877, 0.1093, 0.1891, 0.1910, 0.1350, 0.1881], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0094, 0.0110, 0.0091, 0.0119, 0.0093, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 23:17:40,388 INFO [finetune.py:976] (5/7) Epoch 19, batch 3500, loss[loss=0.1568, simple_loss=0.2213, pruned_loss=0.04616, over 4734.00 frames. ], tot_loss[loss=0.1796, simple_loss=0.2492, pruned_loss=0.05501, over 952240.64 frames. ], batch size: 27, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:17:57,674 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106624.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:18:14,144 INFO [finetune.py:976] (5/7) Epoch 19, batch 3550, loss[loss=0.1591, simple_loss=0.2379, pruned_loss=0.04017, over 4899.00 frames. ], tot_loss[loss=0.1781, simple_loss=0.2476, pruned_loss=0.05433, over 954552.31 frames. ], batch size: 32, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:18:16,540 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.026e+02 1.567e+02 1.861e+02 2.307e+02 3.604e+02, threshold=3.722e+02, percent-clipped=2.0 +2023-03-26 23:18:26,109 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.56 vs. limit=5.0 +2023-03-26 23:18:51,976 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106692.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:18:56,156 INFO [finetune.py:976] (5/7) Epoch 19, batch 3600, loss[loss=0.2114, simple_loss=0.2679, pruned_loss=0.07742, over 4917.00 frames. ], tot_loss[loss=0.177, simple_loss=0.2457, pruned_loss=0.05415, over 955362.28 frames. ], batch size: 36, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:19:19,202 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106733.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:19:29,859 INFO [finetune.py:976] (5/7) Epoch 19, batch 3650, loss[loss=0.1576, simple_loss=0.2403, pruned_loss=0.03751, over 4936.00 frames. 
], tot_loss[loss=0.1774, simple_loss=0.2465, pruned_loss=0.05413, over 954983.44 frames. ], batch size: 33, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:19:34,983 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.609e+02 2.013e+02 2.438e+02 4.457e+02, threshold=4.025e+02, percent-clipped=1.0 +2023-03-26 23:19:54,852 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.9031, 4.3237, 4.4713, 4.7979, 4.6350, 4.3107, 5.0590, 1.5445], + device='cuda:5'), covar=tensor([0.0767, 0.0803, 0.0766, 0.0963, 0.1288, 0.1559, 0.0530, 0.5725], + device='cuda:5'), in_proj_covar=tensor([0.0346, 0.0241, 0.0278, 0.0289, 0.0330, 0.0281, 0.0300, 0.0294], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:20:03,525 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=106781.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:20:23,369 INFO [finetune.py:976] (5/7) Epoch 19, batch 3700, loss[loss=0.1664, simple_loss=0.2415, pruned_loss=0.04564, over 4816.00 frames. ], tot_loss[loss=0.1798, simple_loss=0.2502, pruned_loss=0.05475, over 955652.70 frames. ], batch size: 30, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:20:32,850 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106810.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:20:59,512 INFO [finetune.py:976] (5/7) Epoch 19, batch 3750, loss[loss=0.2153, simple_loss=0.2817, pruned_loss=0.07451, over 4898.00 frames. ], tot_loss[loss=0.1814, simple_loss=0.2519, pruned_loss=0.05547, over 955407.08 frames. ], batch size: 36, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:21:00,537 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-26 23:21:06,524 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.083e+02 1.604e+02 1.833e+02 2.350e+02 4.465e+02, threshold=3.666e+02, percent-clipped=2.0 +2023-03-26 23:21:09,907 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.52 vs. limit=5.0 +2023-03-26 23:21:48,086 INFO [finetune.py:976] (5/7) Epoch 19, batch 3800, loss[loss=0.2311, simple_loss=0.2927, pruned_loss=0.08478, over 4831.00 frames. ], tot_loss[loss=0.1822, simple_loss=0.2526, pruned_loss=0.05591, over 953589.12 frames. ], batch size: 49, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:22:07,684 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106924.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:22:11,219 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.6074, 1.6803, 1.8065, 1.0131, 1.8484, 2.0464, 2.0516, 1.6054], + device='cuda:5'), covar=tensor([0.0877, 0.0684, 0.0560, 0.0576, 0.0542, 0.0614, 0.0344, 0.0759], + device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0151, 0.0125, 0.0125, 0.0131, 0.0129, 0.0141, 0.0148], + device='cuda:5'), out_proj_covar=tensor([9.1015e-05, 1.0912e-04, 8.9505e-05, 8.8287e-05, 9.2213e-05, 9.2342e-05, + 1.0128e-04, 1.0611e-04], device='cuda:5') +2023-03-26 23:22:24,705 INFO [finetune.py:976] (5/7) Epoch 19, batch 3850, loss[loss=0.1759, simple_loss=0.245, pruned_loss=0.05344, over 4718.00 frames. ], tot_loss[loss=0.1799, simple_loss=0.2503, pruned_loss=0.0548, over 953456.64 frames. 
], batch size: 54, lr: 3.28e-03, grad_scale: 64.0 +2023-03-26 23:22:27,159 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.119e+02 1.623e+02 1.818e+02 2.255e+02 6.115e+02, threshold=3.637e+02, percent-clipped=1.0 +2023-03-26 23:22:31,520 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4598, 1.3378, 1.6604, 2.4899, 1.6859, 2.1217, 0.9788, 2.1728], + device='cuda:5'), covar=tensor([0.1627, 0.1394, 0.1076, 0.0673, 0.0866, 0.1111, 0.1467, 0.0557], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0133, 0.0163, 0.0100, 0.0136, 0.0123, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 23:22:39,187 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=106972.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:22:52,770 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106992.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:22:57,317 INFO [finetune.py:976] (5/7) Epoch 19, batch 3900, loss[loss=0.1599, simple_loss=0.2357, pruned_loss=0.04202, over 4918.00 frames. ], tot_loss[loss=0.1785, simple_loss=0.248, pruned_loss=0.05456, over 951153.72 frames. ], batch size: 37, lr: 3.28e-03, grad_scale: 64.0 +2023-03-26 23:23:24,565 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=107040.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:23:29,940 INFO [finetune.py:976] (5/7) Epoch 19, batch 3950, loss[loss=0.146, simple_loss=0.2169, pruned_loss=0.03754, over 4790.00 frames. ], tot_loss[loss=0.1763, simple_loss=0.245, pruned_loss=0.05382, over 952081.39 frames. ], batch size: 29, lr: 3.28e-03, grad_scale: 64.0 +2023-03-26 23:23:35,024 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.301e+01 1.519e+02 1.802e+02 2.250e+02 5.271e+02, threshold=3.605e+02, percent-clipped=1.0 +2023-03-26 23:23:35,405 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-03-26 23:23:45,027 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3893, 1.2894, 1.2637, 1.3252, 1.6144, 1.5559, 1.3481, 1.1862], + device='cuda:5'), covar=tensor([0.0341, 0.0309, 0.0625, 0.0320, 0.0265, 0.0512, 0.0376, 0.0410], + device='cuda:5'), in_proj_covar=tensor([0.0096, 0.0107, 0.0144, 0.0111, 0.0100, 0.0110, 0.0100, 0.0111], + device='cuda:5'), out_proj_covar=tensor([7.4872e-05, 8.2461e-05, 1.1351e-04, 8.5435e-05, 7.7767e-05, 8.1699e-05, + 7.4284e-05, 8.4770e-05], device='cuda:5') +2023-03-26 23:24:07,129 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6824, 2.5495, 2.0831, 2.7477, 2.6690, 2.4882, 3.1753, 2.8180], + device='cuda:5'), covar=tensor([0.1360, 0.2222, 0.3200, 0.2613, 0.2589, 0.1559, 0.3065, 0.1704], + device='cuda:5'), in_proj_covar=tensor([0.0185, 0.0188, 0.0236, 0.0253, 0.0248, 0.0203, 0.0215, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:24:12,974 INFO [finetune.py:976] (5/7) Epoch 19, batch 4000, loss[loss=0.1982, simple_loss=0.2849, pruned_loss=0.0558, over 4818.00 frames. ], tot_loss[loss=0.1762, simple_loss=0.2449, pruned_loss=0.05376, over 950793.66 frames. 
], batch size: 39, lr: 3.28e-03, grad_scale: 64.0 +2023-03-26 23:24:21,378 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=107110.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:24:21,454 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9528, 1.3859, 1.9417, 1.9839, 1.7380, 1.6757, 1.8963, 1.7950], + device='cuda:5'), covar=tensor([0.3447, 0.3529, 0.3133, 0.3167, 0.4236, 0.3303, 0.4094, 0.2927], + device='cuda:5'), in_proj_covar=tensor([0.0250, 0.0239, 0.0259, 0.0277, 0.0274, 0.0250, 0.0284, 0.0241], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:24:41,104 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107140.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 23:24:46,975 INFO [finetune.py:976] (5/7) Epoch 19, batch 4050, loss[loss=0.1943, simple_loss=0.264, pruned_loss=0.06228, over 4838.00 frames. ], tot_loss[loss=0.1772, simple_loss=0.2467, pruned_loss=0.05383, over 949778.73 frames. ], batch size: 30, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:24:48,811 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107152.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:24:49,865 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.013e+02 1.579e+02 1.895e+02 2.231e+02 3.900e+02, threshold=3.790e+02, percent-clipped=1.0 +2023-03-26 23:24:52,896 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=107158.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:24:56,937 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6732, 1.5139, 1.1180, 0.2209, 1.1814, 1.4324, 1.4056, 1.2974], + device='cuda:5'), covar=tensor([0.0938, 0.0854, 0.1413, 0.2137, 0.1463, 0.2492, 0.2383, 0.0931], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0192, 0.0199, 0.0181, 0.0209, 0.0207, 0.0222, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:25:40,275 INFO [finetune.py:976] (5/7) Epoch 19, batch 4100, loss[loss=0.159, simple_loss=0.2361, pruned_loss=0.04093, over 4777.00 frames. ], tot_loss[loss=0.178, simple_loss=0.248, pruned_loss=0.05401, over 950369.95 frames. ], batch size: 29, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:25:41,781 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107201.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 23:25:50,475 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107213.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 23:26:13,433 INFO [finetune.py:976] (5/7) Epoch 19, batch 4150, loss[loss=0.2314, simple_loss=0.286, pruned_loss=0.08843, over 4090.00 frames. ], tot_loss[loss=0.1804, simple_loss=0.2504, pruned_loss=0.05514, over 949713.52 frames. 
], batch size: 65, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:26:21,810 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.077e+02 1.570e+02 1.970e+02 2.461e+02 5.293e+02, threshold=3.939e+02, percent-clipped=1.0 +2023-03-26 23:26:56,266 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.9712, 0.9610, 0.9125, 1.0837, 1.1234, 1.0895, 0.9818, 0.8908], + device='cuda:5'), covar=tensor([0.0409, 0.0284, 0.0640, 0.0290, 0.0241, 0.0379, 0.0321, 0.0365], + device='cuda:5'), in_proj_covar=tensor([0.0096, 0.0107, 0.0144, 0.0111, 0.0099, 0.0110, 0.0099, 0.0111], + device='cuda:5'), out_proj_covar=tensor([7.4905e-05, 8.2221e-05, 1.1333e-04, 8.5279e-05, 7.7218e-05, 8.1556e-05, + 7.3972e-05, 8.4709e-05], device='cuda:5') +2023-03-26 23:26:56,743 INFO [finetune.py:976] (5/7) Epoch 19, batch 4200, loss[loss=0.2029, simple_loss=0.2735, pruned_loss=0.06614, over 4757.00 frames. ], tot_loss[loss=0.1791, simple_loss=0.2497, pruned_loss=0.05427, over 951962.23 frames. ], batch size: 54, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:27:29,945 INFO [finetune.py:976] (5/7) Epoch 19, batch 4250, loss[loss=0.1664, simple_loss=0.2341, pruned_loss=0.04932, over 4770.00 frames. ], tot_loss[loss=0.1771, simple_loss=0.2471, pruned_loss=0.05353, over 953652.31 frames. ], batch size: 26, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:27:33,461 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.503e+02 1.795e+02 2.146e+02 3.676e+02, threshold=3.590e+02, percent-clipped=0.0 +2023-03-26 23:28:03,397 INFO [finetune.py:976] (5/7) Epoch 19, batch 4300, loss[loss=0.1774, simple_loss=0.2409, pruned_loss=0.05698, over 4775.00 frames. ], tot_loss[loss=0.1761, simple_loss=0.2456, pruned_loss=0.05329, over 953548.97 frames. ], batch size: 29, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:28:25,971 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0354, 1.8773, 2.4838, 1.6403, 2.3125, 2.4973, 1.7659, 2.6101], + device='cuda:5'), covar=tensor([0.1524, 0.2282, 0.1710, 0.2360, 0.0930, 0.1383, 0.2912, 0.0909], + device='cuda:5'), in_proj_covar=tensor([0.0193, 0.0205, 0.0191, 0.0189, 0.0174, 0.0213, 0.0218, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:28:29,022 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4646, 2.3183, 1.8559, 0.8401, 1.8903, 1.9794, 1.7792, 1.9577], + device='cuda:5'), covar=tensor([0.0795, 0.0701, 0.1424, 0.1943, 0.1417, 0.2162, 0.2220, 0.0929], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0193, 0.0200, 0.0182, 0.0210, 0.0207, 0.0222, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:28:36,190 INFO [finetune.py:976] (5/7) Epoch 19, batch 4350, loss[loss=0.1448, simple_loss=0.2281, pruned_loss=0.03078, over 4903.00 frames. ], tot_loss[loss=0.173, simple_loss=0.2426, pruned_loss=0.05175, over 954279.82 frames. 
], batch size: 35, lr: 3.28e-03, grad_scale: 32.0 +2023-03-26 23:28:40,180 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.110e+02 1.532e+02 1.813e+02 2.231e+02 3.395e+02, threshold=3.625e+02, percent-clipped=1.0 +2023-03-26 23:29:00,207 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2815, 2.0702, 2.1381, 0.9787, 2.4608, 2.6787, 2.2079, 1.9546], + device='cuda:5'), covar=tensor([0.0916, 0.0767, 0.0479, 0.0709, 0.0476, 0.0544, 0.0435, 0.0895], + device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0149, 0.0124, 0.0124, 0.0131, 0.0127, 0.0141, 0.0146], + device='cuda:5'), out_proj_covar=tensor([9.0153e-05, 1.0824e-04, 8.9036e-05, 8.8087e-05, 9.1971e-05, 9.1212e-05, + 1.0098e-04, 1.0529e-04], device='cuda:5') +2023-03-26 23:29:15,226 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7894, 1.5928, 2.3024, 3.4172, 2.2550, 2.6035, 1.1251, 2.8541], + device='cuda:5'), covar=tensor([0.1601, 0.1379, 0.1234, 0.0537, 0.0830, 0.1137, 0.1803, 0.0468], + device='cuda:5'), in_proj_covar=tensor([0.0097, 0.0114, 0.0132, 0.0162, 0.0099, 0.0134, 0.0123, 0.0098], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 23:29:21,287 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107496.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 23:29:23,019 INFO [finetune.py:976] (5/7) Epoch 19, batch 4400, loss[loss=0.1978, simple_loss=0.2564, pruned_loss=0.06965, over 4928.00 frames. ], tot_loss[loss=0.1738, simple_loss=0.2432, pruned_loss=0.05219, over 953363.25 frames. ], batch size: 38, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:29:29,601 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107508.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 23:29:31,356 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107510.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:29:56,828 INFO [finetune.py:976] (5/7) Epoch 19, batch 4450, loss[loss=0.1617, simple_loss=0.2423, pruned_loss=0.04059, over 4815.00 frames. ], tot_loss[loss=0.1765, simple_loss=0.2466, pruned_loss=0.0532, over 955063.56 frames. ], batch size: 40, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:29:59,906 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.983e+01 1.601e+02 1.972e+02 2.467e+02 3.942e+02, threshold=3.944e+02, percent-clipped=4.0 +2023-03-26 23:30:12,260 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107571.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:30:42,901 INFO [finetune.py:976] (5/7) Epoch 19, batch 4500, loss[loss=0.1641, simple_loss=0.2334, pruned_loss=0.04744, over 4896.00 frames. ], tot_loss[loss=0.1782, simple_loss=0.2487, pruned_loss=0.05384, over 956082.85 frames. ], batch size: 35, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:31:11,053 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6455, 3.7041, 3.4742, 1.8115, 3.7822, 2.7928, 0.7146, 2.5352], + device='cuda:5'), covar=tensor([0.2393, 0.1389, 0.1401, 0.3072, 0.0933, 0.0986, 0.4368, 0.1412], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0176, 0.0160, 0.0130, 0.0161, 0.0123, 0.0147, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 23:31:25,201 INFO [finetune.py:976] (5/7) Epoch 19, batch 4550, loss[loss=0.242, simple_loss=0.3052, pruned_loss=0.08941, over 4916.00 frames. 
], tot_loss[loss=0.1806, simple_loss=0.2513, pruned_loss=0.05498, over 954125.70 frames. ], batch size: 36, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:31:28,200 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.023e+02 1.552e+02 1.832e+02 2.186e+02 5.352e+02, threshold=3.664e+02, percent-clipped=1.0 +2023-03-26 23:31:31,364 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107659.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:32:09,828 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.8027, 1.6021, 1.7284, 1.0582, 1.9106, 2.0022, 2.0387, 1.5737], + device='cuda:5'), covar=tensor([0.0990, 0.1022, 0.0565, 0.0636, 0.0492, 0.0817, 0.0423, 0.0877], + device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0150, 0.0125, 0.0125, 0.0131, 0.0128, 0.0141, 0.0147], + device='cuda:5'), out_proj_covar=tensor([9.0621e-05, 1.0855e-04, 8.9061e-05, 8.8315e-05, 9.2175e-05, 9.1431e-05, + 1.0139e-04, 1.0549e-04], device='cuda:5') +2023-03-26 23:32:12,114 INFO [finetune.py:976] (5/7) Epoch 19, batch 4600, loss[loss=0.1815, simple_loss=0.2448, pruned_loss=0.05916, over 4924.00 frames. ], tot_loss[loss=0.1798, simple_loss=0.2504, pruned_loss=0.05467, over 954306.54 frames. ], batch size: 33, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:32:20,545 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1267, 1.4261, 0.7699, 1.9554, 2.4713, 1.7657, 1.6033, 1.8754], + device='cuda:5'), covar=tensor([0.1362, 0.1889, 0.2123, 0.1145, 0.1810, 0.1942, 0.1390, 0.1882], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0094, 0.0110, 0.0090, 0.0119, 0.0092, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 23:32:26,326 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107720.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:32:37,573 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107737.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:32:45,690 INFO [finetune.py:976] (5/7) Epoch 19, batch 4650, loss[loss=0.133, simple_loss=0.2024, pruned_loss=0.03183, over 4679.00 frames. ], tot_loss[loss=0.1782, simple_loss=0.248, pruned_loss=0.05418, over 956448.11 frames. ], batch size: 23, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:32:47,057 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.6475, 1.6523, 1.6788, 1.0845, 1.7145, 1.9962, 1.9397, 1.4566], + device='cuda:5'), covar=tensor([0.0885, 0.0681, 0.0504, 0.0478, 0.0400, 0.0618, 0.0308, 0.0756], + device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0150, 0.0125, 0.0125, 0.0131, 0.0128, 0.0141, 0.0147], + device='cuda:5'), out_proj_covar=tensor([9.0675e-05, 1.0843e-04, 8.9138e-05, 8.8341e-05, 9.2202e-05, 9.1490e-05, + 1.0142e-04, 1.0554e-04], device='cuda:5') +2023-03-26 23:32:48,739 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.855e+01 1.504e+02 1.713e+02 2.086e+02 4.043e+02, threshold=3.426e+02, percent-clipped=2.0 +2023-03-26 23:33:17,175 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=107796.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 23:33:18,396 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107798.0, num_to_drop=1, layers_to_drop={3} +2023-03-26 23:33:19,351 INFO [finetune.py:976] (5/7) Epoch 19, batch 4700, loss[loss=0.1988, simple_loss=0.2493, pruned_loss=0.07416, over 4915.00 frames. 
], tot_loss[loss=0.1762, simple_loss=0.2454, pruned_loss=0.0535, over 957627.25 frames. ], batch size: 46, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:33:25,045 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=107808.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:33:49,093 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=107844.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 23:33:54,088 INFO [finetune.py:976] (5/7) Epoch 19, batch 4750, loss[loss=0.1828, simple_loss=0.2555, pruned_loss=0.05504, over 4929.00 frames. ], tot_loss[loss=0.1765, simple_loss=0.2454, pruned_loss=0.05373, over 958273.24 frames. ], batch size: 33, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:33:57,616 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.048e+02 1.438e+02 1.688e+02 2.143e+02 3.806e+02, threshold=3.376e+02, percent-clipped=2.0 +2023-03-26 23:33:58,880 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=107856.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:34:04,996 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107866.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:34:37,227 INFO [finetune.py:976] (5/7) Epoch 19, batch 4800, loss[loss=0.216, simple_loss=0.2808, pruned_loss=0.07561, over 4740.00 frames. ], tot_loss[loss=0.1781, simple_loss=0.2476, pruned_loss=0.0543, over 956385.86 frames. ], batch size: 54, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:35:10,747 INFO [finetune.py:976] (5/7) Epoch 19, batch 4850, loss[loss=0.1726, simple_loss=0.2492, pruned_loss=0.04805, over 4816.00 frames. ], tot_loss[loss=0.1798, simple_loss=0.2503, pruned_loss=0.05464, over 956284.49 frames. ], batch size: 40, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:35:13,744 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.610e+02 1.895e+02 2.225e+02 4.035e+02, threshold=3.790e+02, percent-clipped=2.0 +2023-03-26 23:35:17,857 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5177, 3.4912, 3.3605, 1.7480, 3.7023, 2.6920, 0.9149, 2.5225], + device='cuda:5'), covar=tensor([0.2907, 0.2138, 0.1549, 0.3087, 0.1068, 0.1077, 0.3992, 0.1439], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0177, 0.0160, 0.0130, 0.0160, 0.0123, 0.0147, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 23:35:29,654 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.93 vs. limit=5.0 +2023-03-26 23:35:45,814 INFO [finetune.py:976] (5/7) Epoch 19, batch 4900, loss[loss=0.1504, simple_loss=0.2236, pruned_loss=0.03857, over 4745.00 frames. ], tot_loss[loss=0.1812, simple_loss=0.2521, pruned_loss=0.05513, over 957333.67 frames. ], batch size: 26, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:35:56,299 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-03-26 23:35:57,377 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108015.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:35:58,175 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-03-26 23:36:28,166 INFO [finetune.py:976] (5/7) Epoch 19, batch 4950, loss[loss=0.167, simple_loss=0.2458, pruned_loss=0.0441, over 4920.00 frames. ], tot_loss[loss=0.1817, simple_loss=0.2532, pruned_loss=0.05516, over 957517.77 frames. 
], batch size: 38, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:36:31,629 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.136e+02 1.572e+02 1.807e+02 2.323e+02 4.539e+02, threshold=3.614e+02, percent-clipped=1.0 +2023-03-26 23:37:04,012 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108093.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 23:37:10,959 INFO [finetune.py:976] (5/7) Epoch 19, batch 5000, loss[loss=0.1852, simple_loss=0.2467, pruned_loss=0.06183, over 4841.00 frames. ], tot_loss[loss=0.1788, simple_loss=0.25, pruned_loss=0.05383, over 957832.28 frames. ], batch size: 49, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:37:43,660 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.9594, 0.9732, 0.9112, 1.0655, 1.1507, 1.0788, 0.9594, 0.8884], + device='cuda:5'), covar=tensor([0.0374, 0.0328, 0.0645, 0.0336, 0.0285, 0.0479, 0.0399, 0.0416], + device='cuda:5'), in_proj_covar=tensor([0.0096, 0.0107, 0.0143, 0.0111, 0.0099, 0.0110, 0.0099, 0.0110], + device='cuda:5'), out_proj_covar=tensor([7.4433e-05, 8.2114e-05, 1.1276e-04, 8.4994e-05, 7.6795e-05, 8.1093e-05, + 7.3498e-05, 8.4407e-05], device='cuda:5') +2023-03-26 23:37:54,036 INFO [finetune.py:976] (5/7) Epoch 19, batch 5050, loss[loss=0.1638, simple_loss=0.2393, pruned_loss=0.04417, over 4815.00 frames. ], tot_loss[loss=0.1767, simple_loss=0.2473, pruned_loss=0.05305, over 957176.25 frames. ], batch size: 41, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:37:57,572 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.150e+02 1.577e+02 1.863e+02 2.132e+02 3.762e+02, threshold=3.725e+02, percent-clipped=1.0 +2023-03-26 23:38:05,860 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108166.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:38:27,776 INFO [finetune.py:976] (5/7) Epoch 19, batch 5100, loss[loss=0.163, simple_loss=0.2412, pruned_loss=0.04237, over 4855.00 frames. ], tot_loss[loss=0.173, simple_loss=0.2432, pruned_loss=0.05142, over 957872.25 frames. ], batch size: 44, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:38:38,020 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=108214.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:38:44,672 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.31 vs. limit=5.0 +2023-03-26 23:39:00,757 INFO [finetune.py:976] (5/7) Epoch 19, batch 5150, loss[loss=0.1544, simple_loss=0.2347, pruned_loss=0.03701, over 4832.00 frames. ], tot_loss[loss=0.173, simple_loss=0.2432, pruned_loss=0.05138, over 956531.48 frames. ], batch size: 51, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:39:04,793 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.278e+01 1.451e+02 1.873e+02 2.231e+02 4.201e+02, threshold=3.747e+02, percent-clipped=0.0 +2023-03-26 23:39:15,525 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.5287, 5.1862, 4.8637, 3.2012, 5.2531, 4.2548, 1.4132, 4.0012], + device='cuda:5'), covar=tensor([0.1895, 0.2051, 0.1323, 0.2682, 0.0685, 0.0650, 0.4269, 0.1155], + device='cuda:5'), in_proj_covar=tensor([0.0149, 0.0173, 0.0157, 0.0127, 0.0157, 0.0120, 0.0144, 0.0120], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-26 23:39:39,543 INFO [finetune.py:976] (5/7) Epoch 19, batch 5200, loss[loss=0.187, simple_loss=0.257, pruned_loss=0.05852, over 4811.00 frames. 
], tot_loss[loss=0.176, simple_loss=0.2468, pruned_loss=0.05265, over 954034.23 frames. ], batch size: 25, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:39:49,780 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9600, 1.7042, 2.0881, 1.9148, 1.7974, 1.7795, 1.9906, 1.9770], + device='cuda:5'), covar=tensor([0.3281, 0.3311, 0.2627, 0.3433, 0.3977, 0.3457, 0.3691, 0.2501], + device='cuda:5'), in_proj_covar=tensor([0.0252, 0.0241, 0.0261, 0.0280, 0.0277, 0.0252, 0.0287, 0.0243], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:39:54,429 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108315.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:40:00,192 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-03-26 23:40:00,994 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=108325.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:40:16,477 INFO [finetune.py:976] (5/7) Epoch 19, batch 5250, loss[loss=0.1552, simple_loss=0.2269, pruned_loss=0.04176, over 4832.00 frames. ], tot_loss[loss=0.1796, simple_loss=0.2506, pruned_loss=0.0543, over 953398.43 frames. ], batch size: 39, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:40:19,991 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.128e+02 1.566e+02 1.984e+02 2.332e+02 4.295e+02, threshold=3.968e+02, percent-clipped=2.0 +2023-03-26 23:40:26,552 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=108363.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:40:42,058 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=108386.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:40:46,345 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108393.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:40:49,959 INFO [finetune.py:976] (5/7) Epoch 19, batch 5300, loss[loss=0.2052, simple_loss=0.2664, pruned_loss=0.07199, over 4769.00 frames. ], tot_loss[loss=0.1824, simple_loss=0.2532, pruned_loss=0.0558, over 953207.12 frames. 
], batch size: 26, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:40:53,138 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1522, 1.9951, 1.5777, 0.6019, 1.6268, 1.7662, 1.6212, 1.8028], + device='cuda:5'), covar=tensor([0.0860, 0.0735, 0.1388, 0.2024, 0.1308, 0.2270, 0.2340, 0.0893], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0194, 0.0200, 0.0183, 0.0211, 0.0208, 0.0223, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:41:22,543 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=108432.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:41:22,576 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1120, 1.9291, 1.6892, 1.9194, 1.8391, 1.8356, 1.8331, 2.5879], + device='cuda:5'), covar=tensor([0.3883, 0.4695, 0.3329, 0.3795, 0.4238, 0.2605, 0.4095, 0.1742], + device='cuda:5'), in_proj_covar=tensor([0.0288, 0.0260, 0.0230, 0.0275, 0.0251, 0.0221, 0.0253, 0.0233], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:41:32,021 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=108441.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:41:36,861 INFO [finetune.py:976] (5/7) Epoch 19, batch 5350, loss[loss=0.1785, simple_loss=0.2581, pruned_loss=0.04944, over 4897.00 frames. ], tot_loss[loss=0.1816, simple_loss=0.2528, pruned_loss=0.05523, over 953796.50 frames. ], batch size: 35, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:41:39,876 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.546e+01 1.453e+02 1.815e+02 2.266e+02 3.194e+02, threshold=3.630e+02, percent-clipped=0.0 +2023-03-26 23:42:09,111 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=108493.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 23:42:12,600 INFO [finetune.py:976] (5/7) Epoch 19, batch 5400, loss[loss=0.1532, simple_loss=0.2211, pruned_loss=0.0427, over 4828.00 frames. ], tot_loss[loss=0.18, simple_loss=0.2504, pruned_loss=0.05483, over 953906.38 frames. ], batch size: 33, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:42:53,852 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4077, 2.1885, 1.9042, 2.1697, 2.2912, 2.1089, 2.4889, 2.3580], + device='cuda:5'), covar=tensor([0.1261, 0.2034, 0.2951, 0.2487, 0.2411, 0.1739, 0.2459, 0.1759], + device='cuda:5'), in_proj_covar=tensor([0.0183, 0.0187, 0.0233, 0.0252, 0.0245, 0.0202, 0.0213, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:42:58,628 INFO [finetune.py:976] (5/7) Epoch 19, batch 5450, loss[loss=0.1837, simple_loss=0.2422, pruned_loss=0.06255, over 4908.00 frames. ], tot_loss[loss=0.177, simple_loss=0.2466, pruned_loss=0.05369, over 953686.26 frames. ], batch size: 43, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:43:01,646 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.000e+02 1.562e+02 1.816e+02 2.216e+02 4.232e+02, threshold=3.632e+02, percent-clipped=2.0 +2023-03-26 23:43:04,528 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.83 vs. limit=5.0 +2023-03-26 23:43:11,306 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. 
limit=2.0 +2023-03-26 23:43:17,246 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=108577.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:43:31,854 INFO [finetune.py:976] (5/7) Epoch 19, batch 5500, loss[loss=0.1947, simple_loss=0.2609, pruned_loss=0.06426, over 4819.00 frames. ], tot_loss[loss=0.1743, simple_loss=0.2432, pruned_loss=0.0527, over 954647.36 frames. ], batch size: 38, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:43:42,703 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.45 vs. limit=5.0 +2023-03-26 23:43:58,816 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=108638.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:44:05,685 INFO [finetune.py:976] (5/7) Epoch 19, batch 5550, loss[loss=0.1847, simple_loss=0.248, pruned_loss=0.06072, over 4874.00 frames. ], tot_loss[loss=0.1758, simple_loss=0.2452, pruned_loss=0.05316, over 956499.05 frames. ], batch size: 31, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:44:08,704 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.053e+02 1.553e+02 1.822e+02 2.201e+02 3.552e+02, threshold=3.643e+02, percent-clipped=0.0 +2023-03-26 23:44:27,082 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108681.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:44:37,477 INFO [finetune.py:976] (5/7) Epoch 19, batch 5600, loss[loss=0.1719, simple_loss=0.2444, pruned_loss=0.04971, over 4892.00 frames. ], tot_loss[loss=0.1794, simple_loss=0.2496, pruned_loss=0.05463, over 956500.45 frames. ], batch size: 32, lr: 3.27e-03, grad_scale: 32.0 +2023-03-26 23:44:41,110 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9519, 3.0599, 2.7376, 1.9975, 3.0206, 3.0056, 3.2458, 2.6773], + device='cuda:5'), covar=tensor([0.0508, 0.0460, 0.0669, 0.0873, 0.0543, 0.0704, 0.0483, 0.0798], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0135, 0.0140, 0.0121, 0.0125, 0.0139, 0.0139, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:44:52,544 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-03-26 23:45:09,465 INFO [finetune.py:976] (5/7) Epoch 19, batch 5650, loss[loss=0.1455, simple_loss=0.2201, pruned_loss=0.03543, over 4720.00 frames. ], tot_loss[loss=0.1814, simple_loss=0.2524, pruned_loss=0.05522, over 956266.04 frames. ], batch size: 23, lr: 3.26e-03, grad_scale: 32.0 +2023-03-26 23:45:12,317 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.055e+02 1.584e+02 1.878e+02 2.184e+02 3.636e+02, threshold=3.756e+02, percent-clipped=0.0 +2023-03-26 23:45:32,969 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108788.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 23:45:34,792 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=108791.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:45:39,454 INFO [finetune.py:976] (5/7) Epoch 19, batch 5700, loss[loss=0.1375, simple_loss=0.1966, pruned_loss=0.03915, over 4032.00 frames. ], tot_loss[loss=0.178, simple_loss=0.2474, pruned_loss=0.05433, over 934813.54 frames. 
], batch size: 17, lr: 3.26e-03, grad_scale: 32.0 +2023-03-26 23:45:49,154 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0510, 2.0278, 2.6806, 3.7583, 2.6389, 2.8815, 1.5391, 2.9316], + device='cuda:5'), covar=tensor([0.1625, 0.1234, 0.1161, 0.0494, 0.0732, 0.1078, 0.1718, 0.0498], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0115, 0.0133, 0.0163, 0.0100, 0.0135, 0.0123, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 23:46:08,105 INFO [finetune.py:976] (5/7) Epoch 20, batch 0, loss[loss=0.2051, simple_loss=0.2592, pruned_loss=0.07555, over 4776.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2592, pruned_loss=0.07555, over 4776.00 frames. ], batch size: 51, lr: 3.26e-03, grad_scale: 32.0 +2023-03-26 23:46:08,106 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-26 23:46:15,491 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8578, 1.2868, 0.9549, 1.6151, 2.1066, 1.2279, 1.6260, 1.5465], + device='cuda:5'), covar=tensor([0.1332, 0.1891, 0.1739, 0.1089, 0.1814, 0.1893, 0.1202, 0.1899], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0111, 0.0091, 0.0120, 0.0093, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 23:46:16,866 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2171, 2.0149, 1.8737, 1.8457, 1.9381, 2.0162, 2.0027, 2.6010], + device='cuda:5'), covar=tensor([0.3927, 0.4942, 0.3442, 0.3830, 0.4101, 0.2557, 0.4030, 0.1942], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0261, 0.0230, 0.0275, 0.0251, 0.0221, 0.0252, 0.0232], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:46:26,645 INFO [finetune.py:1010] (5/7) Epoch 20, validation: loss=0.158, simple_loss=0.2276, pruned_loss=0.04423, over 2265189.00 frames. 
+2023-03-26 23:46:26,646 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-26 23:46:28,019 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.6296, 1.5751, 1.5075, 0.8735, 1.6496, 1.8741, 1.7919, 1.3999], + device='cuda:5'), covar=tensor([0.1155, 0.0671, 0.0576, 0.0691, 0.0513, 0.0602, 0.0375, 0.0847], + device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0149, 0.0124, 0.0125, 0.0131, 0.0128, 0.0142, 0.0147], + device='cuda:5'), out_proj_covar=tensor([9.0560e-05, 1.0817e-04, 8.8728e-05, 8.8289e-05, 9.1966e-05, 9.1449e-05, + 1.0205e-04, 1.0559e-04], device='cuda:5') +2023-03-26 23:46:36,764 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4485, 1.5958, 1.0085, 2.1517, 2.5745, 1.8311, 2.0359, 2.0192], + device='cuda:5'), covar=tensor([0.1252, 0.1918, 0.1866, 0.1005, 0.1643, 0.1780, 0.1255, 0.1762], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0111, 0.0091, 0.0120, 0.0093, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 23:46:52,592 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=108852.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:46:58,209 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.806e+01 1.424e+02 1.737e+02 2.098e+02 5.389e+02, threshold=3.475e+02, percent-clipped=2.0 +2023-03-26 23:47:17,655 INFO [finetune.py:976] (5/7) Epoch 20, batch 50, loss[loss=0.1987, simple_loss=0.275, pruned_loss=0.06117, over 4817.00 frames. ], tot_loss[loss=0.1774, simple_loss=0.2486, pruned_loss=0.05314, over 216286.18 frames. ], batch size: 39, lr: 3.26e-03, grad_scale: 32.0 +2023-03-26 23:47:18,559 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-03-26 23:47:57,360 INFO [finetune.py:976] (5/7) Epoch 20, batch 100, loss[loss=0.1644, simple_loss=0.2356, pruned_loss=0.04662, over 4897.00 frames. ], tot_loss[loss=0.1749, simple_loss=0.2443, pruned_loss=0.05276, over 381275.73 frames. ], batch size: 32, lr: 3.26e-03, grad_scale: 32.0 +2023-03-26 23:48:04,453 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-03-26 23:48:06,345 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108933.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:48:13,414 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.46 vs. limit=5.0 +2023-03-26 23:48:23,147 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.009e+02 1.362e+02 1.754e+02 2.070e+02 5.157e+02, threshold=3.508e+02, percent-clipped=1.0 +2023-03-26 23:48:38,579 INFO [finetune.py:976] (5/7) Epoch 20, batch 150, loss[loss=0.1598, simple_loss=0.2236, pruned_loss=0.04806, over 4832.00 frames. ], tot_loss[loss=0.1724, simple_loss=0.2405, pruned_loss=0.05219, over 509011.21 frames. 
], batch size: 33, lr: 3.26e-03, grad_scale: 32.0 +2023-03-26 23:48:41,558 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108981.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:49:04,005 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.9675, 0.9724, 0.9198, 1.0790, 1.1777, 1.0677, 0.9522, 0.8997], + device='cuda:5'), covar=tensor([0.0363, 0.0261, 0.0624, 0.0282, 0.0231, 0.0460, 0.0356, 0.0376], + device='cuda:5'), in_proj_covar=tensor([0.0096, 0.0106, 0.0143, 0.0111, 0.0099, 0.0110, 0.0099, 0.0110], + device='cuda:5'), out_proj_covar=tensor([7.4520e-05, 8.1978e-05, 1.1242e-04, 8.4888e-05, 7.6935e-05, 8.1188e-05, + 7.3524e-05, 8.4272e-05], device='cuda:5') +2023-03-26 23:49:11,419 INFO [finetune.py:976] (5/7) Epoch 20, batch 200, loss[loss=0.1586, simple_loss=0.224, pruned_loss=0.04661, over 4144.00 frames. ], tot_loss[loss=0.1734, simple_loss=0.2411, pruned_loss=0.05284, over 606986.51 frames. ], batch size: 65, lr: 3.26e-03, grad_scale: 32.0 +2023-03-26 23:49:11,509 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5183, 1.3942, 1.9763, 2.8489, 1.9083, 2.1840, 1.0006, 2.3672], + device='cuda:5'), covar=tensor([0.1747, 0.1494, 0.1204, 0.0682, 0.0902, 0.1552, 0.1724, 0.0591], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0116, 0.0133, 0.0164, 0.0100, 0.0136, 0.0124, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 23:49:13,181 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=109029.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:49:19,013 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109037.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 23:49:29,131 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.011e+02 1.519e+02 1.780e+02 2.129e+02 3.450e+02, threshold=3.561e+02, percent-clipped=0.0 +2023-03-26 23:49:32,732 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1000, 1.3484, 0.8597, 1.9428, 2.3597, 1.8043, 1.7001, 1.7893], + device='cuda:5'), covar=tensor([0.1523, 0.2145, 0.2027, 0.1190, 0.1979, 0.1881, 0.1441, 0.2069], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0095, 0.0111, 0.0091, 0.0120, 0.0093, 0.0099, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-26 23:49:42,645 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-03-26 23:49:44,473 INFO [finetune.py:976] (5/7) Epoch 20, batch 250, loss[loss=0.1195, simple_loss=0.1883, pruned_loss=0.0253, over 4787.00 frames. ], tot_loss[loss=0.1757, simple_loss=0.2443, pruned_loss=0.05354, over 682829.87 frames. ], batch size: 25, lr: 3.26e-03, grad_scale: 32.0 +2023-03-26 23:49:52,133 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109088.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:49:58,701 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109098.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 23:50:17,243 INFO [finetune.py:976] (5/7) Epoch 20, batch 300, loss[loss=0.2051, simple_loss=0.291, pruned_loss=0.05954, over 4823.00 frames. ], tot_loss[loss=0.1777, simple_loss=0.2476, pruned_loss=0.05387, over 743928.37 frames. 
], batch size: 40, lr: 3.26e-03, grad_scale: 32.0 +2023-03-26 23:50:23,616 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=109136.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:50:31,196 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109147.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:50:35,398 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.121e+02 1.518e+02 1.856e+02 2.256e+02 3.204e+02, threshold=3.712e+02, percent-clipped=0.0 +2023-03-26 23:50:50,164 INFO [finetune.py:976] (5/7) Epoch 20, batch 350, loss[loss=0.1858, simple_loss=0.2624, pruned_loss=0.0546, over 4828.00 frames. ], tot_loss[loss=0.181, simple_loss=0.2511, pruned_loss=0.05543, over 791057.03 frames. ], batch size: 49, lr: 3.26e-03, grad_scale: 64.0 +2023-03-26 23:51:00,306 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-03-26 23:51:01,445 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109194.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:51:01,863 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.49 vs. limit=5.0 +2023-03-26 23:51:08,326 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.4161, 3.8341, 4.0378, 4.2605, 4.1906, 3.8422, 4.5046, 1.4493], + device='cuda:5'), covar=tensor([0.0785, 0.0871, 0.0767, 0.0907, 0.1105, 0.1485, 0.0584, 0.5291], + device='cuda:5'), in_proj_covar=tensor([0.0347, 0.0244, 0.0276, 0.0291, 0.0331, 0.0281, 0.0300, 0.0294], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:51:10,168 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7142, 1.1904, 0.9015, 1.5305, 2.0765, 1.0435, 1.5127, 1.4572], + device='cuda:5'), covar=tensor([0.1515, 0.2178, 0.1876, 0.1235, 0.1935, 0.1968, 0.1408, 0.2097], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0110, 0.0091, 0.0119, 0.0093, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 23:51:25,272 INFO [finetune.py:976] (5/7) Epoch 20, batch 400, loss[loss=0.1767, simple_loss=0.2547, pruned_loss=0.04936, over 4799.00 frames. ], tot_loss[loss=0.1812, simple_loss=0.252, pruned_loss=0.05523, over 825279.53 frames. 
], batch size: 51, lr: 3.26e-03, grad_scale: 64.0 +2023-03-26 23:51:34,318 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109233.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:52:03,603 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.047e+02 1.657e+02 1.900e+02 2.185e+02 4.941e+02, threshold=3.801e+02, percent-clipped=3.0 +2023-03-26 23:52:03,752 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2480, 2.1195, 2.0002, 2.2372, 2.7407, 2.2537, 2.2823, 1.8579], + device='cuda:5'), covar=tensor([0.1714, 0.1704, 0.1507, 0.1365, 0.1540, 0.0960, 0.1884, 0.1427], + device='cuda:5'), in_proj_covar=tensor([0.0245, 0.0211, 0.0211, 0.0194, 0.0244, 0.0188, 0.0216, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:52:04,368 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109255.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:52:26,280 INFO [finetune.py:976] (5/7) Epoch 20, batch 450, loss[loss=0.1542, simple_loss=0.2316, pruned_loss=0.03838, over 4816.00 frames. ], tot_loss[loss=0.1799, simple_loss=0.2509, pruned_loss=0.0545, over 854336.80 frames. ], batch size: 39, lr: 3.26e-03, grad_scale: 64.0 +2023-03-26 23:52:28,705 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4529, 1.5058, 2.0175, 3.0358, 2.0349, 2.3072, 0.9097, 2.5213], + device='cuda:5'), covar=tensor([0.1891, 0.1457, 0.1234, 0.0600, 0.0865, 0.1266, 0.1975, 0.0520], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0116, 0.0134, 0.0165, 0.0101, 0.0137, 0.0124, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-26 23:52:29,294 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=109281.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:52:38,168 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109294.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:52:54,642 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109318.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:53:00,675 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.68 vs. limit=2.0 +2023-03-26 23:53:02,105 INFO [finetune.py:976] (5/7) Epoch 20, batch 500, loss[loss=0.1904, simple_loss=0.2493, pruned_loss=0.06576, over 4930.00 frames. ], tot_loss[loss=0.1777, simple_loss=0.2486, pruned_loss=0.05342, over 876319.88 frames. 
], batch size: 33, lr: 3.26e-03, grad_scale: 64.0 +2023-03-26 23:53:34,823 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.087e+02 1.492e+02 1.802e+02 2.178e+02 4.247e+02, threshold=3.605e+02, percent-clipped=3.0 +2023-03-26 23:53:39,366 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109355.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:53:47,680 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3857, 1.3912, 1.4078, 0.8306, 1.4066, 1.6087, 1.7002, 1.2902], + device='cuda:5'), covar=tensor([0.0848, 0.0596, 0.0523, 0.0482, 0.0507, 0.0546, 0.0327, 0.0612], + device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0149, 0.0124, 0.0124, 0.0130, 0.0128, 0.0142, 0.0147], + device='cuda:5'), out_proj_covar=tensor([9.0243e-05, 1.0815e-04, 8.8500e-05, 8.7808e-05, 9.1689e-05, 9.1520e-05, + 1.0162e-04, 1.0523e-04], device='cuda:5') +2023-03-26 23:53:52,984 INFO [finetune.py:976] (5/7) Epoch 20, batch 550, loss[loss=0.1794, simple_loss=0.243, pruned_loss=0.05792, over 4937.00 frames. ], tot_loss[loss=0.1769, simple_loss=0.2467, pruned_loss=0.05352, over 892017.77 frames. ], batch size: 38, lr: 3.26e-03, grad_scale: 64.0 +2023-03-26 23:53:53,722 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6528, 1.6377, 1.4633, 1.6206, 1.9407, 1.8914, 1.5987, 1.4905], + device='cuda:5'), covar=tensor([0.0334, 0.0367, 0.0610, 0.0340, 0.0240, 0.0530, 0.0353, 0.0395], + device='cuda:5'), in_proj_covar=tensor([0.0096, 0.0106, 0.0143, 0.0110, 0.0099, 0.0110, 0.0099, 0.0110], + device='cuda:5'), out_proj_covar=tensor([7.4665e-05, 8.1787e-05, 1.1256e-04, 8.4643e-05, 7.6756e-05, 8.1074e-05, + 7.3525e-05, 8.4114e-05], device='cuda:5') +2023-03-26 23:53:54,327 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109379.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 23:54:03,686 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109393.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 23:54:25,145 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2258, 2.2165, 1.8362, 2.2516, 2.0499, 2.0878, 2.1053, 2.8685], + device='cuda:5'), covar=tensor([0.3733, 0.4655, 0.3364, 0.4416, 0.4947, 0.2369, 0.4285, 0.1564], + device='cuda:5'), in_proj_covar=tensor([0.0288, 0.0262, 0.0231, 0.0277, 0.0253, 0.0222, 0.0253, 0.0233], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:54:26,239 INFO [finetune.py:976] (5/7) Epoch 20, batch 600, loss[loss=0.1511, simple_loss=0.2209, pruned_loss=0.04065, over 4706.00 frames. ], tot_loss[loss=0.1755, simple_loss=0.2452, pruned_loss=0.05287, over 906173.86 frames. 
], batch size: 23, lr: 3.26e-03, grad_scale: 32.0 +2023-03-26 23:54:33,443 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8078, 1.3173, 1.8024, 1.7725, 1.5625, 1.5621, 1.7323, 1.7057], + device='cuda:5'), covar=tensor([0.3776, 0.3823, 0.3202, 0.3535, 0.4645, 0.3747, 0.4209, 0.2973], + device='cuda:5'), in_proj_covar=tensor([0.0251, 0.0239, 0.0260, 0.0279, 0.0276, 0.0251, 0.0286, 0.0242], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:54:39,783 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109447.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:54:44,546 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.417e+01 1.548e+02 1.729e+02 2.159e+02 3.434e+02, threshold=3.458e+02, percent-clipped=0.0 +2023-03-26 23:54:56,540 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-03-26 23:54:59,325 INFO [finetune.py:976] (5/7) Epoch 20, batch 650, loss[loss=0.2435, simple_loss=0.3065, pruned_loss=0.09029, over 4807.00 frames. ], tot_loss[loss=0.1777, simple_loss=0.2479, pruned_loss=0.05371, over 915798.23 frames. ], batch size: 38, lr: 3.26e-03, grad_scale: 32.0 +2023-03-26 23:55:10,858 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=109495.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:55:19,121 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9777, 1.9398, 1.8384, 2.0874, 2.3640, 2.1289, 1.8689, 1.7588], + device='cuda:5'), covar=tensor([0.1502, 0.1297, 0.1166, 0.1127, 0.1142, 0.0707, 0.1424, 0.1297], + device='cuda:5'), in_proj_covar=tensor([0.0246, 0.0211, 0.0212, 0.0195, 0.0245, 0.0189, 0.0217, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-26 23:55:33,024 INFO [finetune.py:976] (5/7) Epoch 20, batch 700, loss[loss=0.1887, simple_loss=0.235, pruned_loss=0.07116, over 3745.00 frames. ], tot_loss[loss=0.1792, simple_loss=0.2494, pruned_loss=0.05453, over 920191.37 frames. ], batch size: 16, lr: 3.26e-03, grad_scale: 32.0 +2023-03-26 23:55:47,502 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109550.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:55:51,350 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.095e+02 1.485e+02 1.783e+02 2.085e+02 4.380e+02, threshold=3.566e+02, percent-clipped=3.0 +2023-03-26 23:56:06,063 INFO [finetune.py:976] (5/7) Epoch 20, batch 750, loss[loss=0.1714, simple_loss=0.2409, pruned_loss=0.0509, over 4863.00 frames. ], tot_loss[loss=0.1793, simple_loss=0.2498, pruned_loss=0.05444, over 927815.41 frames. ], batch size: 34, lr: 3.26e-03, grad_scale: 32.0 +2023-03-26 23:56:07,743 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109579.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:56:39,568 INFO [finetune.py:976] (5/7) Epoch 20, batch 800, loss[loss=0.1841, simple_loss=0.2423, pruned_loss=0.06292, over 4029.00 frames. ], tot_loss[loss=0.1789, simple_loss=0.2497, pruned_loss=0.05404, over 934223.08 frames. ], batch size: 65, lr: 3.25e-03, grad_scale: 32.0 +2023-03-26 23:56:42,610 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. 
limit=2.0 +2023-03-26 23:56:50,326 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109640.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:56:56,835 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109650.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:56:59,766 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.018e+02 1.497e+02 1.774e+02 2.103e+02 3.199e+02, threshold=3.548e+02, percent-clipped=0.0 +2023-03-26 23:57:03,312 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109659.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:57:23,384 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109674.0, num_to_drop=1, layers_to_drop={1} +2023-03-26 23:57:25,125 INFO [finetune.py:976] (5/7) Epoch 20, batch 850, loss[loss=0.1546, simple_loss=0.2218, pruned_loss=0.04374, over 4739.00 frames. ], tot_loss[loss=0.1767, simple_loss=0.2474, pruned_loss=0.05303, over 938614.42 frames. ], batch size: 59, lr: 3.25e-03, grad_scale: 32.0 +2023-03-26 23:57:40,051 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109693.0, num_to_drop=1, layers_to_drop={2} +2023-03-26 23:58:06,039 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109720.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:58:10,729 INFO [finetune.py:976] (5/7) Epoch 20, batch 900, loss[loss=0.1468, simple_loss=0.2222, pruned_loss=0.03569, over 4788.00 frames. ], tot_loss[loss=0.1752, simple_loss=0.2453, pruned_loss=0.05248, over 941761.71 frames. ], batch size: 28, lr: 3.25e-03, grad_scale: 32.0 +2023-03-26 23:58:22,314 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=109741.0, num_to_drop=1, layers_to_drop={0} +2023-03-26 23:58:40,321 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.017e+02 1.530e+02 1.823e+02 2.181e+02 3.809e+02, threshold=3.647e+02, percent-clipped=1.0 +2023-03-26 23:59:03,843 INFO [finetune.py:976] (5/7) Epoch 20, batch 950, loss[loss=0.2403, simple_loss=0.2963, pruned_loss=0.09212, over 4821.00 frames. ], tot_loss[loss=0.1758, simple_loss=0.2448, pruned_loss=0.05338, over 945219.88 frames. ], batch size: 51, lr: 3.25e-03, grad_scale: 32.0 +2023-03-26 23:59:36,849 INFO [finetune.py:976] (5/7) Epoch 20, batch 1000, loss[loss=0.2007, simple_loss=0.2779, pruned_loss=0.0618, over 4829.00 frames. ], tot_loss[loss=0.177, simple_loss=0.246, pruned_loss=0.05395, over 946702.01 frames. ], batch size: 39, lr: 3.25e-03, grad_scale: 32.0 +2023-03-26 23:59:39,038 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. 
limit=2.0 +2023-03-26 23:59:52,404 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109850.0, num_to_drop=0, layers_to_drop=set() +2023-03-26 23:59:55,344 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.032e+02 1.630e+02 1.951e+02 2.313e+02 5.473e+02, threshold=3.903e+02, percent-clipped=2.0 +2023-03-27 00:00:00,723 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6046, 3.4693, 3.2828, 1.5112, 3.5982, 2.6957, 0.7756, 2.3665], + device='cuda:5'), covar=tensor([0.2235, 0.2431, 0.1621, 0.3888, 0.1152, 0.1055, 0.4582, 0.1749], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0177, 0.0160, 0.0131, 0.0161, 0.0123, 0.0147, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 00:00:08,420 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6897, 1.5585, 1.5378, 1.6042, 1.1629, 3.6411, 1.4589, 1.8089], + device='cuda:5'), covar=tensor([0.3230, 0.2355, 0.2129, 0.2285, 0.1795, 0.0175, 0.2529, 0.1243], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0115, 0.0120, 0.0122, 0.0114, 0.0096, 0.0095, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 00:00:10,589 INFO [finetune.py:976] (5/7) Epoch 20, batch 1050, loss[loss=0.1819, simple_loss=0.2585, pruned_loss=0.05269, over 4902.00 frames. ], tot_loss[loss=0.178, simple_loss=0.2483, pruned_loss=0.05386, over 950623.53 frames. ], batch size: 43, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:00:10,793 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-03-27 00:00:24,789 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=109898.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:00:43,711 INFO [finetune.py:976] (5/7) Epoch 20, batch 1100, loss[loss=0.2269, simple_loss=0.2957, pruned_loss=0.07903, over 4905.00 frames. ], tot_loss[loss=0.1792, simple_loss=0.2498, pruned_loss=0.05435, over 949997.81 frames. ], batch size: 36, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:00:49,670 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109935.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:00:59,780 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109950.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:01:01,372 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-03-27 00:01:02,702 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.532e+02 1.890e+02 2.271e+02 3.423e+02, threshold=3.780e+02, percent-clipped=0.0 +2023-03-27 00:01:13,355 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. 
limit=2.0 +2023-03-27 00:01:14,946 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0253, 1.3366, 2.0508, 1.9461, 1.8200, 1.8001, 1.8894, 1.9371], + device='cuda:5'), covar=tensor([0.3606, 0.4040, 0.3557, 0.3563, 0.4698, 0.3538, 0.4667, 0.3114], + device='cuda:5'), in_proj_covar=tensor([0.0253, 0.0241, 0.0262, 0.0281, 0.0279, 0.0254, 0.0290, 0.0244], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:01:15,523 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109974.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:01:17,221 INFO [finetune.py:976] (5/7) Epoch 20, batch 1150, loss[loss=0.1866, simple_loss=0.2611, pruned_loss=0.05603, over 4815.00 frames. ], tot_loss[loss=0.1789, simple_loss=0.25, pruned_loss=0.05393, over 950933.14 frames. ], batch size: 25, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:01:31,932 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=109998.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:01:44,414 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110015.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:01:45,675 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110017.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:01:48,683 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=110022.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:01:52,168 INFO [finetune.py:976] (5/7) Epoch 20, batch 1200, loss[loss=0.1515, simple_loss=0.2286, pruned_loss=0.03718, over 4721.00 frames. ], tot_loss[loss=0.1776, simple_loss=0.2486, pruned_loss=0.05334, over 952649.71 frames. ], batch size: 54, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:02:04,545 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110044.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:02:08,829 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-27 00:02:11,656 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.682e+01 1.527e+02 1.776e+02 2.166e+02 4.163e+02, threshold=3.552e+02, percent-clipped=2.0 +2023-03-27 00:02:32,493 INFO [finetune.py:976] (5/7) Epoch 20, batch 1250, loss[loss=0.2026, simple_loss=0.2592, pruned_loss=0.07299, over 4741.00 frames. ], tot_loss[loss=0.1769, simple_loss=0.247, pruned_loss=0.05346, over 952568.18 frames. ], batch size: 54, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:02:33,241 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110078.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:03:02,209 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110105.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:03:09,741 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.76 vs. limit=5.0 +2023-03-27 00:03:09,985 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110110.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:03:13,073 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110115.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:03:23,864 INFO [finetune.py:976] (5/7) Epoch 20, batch 1300, loss[loss=0.1564, simple_loss=0.2255, pruned_loss=0.04361, over 4904.00 frames. 
], tot_loss[loss=0.1741, simple_loss=0.2435, pruned_loss=0.05229, over 955628.13 frames. ], batch size: 37, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:03:25,172 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.63 vs. limit=2.0 +2023-03-27 00:03:33,297 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7145, 2.7730, 2.6126, 2.0213, 2.7509, 3.0212, 3.0679, 2.4399], + device='cuda:5'), covar=tensor([0.0545, 0.0504, 0.0658, 0.0803, 0.0734, 0.0546, 0.0448, 0.0841], + device='cuda:5'), in_proj_covar=tensor([0.0134, 0.0135, 0.0141, 0.0121, 0.0125, 0.0140, 0.0140, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:03:45,428 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.532e+02 1.884e+02 2.318e+02 4.682e+02, threshold=3.767e+02, percent-clipped=2.0 +2023-03-27 00:04:04,747 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110171.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:04:12,711 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110176.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:04:13,180 INFO [finetune.py:976] (5/7) Epoch 20, batch 1350, loss[loss=0.169, simple_loss=0.2495, pruned_loss=0.04428, over 4817.00 frames. ], tot_loss[loss=0.174, simple_loss=0.2434, pruned_loss=0.05235, over 956454.87 frames. ], batch size: 38, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:05:03,454 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7402, 1.5794, 1.6798, 1.5993, 1.2418, 3.6978, 1.4145, 1.8691], + device='cuda:5'), covar=tensor([0.3321, 0.2501, 0.2062, 0.2445, 0.1742, 0.0190, 0.2603, 0.1267], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0114, 0.0119, 0.0122, 0.0113, 0.0096, 0.0095, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 00:05:15,847 INFO [finetune.py:976] (5/7) Epoch 20, batch 1400, loss[loss=0.1502, simple_loss=0.2001, pruned_loss=0.05013, over 3944.00 frames. ], tot_loss[loss=0.1774, simple_loss=0.2469, pruned_loss=0.0539, over 956436.93 frames. ], batch size: 17, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:05:22,727 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.9981, 0.8962, 0.9513, 1.0164, 1.1525, 1.1293, 0.9964, 0.9635], + device='cuda:5'), covar=tensor([0.0396, 0.0312, 0.0614, 0.0299, 0.0283, 0.0431, 0.0333, 0.0362], + device='cuda:5'), in_proj_covar=tensor([0.0097, 0.0108, 0.0145, 0.0112, 0.0100, 0.0111, 0.0100, 0.0111], + device='cuda:5'), out_proj_covar=tensor([7.5380e-05, 8.2971e-05, 1.1393e-04, 8.5876e-05, 7.8071e-05, 8.2157e-05, + 7.4472e-05, 8.5134e-05], device='cuda:5') +2023-03-27 00:05:26,731 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110235.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:05:48,218 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.073e+02 1.545e+02 1.824e+02 2.176e+02 3.637e+02, threshold=3.648e+02, percent-clipped=0.0 +2023-03-27 00:05:56,182 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.25 vs. limit=5.0 +2023-03-27 00:06:02,045 INFO [finetune.py:976] (5/7) Epoch 20, batch 1450, loss[loss=0.198, simple_loss=0.2713, pruned_loss=0.06233, over 4798.00 frames. ], tot_loss[loss=0.1788, simple_loss=0.2489, pruned_loss=0.05437, over 956870.04 frames. 
], batch size: 51, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:06:06,235 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=110283.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:06:06,284 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110283.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:06:20,398 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110303.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:06:28,592 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110315.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:06:30,442 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110318.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:06:32,940 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.46 vs. limit=5.0 +2023-03-27 00:06:35,827 INFO [finetune.py:976] (5/7) Epoch 20, batch 1500, loss[loss=0.1715, simple_loss=0.2627, pruned_loss=0.04017, over 4836.00 frames. ], tot_loss[loss=0.1795, simple_loss=0.2501, pruned_loss=0.05444, over 956234.51 frames. ], batch size: 47, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:06:47,649 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110344.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:06:55,109 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.117e+02 1.590e+02 1.939e+02 2.244e+02 3.777e+02, threshold=3.878e+02, percent-clipped=2.0 +2023-03-27 00:07:00,486 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=110363.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:07:01,160 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110364.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:07:07,073 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110373.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:07:09,448 INFO [finetune.py:976] (5/7) Epoch 20, batch 1550, loss[loss=0.1802, simple_loss=0.2522, pruned_loss=0.05416, over 4811.00 frames. ], tot_loss[loss=0.1792, simple_loss=0.25, pruned_loss=0.05422, over 954154.91 frames. ], batch size: 39, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:07:10,821 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110379.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:07:25,519 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110400.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:07:43,129 INFO [finetune.py:976] (5/7) Epoch 20, batch 1600, loss[loss=0.1434, simple_loss=0.2144, pruned_loss=0.03617, over 4788.00 frames. ], tot_loss[loss=0.1786, simple_loss=0.2487, pruned_loss=0.05421, over 954419.93 frames. 
], batch size: 29, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:08:13,512 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.758e+01 1.502e+02 1.787e+02 2.306e+02 4.709e+02, threshold=3.574e+02, percent-clipped=2.0 +2023-03-27 00:08:29,713 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110466.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:08:33,170 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110471.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:08:40,392 INFO [finetune.py:976] (5/7) Epoch 20, batch 1650, loss[loss=0.1769, simple_loss=0.2397, pruned_loss=0.05707, over 4845.00 frames. ], tot_loss[loss=0.1772, simple_loss=0.2465, pruned_loss=0.054, over 956691.31 frames. ], batch size: 47, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:08:53,162 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0160, 0.8951, 0.9004, 1.0828, 1.1821, 1.1476, 0.9723, 0.9452], + device='cuda:5'), covar=tensor([0.0362, 0.0333, 0.0642, 0.0299, 0.0286, 0.0435, 0.0385, 0.0394], + device='cuda:5'), in_proj_covar=tensor([0.0097, 0.0107, 0.0144, 0.0111, 0.0100, 0.0111, 0.0100, 0.0111], + device='cuda:5'), out_proj_covar=tensor([7.5287e-05, 8.2659e-05, 1.1362e-04, 8.5584e-05, 7.7930e-05, 8.2311e-05, + 7.4095e-05, 8.4972e-05], device='cuda:5') +2023-03-27 00:09:24,057 INFO [finetune.py:976] (5/7) Epoch 20, batch 1700, loss[loss=0.1733, simple_loss=0.247, pruned_loss=0.04976, over 4842.00 frames. ], tot_loss[loss=0.176, simple_loss=0.2448, pruned_loss=0.05361, over 956313.46 frames. ], batch size: 47, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:09:36,237 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0392, 1.9157, 2.0545, 1.5867, 1.9305, 2.1356, 2.1785, 1.6436], + device='cuda:5'), covar=tensor([0.0488, 0.0501, 0.0543, 0.0721, 0.0801, 0.0465, 0.0419, 0.0930], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0134, 0.0139, 0.0119, 0.0123, 0.0138, 0.0139, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:09:42,518 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.071e+02 1.500e+02 1.773e+02 2.246e+02 3.830e+02, threshold=3.546e+02, percent-clipped=2.0 +2023-03-27 00:09:57,624 INFO [finetune.py:976] (5/7) Epoch 20, batch 1750, loss[loss=0.19, simple_loss=0.2572, pruned_loss=0.06137, over 4830.00 frames. ], tot_loss[loss=0.1759, simple_loss=0.2452, pruned_loss=0.05328, over 956732.32 frames. ], batch size: 33, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:10:08,604 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6266, 1.5333, 2.1846, 1.8465, 1.7951, 3.9146, 1.5360, 1.7245], + device='cuda:5'), covar=tensor([0.0950, 0.1897, 0.1366, 0.1083, 0.1597, 0.0225, 0.1545, 0.1938], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0077, 0.0091, 0.0080, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 00:10:20,398 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110611.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:10:30,990 INFO [finetune.py:976] (5/7) Epoch 20, batch 1800, loss[loss=0.1639, simple_loss=0.2373, pruned_loss=0.04524, over 4765.00 frames. ], tot_loss[loss=0.1783, simple_loss=0.2486, pruned_loss=0.05397, over 955110.06 frames. 
], batch size: 28, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:10:38,898 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110639.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:10:39,014 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-03-27 00:10:55,668 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.008e+02 1.630e+02 1.887e+02 2.220e+02 3.285e+02, threshold=3.774e+02, percent-clipped=0.0 +2023-03-27 00:11:01,586 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110659.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:11:03,363 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0787, 2.0021, 1.6429, 1.7530, 1.8962, 1.8341, 1.9066, 2.6108], + device='cuda:5'), covar=tensor([0.3567, 0.3853, 0.3230, 0.3596, 0.3695, 0.2440, 0.3703, 0.1586], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0261, 0.0230, 0.0275, 0.0251, 0.0220, 0.0251, 0.0232], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:11:11,034 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110672.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:11:11,631 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110673.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:11:12,218 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110674.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:11:14,004 INFO [finetune.py:976] (5/7) Epoch 20, batch 1850, loss[loss=0.2119, simple_loss=0.2878, pruned_loss=0.06798, over 4899.00 frames. ], tot_loss[loss=0.1785, simple_loss=0.2489, pruned_loss=0.05402, over 954192.22 frames. ], batch size: 36, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:11:29,602 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110700.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:11:44,152 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=110721.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:11:47,774 INFO [finetune.py:976] (5/7) Epoch 20, batch 1900, loss[loss=0.1748, simple_loss=0.2425, pruned_loss=0.0535, over 4881.00 frames. ], tot_loss[loss=0.1768, simple_loss=0.2479, pruned_loss=0.05288, over 955390.06 frames. 
], batch size: 35, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:11:47,878 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5575, 1.7594, 2.3686, 1.8857, 2.0877, 4.3880, 1.8510, 1.9972], + device='cuda:5'), covar=tensor([0.0970, 0.1672, 0.1132, 0.0992, 0.1347, 0.0191, 0.1323, 0.1707], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0077, 0.0091, 0.0080, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 00:11:54,347 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110736.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:11:58,549 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.4176, 3.8168, 4.0534, 4.2708, 4.1633, 3.8507, 4.4947, 1.2987], + device='cuda:5'), covar=tensor([0.0814, 0.0929, 0.0860, 0.0958, 0.1374, 0.1797, 0.0754, 0.6188], + device='cuda:5'), in_proj_covar=tensor([0.0352, 0.0246, 0.0279, 0.0293, 0.0334, 0.0284, 0.0304, 0.0298], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:12:01,621 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=110748.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:12:06,290 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.024e+02 1.472e+02 1.826e+02 2.198e+02 3.929e+02, threshold=3.651e+02, percent-clipped=1.0 +2023-03-27 00:12:14,065 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110766.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:12:17,569 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110771.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:12:21,624 INFO [finetune.py:976] (5/7) Epoch 20, batch 1950, loss[loss=0.1714, simple_loss=0.2291, pruned_loss=0.0568, over 4727.00 frames. ], tot_loss[loss=0.1753, simple_loss=0.2459, pruned_loss=0.05228, over 955122.23 frames. ], batch size: 23, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:12:25,415 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2338, 1.7702, 2.2243, 2.1843, 1.8614, 1.8986, 2.1679, 2.0501], + device='cuda:5'), covar=tensor([0.3976, 0.4159, 0.3439, 0.3841, 0.5260, 0.3883, 0.4889, 0.3215], + device='cuda:5'), in_proj_covar=tensor([0.0253, 0.0241, 0.0262, 0.0281, 0.0279, 0.0254, 0.0289, 0.0244], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:12:34,812 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110797.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 00:12:46,182 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=110814.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:12:49,733 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=110819.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:12:55,563 INFO [finetune.py:976] (5/7) Epoch 20, batch 2000, loss[loss=0.1826, simple_loss=0.2453, pruned_loss=0.05997, over 4870.00 frames. ], tot_loss[loss=0.1748, simple_loss=0.2448, pruned_loss=0.05241, over 954554.82 frames. 
], batch size: 31, lr: 3.25e-03, grad_scale: 32.0 +2023-03-27 00:13:17,583 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.033e+02 1.482e+02 1.740e+02 2.016e+02 2.901e+02, threshold=3.480e+02, percent-clipped=0.0 +2023-03-27 00:13:30,408 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110868.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:13:38,675 INFO [finetune.py:976] (5/7) Epoch 20, batch 2050, loss[loss=0.1375, simple_loss=0.2104, pruned_loss=0.0323, over 4834.00 frames. ], tot_loss[loss=0.1722, simple_loss=0.2415, pruned_loss=0.05146, over 953413.79 frames. ], batch size: 33, lr: 3.24e-03, grad_scale: 32.0 +2023-03-27 00:13:55,063 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([5.3154, 4.5818, 4.8980, 5.1622, 5.0481, 4.7114, 5.3878, 1.5614], + device='cuda:5'), covar=tensor([0.0640, 0.0842, 0.0650, 0.0715, 0.1124, 0.1574, 0.0431, 0.5993], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0245, 0.0278, 0.0292, 0.0333, 0.0283, 0.0302, 0.0297], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:14:28,243 INFO [finetune.py:976] (5/7) Epoch 20, batch 2100, loss[loss=0.2436, simple_loss=0.3003, pruned_loss=0.09338, over 4846.00 frames. ], tot_loss[loss=0.1717, simple_loss=0.241, pruned_loss=0.05118, over 954581.59 frames. ], batch size: 47, lr: 3.24e-03, grad_scale: 32.0 +2023-03-27 00:14:29,600 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110929.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:14:39,998 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110939.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:14:50,629 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.458e+01 1.520e+02 1.862e+02 2.217e+02 3.516e+02, threshold=3.725e+02, percent-clipped=1.0 +2023-03-27 00:14:52,594 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110958.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:14:53,170 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110959.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:14:58,413 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110967.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:15:03,704 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110974.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:15:05,921 INFO [finetune.py:976] (5/7) Epoch 20, batch 2150, loss[loss=0.2806, simple_loss=0.3307, pruned_loss=0.1152, over 4913.00 frames. ], tot_loss[loss=0.1742, simple_loss=0.2439, pruned_loss=0.05223, over 953065.62 frames. ], batch size: 42, lr: 3.24e-03, grad_scale: 32.0 +2023-03-27 00:15:10,221 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-03-27 00:15:12,590 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=110987.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:15:25,802 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=111007.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:15:33,646 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111019.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:15:35,880 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=111022.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:15:38,857 INFO [finetune.py:976] (5/7) Epoch 20, batch 2200, loss[loss=0.2502, simple_loss=0.3131, pruned_loss=0.0936, over 4152.00 frames. ], tot_loss[loss=0.1779, simple_loss=0.248, pruned_loss=0.05389, over 952162.72 frames. ], batch size: 65, lr: 3.24e-03, grad_scale: 32.0 +2023-03-27 00:16:00,235 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.032e+02 1.523e+02 1.854e+02 2.195e+02 4.707e+02, threshold=3.708e+02, percent-clipped=2.0 +2023-03-27 00:16:20,240 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0314, 2.7978, 2.5908, 1.3587, 2.6569, 2.0677, 2.0688, 2.5237], + device='cuda:5'), covar=tensor([0.0903, 0.0712, 0.1712, 0.2200, 0.1656, 0.2319, 0.2088, 0.1097], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0192, 0.0201, 0.0183, 0.0211, 0.0208, 0.0224, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:16:23,018 INFO [finetune.py:976] (5/7) Epoch 20, batch 2250, loss[loss=0.1936, simple_loss=0.2678, pruned_loss=0.05966, over 4826.00 frames. ], tot_loss[loss=0.1773, simple_loss=0.2483, pruned_loss=0.05317, over 953456.33 frames. ], batch size: 30, lr: 3.24e-03, grad_scale: 32.0 +2023-03-27 00:16:29,627 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5872, 1.6242, 1.3535, 1.7739, 1.8689, 1.7896, 1.3129, 1.3136], + device='cuda:5'), covar=tensor([0.2507, 0.2012, 0.2176, 0.1611, 0.1909, 0.1255, 0.2741, 0.2029], + device='cuda:5'), in_proj_covar=tensor([0.0245, 0.0210, 0.0211, 0.0194, 0.0243, 0.0188, 0.0216, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:16:33,693 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=111092.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 00:16:39,363 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-03-27 00:16:56,461 INFO [finetune.py:976] (5/7) Epoch 20, batch 2300, loss[loss=0.1775, simple_loss=0.2536, pruned_loss=0.05073, over 4722.00 frames. ], tot_loss[loss=0.1772, simple_loss=0.2488, pruned_loss=0.05284, over 951828.78 frames. 
], batch size: 59, lr: 3.24e-03, grad_scale: 32.0 +2023-03-27 00:17:14,318 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5828, 1.6921, 2.4204, 1.9597, 1.9237, 4.2361, 1.6232, 1.9319], + device='cuda:5'), covar=tensor([0.1017, 0.1806, 0.1069, 0.1003, 0.1514, 0.0232, 0.1534, 0.1760], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0074, 0.0077, 0.0091, 0.0081, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 00:17:15,906 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.510e+02 1.791e+02 2.077e+02 4.254e+02, threshold=3.582e+02, percent-clipped=2.0 +2023-03-27 00:17:17,252 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2918, 2.0247, 2.4582, 1.8266, 2.2537, 2.6269, 2.0640, 2.6713], + device='cuda:5'), covar=tensor([0.1295, 0.1975, 0.1704, 0.1863, 0.1014, 0.1256, 0.2354, 0.0776], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0204, 0.0191, 0.0190, 0.0174, 0.0213, 0.0219, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:17:30,235 INFO [finetune.py:976] (5/7) Epoch 20, batch 2350, loss[loss=0.1678, simple_loss=0.2373, pruned_loss=0.04913, over 4915.00 frames. ], tot_loss[loss=0.1751, simple_loss=0.2459, pruned_loss=0.05214, over 952152.83 frames. ], batch size: 37, lr: 3.24e-03, grad_scale: 32.0 +2023-03-27 00:18:01,643 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=111224.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:18:02,317 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111225.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:18:03,427 INFO [finetune.py:976] (5/7) Epoch 20, batch 2400, loss[loss=0.1339, simple_loss=0.2106, pruned_loss=0.02857, over 4806.00 frames. ], tot_loss[loss=0.1744, simple_loss=0.2445, pruned_loss=0.05215, over 952702.39 frames. ], batch size: 45, lr: 3.24e-03, grad_scale: 32.0 +2023-03-27 00:18:22,739 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.826e+01 1.548e+02 1.775e+02 2.063e+02 3.363e+02, threshold=3.550e+02, percent-clipped=0.0 +2023-03-27 00:18:30,984 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111267.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:18:36,919 INFO [finetune.py:976] (5/7) Epoch 20, batch 2450, loss[loss=0.1612, simple_loss=0.2227, pruned_loss=0.04987, over 4826.00 frames. ], tot_loss[loss=0.173, simple_loss=0.2426, pruned_loss=0.05169, over 954553.49 frames. ], batch size: 33, lr: 3.24e-03, grad_scale: 32.0 +2023-03-27 00:18:50,052 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111286.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:19:21,768 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=111314.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:19:22,367 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=111315.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:19:33,329 INFO [finetune.py:976] (5/7) Epoch 20, batch 2500, loss[loss=0.2203, simple_loss=0.2884, pruned_loss=0.07615, over 4780.00 frames. ], tot_loss[loss=0.1744, simple_loss=0.2442, pruned_loss=0.05232, over 954304.15 frames. 
], batch size: 54, lr: 3.24e-03, grad_scale: 32.0 +2023-03-27 00:19:39,202 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.39 vs. limit=5.0 +2023-03-27 00:19:39,516 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0668, 1.7429, 2.0559, 2.1017, 1.8444, 1.8234, 2.0417, 1.9072], + device='cuda:5'), covar=tensor([0.4184, 0.4160, 0.3219, 0.3906, 0.4816, 0.3881, 0.4542, 0.3155], + device='cuda:5'), in_proj_covar=tensor([0.0254, 0.0243, 0.0264, 0.0283, 0.0281, 0.0255, 0.0291, 0.0245], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:19:47,716 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-03-27 00:20:03,162 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.143e+02 1.591e+02 1.841e+02 2.119e+02 4.112e+02, threshold=3.683e+02, percent-clipped=1.0 +2023-03-27 00:20:17,418 INFO [finetune.py:976] (5/7) Epoch 20, batch 2550, loss[loss=0.1714, simple_loss=0.2467, pruned_loss=0.04804, over 4869.00 frames. ], tot_loss[loss=0.1768, simple_loss=0.2476, pruned_loss=0.05303, over 953092.29 frames. ], batch size: 34, lr: 3.24e-03, grad_scale: 32.0 +2023-03-27 00:20:27,522 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111392.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 00:20:51,266 INFO [finetune.py:976] (5/7) Epoch 20, batch 2600, loss[loss=0.1719, simple_loss=0.2442, pruned_loss=0.04978, over 4904.00 frames. ], tot_loss[loss=0.1785, simple_loss=0.2495, pruned_loss=0.05373, over 953056.93 frames. ], batch size: 43, lr: 3.24e-03, grad_scale: 64.0 +2023-03-27 00:20:59,728 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=111440.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:21:10,162 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.233e+02 1.591e+02 1.878e+02 2.220e+02 5.233e+02, threshold=3.757e+02, percent-clipped=1.0 +2023-03-27 00:21:20,910 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111468.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:21:31,397 INFO [finetune.py:976] (5/7) Epoch 20, batch 2650, loss[loss=0.1516, simple_loss=0.2329, pruned_loss=0.03511, over 4768.00 frames. ], tot_loss[loss=0.1796, simple_loss=0.2513, pruned_loss=0.05397, over 954769.20 frames. ], batch size: 28, lr: 3.24e-03, grad_scale: 64.0 +2023-03-27 00:22:01,196 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.79 vs. limit=5.0 +2023-03-27 00:22:02,664 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6686, 1.6680, 1.4264, 1.6036, 2.0194, 1.9286, 1.7268, 1.5008], + device='cuda:5'), covar=tensor([0.0308, 0.0302, 0.0569, 0.0298, 0.0210, 0.0448, 0.0288, 0.0391], + device='cuda:5'), in_proj_covar=tensor([0.0097, 0.0107, 0.0145, 0.0111, 0.0100, 0.0111, 0.0100, 0.0112], + device='cuda:5'), out_proj_covar=tensor([7.5202e-05, 8.2597e-05, 1.1384e-04, 8.5344e-05, 7.7832e-05, 8.2046e-05, + 7.4422e-05, 8.5506e-05], device='cuda:5') +2023-03-27 00:22:06,261 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111524.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:22:07,979 INFO [finetune.py:976] (5/7) Epoch 20, batch 2700, loss[loss=0.1442, simple_loss=0.2131, pruned_loss=0.03766, over 4781.00 frames. ], tot_loss[loss=0.1786, simple_loss=0.2502, pruned_loss=0.05353, over 955093.55 frames. 
], batch size: 51, lr: 3.24e-03, grad_scale: 64.0 +2023-03-27 00:22:10,279 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111529.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:22:19,188 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3297, 1.2724, 1.2263, 1.2812, 1.5913, 1.4837, 1.3288, 1.1791], + device='cuda:5'), covar=tensor([0.0326, 0.0298, 0.0608, 0.0289, 0.0240, 0.0451, 0.0303, 0.0386], + device='cuda:5'), in_proj_covar=tensor([0.0097, 0.0107, 0.0145, 0.0111, 0.0100, 0.0111, 0.0100, 0.0112], + device='cuda:5'), out_proj_covar=tensor([7.5230e-05, 8.2648e-05, 1.1395e-04, 8.5428e-05, 7.7899e-05, 8.2105e-05, + 7.4476e-05, 8.5633e-05], device='cuda:5') +2023-03-27 00:22:27,297 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.466e+01 1.581e+02 1.832e+02 2.307e+02 3.346e+02, threshold=3.664e+02, percent-clipped=0.0 +2023-03-27 00:22:38,120 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=111572.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:22:41,150 INFO [finetune.py:976] (5/7) Epoch 20, batch 2750, loss[loss=0.2028, simple_loss=0.2693, pruned_loss=0.06811, over 4729.00 frames. ], tot_loss[loss=0.1783, simple_loss=0.2487, pruned_loss=0.05391, over 955606.38 frames. ], batch size: 54, lr: 3.24e-03, grad_scale: 64.0 +2023-03-27 00:22:44,078 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=111581.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:22:49,576 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7357, 2.4671, 2.0357, 0.9610, 2.3019, 2.1163, 1.8895, 2.2620], + device='cuda:5'), covar=tensor([0.0946, 0.0842, 0.1747, 0.2209, 0.1528, 0.2123, 0.2280, 0.1016], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0192, 0.0200, 0.0182, 0.0211, 0.0206, 0.0223, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:23:06,510 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111614.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:23:07,081 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9325, 3.9864, 3.7942, 2.0464, 4.1082, 3.3092, 1.2968, 3.0953], + device='cuda:5'), covar=tensor([0.2120, 0.1909, 0.1591, 0.3284, 0.1034, 0.0847, 0.4015, 0.1302], + device='cuda:5'), in_proj_covar=tensor([0.0154, 0.0179, 0.0162, 0.0132, 0.0163, 0.0124, 0.0149, 0.0125], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 00:23:14,366 INFO [finetune.py:976] (5/7) Epoch 20, batch 2800, loss[loss=0.125, simple_loss=0.2025, pruned_loss=0.02373, over 4868.00 frames. ], tot_loss[loss=0.1763, simple_loss=0.246, pruned_loss=0.05328, over 955535.99 frames. ], batch size: 34, lr: 3.24e-03, grad_scale: 64.0 +2023-03-27 00:23:19,794 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5262, 1.0641, 0.7466, 1.4504, 2.0133, 1.1417, 1.2855, 1.4329], + device='cuda:5'), covar=tensor([0.1956, 0.2842, 0.2494, 0.1548, 0.2155, 0.2505, 0.2038, 0.2711], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0095, 0.0111, 0.0092, 0.0120, 0.0093, 0.0098, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 00:23:31,138 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. 
limit=2.0 +2023-03-27 00:23:32,760 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.029e+01 1.565e+02 1.748e+02 2.177e+02 3.583e+02, threshold=3.496e+02, percent-clipped=0.0 +2023-03-27 00:23:38,053 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=111662.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:23:48,042 INFO [finetune.py:976] (5/7) Epoch 20, batch 2850, loss[loss=0.2062, simple_loss=0.2709, pruned_loss=0.07073, over 4820.00 frames. ], tot_loss[loss=0.1744, simple_loss=0.2439, pruned_loss=0.05244, over 955462.30 frames. ], batch size: 33, lr: 3.24e-03, grad_scale: 64.0 +2023-03-27 00:23:49,304 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3411, 2.9470, 2.7860, 1.2578, 3.0643, 2.3143, 0.8155, 1.9319], + device='cuda:5'), covar=tensor([0.2384, 0.2478, 0.1787, 0.3763, 0.1422, 0.1106, 0.4067, 0.1727], + device='cuda:5'), in_proj_covar=tensor([0.0154, 0.0178, 0.0162, 0.0131, 0.0163, 0.0124, 0.0148, 0.0125], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 00:24:01,296 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.63 vs. limit=2.0 +2023-03-27 00:24:09,417 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6246, 3.6258, 3.3604, 1.6193, 3.7537, 2.8316, 0.7375, 2.5565], + device='cuda:5'), covar=tensor([0.2168, 0.1983, 0.1604, 0.3551, 0.1190, 0.0968, 0.4546, 0.1533], + device='cuda:5'), in_proj_covar=tensor([0.0154, 0.0179, 0.0162, 0.0132, 0.0163, 0.0124, 0.0149, 0.0125], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 00:24:40,139 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-03-27 00:24:41,634 INFO [finetune.py:976] (5/7) Epoch 20, batch 2900, loss[loss=0.2058, simple_loss=0.2899, pruned_loss=0.06089, over 4824.00 frames. ], tot_loss[loss=0.1767, simple_loss=0.2466, pruned_loss=0.05341, over 956850.13 frames. ], batch size: 40, lr: 3.24e-03, grad_scale: 64.0 +2023-03-27 00:25:11,858 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.2995, 2.9059, 3.0783, 3.2301, 3.0604, 2.8682, 3.3445, 0.9441], + device='cuda:5'), covar=tensor([0.1205, 0.1110, 0.1160, 0.1234, 0.1636, 0.2054, 0.1184, 0.5942], + device='cuda:5'), in_proj_covar=tensor([0.0351, 0.0246, 0.0279, 0.0293, 0.0334, 0.0284, 0.0305, 0.0300], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:25:12,983 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.101e+02 1.542e+02 1.820e+02 2.254e+02 5.949e+02, threshold=3.641e+02, percent-clipped=1.0 +2023-03-27 00:25:29,104 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5483, 1.4287, 1.4089, 1.5685, 0.9943, 2.9412, 1.0248, 1.5802], + device='cuda:5'), covar=tensor([0.3171, 0.2408, 0.2128, 0.2274, 0.1769, 0.0248, 0.2810, 0.1231], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0120, 0.0123, 0.0113, 0.0096, 0.0095, 0.0096], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 00:25:31,433 INFO [finetune.py:976] (5/7) Epoch 20, batch 2950, loss[loss=0.1479, simple_loss=0.2107, pruned_loss=0.04253, over 4206.00 frames. ], tot_loss[loss=0.1779, simple_loss=0.2488, pruned_loss=0.05349, over 954667.42 frames. 
], batch size: 18, lr: 3.24e-03, grad_scale: 64.0 +2023-03-27 00:25:40,069 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111791.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 00:25:51,588 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-03-27 00:26:03,398 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=111824.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:26:05,121 INFO [finetune.py:976] (5/7) Epoch 20, batch 3000, loss[loss=0.1733, simple_loss=0.2461, pruned_loss=0.05027, over 4913.00 frames. ], tot_loss[loss=0.1804, simple_loss=0.2515, pruned_loss=0.05468, over 956181.95 frames. ], batch size: 38, lr: 3.24e-03, grad_scale: 64.0 +2023-03-27 00:26:05,122 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-27 00:26:20,305 INFO [finetune.py:1010] (5/7) Epoch 20, validation: loss=0.1563, simple_loss=0.2257, pruned_loss=0.04344, over 2265189.00 frames. +2023-03-27 00:26:20,305 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-27 00:26:31,143 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8021, 1.2513, 1.8419, 1.7646, 1.5625, 1.5178, 1.7283, 1.7105], + device='cuda:5'), covar=tensor([0.3477, 0.3595, 0.3042, 0.3154, 0.4368, 0.3554, 0.3773, 0.2724], + device='cuda:5'), in_proj_covar=tensor([0.0253, 0.0242, 0.0262, 0.0281, 0.0279, 0.0253, 0.0289, 0.0243], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:26:40,758 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111852.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 00:26:48,435 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.202e+02 1.618e+02 1.875e+02 2.230e+02 3.575e+02, threshold=3.749e+02, percent-clipped=0.0 +2023-03-27 00:27:11,024 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-03-27 00:27:11,153 INFO [finetune.py:976] (5/7) Epoch 20, batch 3050, loss[loss=0.1591, simple_loss=0.2485, pruned_loss=0.03486, over 4760.00 frames. ], tot_loss[loss=0.1813, simple_loss=0.2528, pruned_loss=0.05484, over 955551.93 frames. ], batch size: 28, lr: 3.24e-03, grad_scale: 32.0 +2023-03-27 00:27:18,713 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111881.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:28:14,421 INFO [finetune.py:976] (5/7) Epoch 20, batch 3100, loss[loss=0.2215, simple_loss=0.2764, pruned_loss=0.0833, over 4891.00 frames. ], tot_loss[loss=0.1811, simple_loss=0.2522, pruned_loss=0.05495, over 953754.02 frames. 
], batch size: 32, lr: 3.24e-03, grad_scale: 32.0 +2023-03-27 00:28:15,706 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=111929.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:28:33,003 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.135e+02 1.507e+02 1.737e+02 2.301e+02 5.151e+02, threshold=3.474e+02, percent-clipped=3.0 +2023-03-27 00:28:40,288 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.7107, 1.7172, 1.7597, 0.9803, 1.8550, 2.1029, 1.9832, 1.5087], + device='cuda:5'), covar=tensor([0.0958, 0.0620, 0.0489, 0.0595, 0.0393, 0.0564, 0.0349, 0.0823], + device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0149, 0.0124, 0.0124, 0.0129, 0.0128, 0.0140, 0.0147], + device='cuda:5'), out_proj_covar=tensor([9.0218e-05, 1.0817e-04, 8.8433e-05, 8.7493e-05, 9.0917e-05, 9.1848e-05, + 1.0056e-04, 1.0525e-04], device='cuda:5') +2023-03-27 00:28:47,144 INFO [finetune.py:976] (5/7) Epoch 20, batch 3150, loss[loss=0.1939, simple_loss=0.2614, pruned_loss=0.06322, over 4906.00 frames. ], tot_loss[loss=0.1785, simple_loss=0.2491, pruned_loss=0.05401, over 953284.01 frames. ], batch size: 36, lr: 3.24e-03, grad_scale: 32.0 +2023-03-27 00:28:57,261 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.2912, 3.0479, 2.7620, 1.2487, 2.8760, 2.2403, 2.1832, 2.6187], + device='cuda:5'), covar=tensor([0.0819, 0.0740, 0.1690, 0.2222, 0.1492, 0.2280, 0.2159, 0.1165], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0192, 0.0200, 0.0183, 0.0212, 0.0207, 0.0223, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:29:07,943 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.49 vs. limit=5.0 +2023-03-27 00:29:23,073 INFO [finetune.py:976] (5/7) Epoch 20, batch 3200, loss[loss=0.1709, simple_loss=0.2445, pruned_loss=0.04859, over 4793.00 frames. ], tot_loss[loss=0.1755, simple_loss=0.2451, pruned_loss=0.05295, over 954635.75 frames. 
], batch size: 29, lr: 3.24e-03, grad_scale: 32.0 +2023-03-27 00:29:43,189 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4296, 1.2648, 1.1959, 1.4079, 1.5285, 1.4329, 0.9300, 1.1786], + device='cuda:5'), covar=tensor([0.2318, 0.2183, 0.2183, 0.1730, 0.1807, 0.1471, 0.2991, 0.2127], + device='cuda:5'), in_proj_covar=tensor([0.0245, 0.0211, 0.0213, 0.0194, 0.0244, 0.0189, 0.0218, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:29:53,396 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8665, 1.3569, 0.8151, 1.6960, 2.1264, 1.5234, 1.6675, 1.7278], + device='cuda:5'), covar=tensor([0.1738, 0.2682, 0.2398, 0.1494, 0.2110, 0.2344, 0.1786, 0.2635], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0094, 0.0110, 0.0091, 0.0119, 0.0092, 0.0097, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 00:29:56,973 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.705e+01 1.565e+02 1.739e+02 2.228e+02 3.922e+02, threshold=3.479e+02, percent-clipped=1.0 +2023-03-27 00:30:06,301 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=112066.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:30:13,834 INFO [finetune.py:976] (5/7) Epoch 20, batch 3250, loss[loss=0.1812, simple_loss=0.2494, pruned_loss=0.05653, over 4294.00 frames. ], tot_loss[loss=0.177, simple_loss=0.2463, pruned_loss=0.05383, over 953157.81 frames. ], batch size: 65, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:30:15,751 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4755, 2.3281, 2.0544, 2.4524, 2.2694, 2.2956, 2.3272, 3.1839], + device='cuda:5'), covar=tensor([0.3844, 0.4925, 0.3482, 0.4448, 0.4783, 0.2627, 0.4212, 0.1679], + device='cuda:5'), in_proj_covar=tensor([0.0285, 0.0261, 0.0230, 0.0275, 0.0251, 0.0221, 0.0250, 0.0231], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:30:48,098 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3555, 1.2896, 1.2461, 0.7630, 1.2343, 1.4483, 1.5118, 1.1749], + device='cuda:5'), covar=tensor([0.0716, 0.0466, 0.0457, 0.0407, 0.0440, 0.0455, 0.0238, 0.0555], + device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0151, 0.0125, 0.0125, 0.0131, 0.0129, 0.0142, 0.0148], + device='cuda:5'), out_proj_covar=tensor([9.0881e-05, 1.0893e-04, 8.9349e-05, 8.8287e-05, 9.2029e-05, 9.2592e-05, + 1.0155e-04, 1.0634e-04], device='cuda:5') +2023-03-27 00:30:54,495 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=112124.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:30:56,192 INFO [finetune.py:976] (5/7) Epoch 20, batch 3300, loss[loss=0.2053, simple_loss=0.2963, pruned_loss=0.0572, over 4808.00 frames. ], tot_loss[loss=0.1794, simple_loss=0.2497, pruned_loss=0.05453, over 954086.59 frames. 
], batch size: 45, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:30:56,309 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=112127.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:31:10,260 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112147.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 00:31:16,116 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.319e+01 1.653e+02 1.982e+02 2.472e+02 3.934e+02, threshold=3.965e+02, percent-clipped=2.0 +2023-03-27 00:31:26,941 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=112172.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:31:29,978 INFO [finetune.py:976] (5/7) Epoch 20, batch 3350, loss[loss=0.2202, simple_loss=0.2841, pruned_loss=0.07817, over 4739.00 frames. ], tot_loss[loss=0.1811, simple_loss=0.2518, pruned_loss=0.05521, over 955102.87 frames. ], batch size: 54, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:32:11,816 INFO [finetune.py:976] (5/7) Epoch 20, batch 3400, loss[loss=0.1274, simple_loss=0.2101, pruned_loss=0.02231, over 4797.00 frames. ], tot_loss[loss=0.1834, simple_loss=0.2537, pruned_loss=0.05652, over 953566.02 frames. ], batch size: 29, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:32:28,442 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.3233, 2.8530, 2.6286, 1.3092, 2.7410, 2.2577, 2.1792, 2.4978], + device='cuda:5'), covar=tensor([0.0872, 0.0885, 0.1842, 0.2115, 0.1391, 0.2076, 0.2049, 0.1137], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0192, 0.0200, 0.0182, 0.0211, 0.0207, 0.0223, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:32:29,026 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0833, 1.8289, 2.4395, 4.0155, 2.7290, 2.7225, 0.9012, 3.3505], + device='cuda:5'), covar=tensor([0.1684, 0.1421, 0.1457, 0.0508, 0.0770, 0.1505, 0.2062, 0.0366], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0133, 0.0163, 0.0100, 0.0134, 0.0123, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 00:32:31,201 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.110e+02 1.676e+02 1.964e+02 2.392e+02 4.564e+02, threshold=3.928e+02, percent-clipped=2.0 +2023-03-27 00:32:31,346 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9584, 1.9657, 1.7540, 2.1449, 2.5661, 2.1436, 1.7908, 1.5670], + device='cuda:5'), covar=tensor([0.2279, 0.1987, 0.1976, 0.1714, 0.1717, 0.1203, 0.2354, 0.2136], + device='cuda:5'), in_proj_covar=tensor([0.0245, 0.0211, 0.0213, 0.0195, 0.0244, 0.0189, 0.0218, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:32:34,360 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6291, 1.7732, 1.5227, 1.5347, 2.2200, 2.0521, 1.8270, 1.8244], + device='cuda:5'), covar=tensor([0.0439, 0.0348, 0.0535, 0.0339, 0.0241, 0.0588, 0.0434, 0.0379], + device='cuda:5'), in_proj_covar=tensor([0.0097, 0.0108, 0.0145, 0.0112, 0.0101, 0.0112, 0.0100, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.5499e-05, 8.2920e-05, 1.1431e-04, 8.5745e-05, 7.8383e-05, 8.2965e-05, + 7.4686e-05, 8.6228e-05], device='cuda:5') +2023-03-27 00:32:44,381 INFO [finetune.py:976] (5/7) Epoch 20, batch 3450, loss[loss=0.1856, simple_loss=0.2497, 
pruned_loss=0.06076, over 4301.00 frames. ], tot_loss[loss=0.1818, simple_loss=0.2526, pruned_loss=0.05553, over 955441.58 frames. ], batch size: 66, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:33:19,433 INFO [finetune.py:976] (5/7) Epoch 20, batch 3500, loss[loss=0.2077, simple_loss=0.2607, pruned_loss=0.07741, over 4942.00 frames. ], tot_loss[loss=0.1789, simple_loss=0.2493, pruned_loss=0.05428, over 956767.58 frames. ], batch size: 38, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:33:29,339 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5349, 3.4202, 3.2449, 1.4262, 3.4736, 2.5931, 0.9751, 2.4468], + device='cuda:5'), covar=tensor([0.2276, 0.2267, 0.1757, 0.3752, 0.1288, 0.1050, 0.4166, 0.1631], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0177, 0.0160, 0.0129, 0.0162, 0.0122, 0.0147, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 00:33:56,444 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.856e+01 1.454e+02 1.799e+02 2.150e+02 5.052e+02, threshold=3.598e+02, percent-clipped=2.0 +2023-03-27 00:34:18,956 INFO [finetune.py:976] (5/7) Epoch 20, batch 3550, loss[loss=0.1673, simple_loss=0.2424, pruned_loss=0.04609, over 4743.00 frames. ], tot_loss[loss=0.1756, simple_loss=0.2455, pruned_loss=0.05279, over 955662.34 frames. ], batch size: 59, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:34:28,790 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4912, 1.5593, 1.9573, 1.7477, 1.7111, 3.6007, 1.5552, 1.6848], + device='cuda:5'), covar=tensor([0.0979, 0.1846, 0.1084, 0.1007, 0.1586, 0.0247, 0.1480, 0.1793], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0075, 0.0077, 0.0092, 0.0081, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 00:34:36,592 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=112389.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:35:17,939 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112422.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:35:20,885 INFO [finetune.py:976] (5/7) Epoch 20, batch 3600, loss[loss=0.1599, simple_loss=0.2305, pruned_loss=0.04464, over 4854.00 frames. ], tot_loss[loss=0.1736, simple_loss=0.2434, pruned_loss=0.05194, over 956731.97 frames. ], batch size: 44, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:35:32,781 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-03-27 00:35:37,948 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=112447.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 00:35:39,837 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=112450.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:35:44,346 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.087e+02 1.527e+02 1.833e+02 2.137e+02 4.874e+02, threshold=3.667e+02, percent-clipped=1.0 +2023-03-27 00:36:08,332 INFO [finetune.py:976] (5/7) Epoch 20, batch 3650, loss[loss=0.1737, simple_loss=0.245, pruned_loss=0.05122, over 4789.00 frames. ], tot_loss[loss=0.1756, simple_loss=0.2447, pruned_loss=0.05327, over 952626.53 frames. 
], batch size: 29, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:36:20,410 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=112495.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 00:36:41,251 INFO [finetune.py:976] (5/7) Epoch 20, batch 3700, loss[loss=0.2191, simple_loss=0.2906, pruned_loss=0.07376, over 4819.00 frames. ], tot_loss[loss=0.1786, simple_loss=0.2483, pruned_loss=0.05445, over 952017.18 frames. ], batch size: 33, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:37:01,296 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.006e+02 1.568e+02 1.941e+02 2.323e+02 3.962e+02, threshold=3.882e+02, percent-clipped=1.0 +2023-03-27 00:37:03,794 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1664, 2.0026, 1.9805, 0.9255, 2.3457, 2.4654, 2.1236, 1.7345], + device='cuda:5'), covar=tensor([0.1040, 0.0854, 0.0642, 0.0865, 0.0475, 0.0898, 0.0536, 0.0947], + device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0150, 0.0125, 0.0124, 0.0130, 0.0129, 0.0141, 0.0148], + device='cuda:5'), out_proj_covar=tensor([9.0439e-05, 1.0861e-04, 8.9138e-05, 8.7751e-05, 9.1464e-05, 9.2601e-05, + 1.0136e-04, 1.0592e-04], device='cuda:5') +2023-03-27 00:37:05,584 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=112562.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:37:15,375 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7366, 4.6939, 4.4465, 2.5903, 4.7803, 3.6059, 1.0600, 3.4413], + device='cuda:5'), covar=tensor([0.2643, 0.1483, 0.1342, 0.2899, 0.0777, 0.0834, 0.4384, 0.1191], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0177, 0.0161, 0.0130, 0.0162, 0.0123, 0.0147, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 00:37:15,915 INFO [finetune.py:976] (5/7) Epoch 20, batch 3750, loss[loss=0.1502, simple_loss=0.2367, pruned_loss=0.03184, over 4857.00 frames. ], tot_loss[loss=0.1787, simple_loss=0.2491, pruned_loss=0.05413, over 952970.98 frames. 
], batch size: 34, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:37:16,052 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=112577.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:37:33,647 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4588, 2.3299, 1.9731, 2.5176, 2.3421, 2.0529, 2.8680, 2.4934], + device='cuda:5'), covar=tensor([0.1248, 0.2356, 0.2819, 0.2616, 0.2479, 0.1566, 0.2975, 0.1674], + device='cuda:5'), in_proj_covar=tensor([0.0185, 0.0188, 0.0234, 0.0252, 0.0246, 0.0203, 0.0214, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:37:46,503 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6289, 2.3966, 1.9930, 2.4018, 2.2107, 2.0218, 2.8966, 2.6050], + device='cuda:5'), covar=tensor([0.1160, 0.1956, 0.2831, 0.2641, 0.2640, 0.1610, 0.3278, 0.1616], + device='cuda:5'), in_proj_covar=tensor([0.0185, 0.0188, 0.0234, 0.0251, 0.0246, 0.0203, 0.0214, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:37:50,100 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=112617.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:37:54,210 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=112623.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:37:57,418 INFO [finetune.py:976] (5/7) Epoch 20, batch 3800, loss[loss=0.1603, simple_loss=0.2381, pruned_loss=0.04125, over 4814.00 frames. ], tot_loss[loss=0.1794, simple_loss=0.2502, pruned_loss=0.05435, over 953081.29 frames. ], batch size: 39, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:38:04,246 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=112638.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 00:38:11,472 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.24 vs. limit=5.0 +2023-03-27 00:38:16,044 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.507e+01 1.515e+02 1.800e+02 2.233e+02 3.828e+02, threshold=3.600e+02, percent-clipped=0.0 +2023-03-27 00:38:27,899 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0883, 1.9514, 2.0956, 1.5828, 1.9276, 2.1836, 2.1561, 1.6115], + device='cuda:5'), covar=tensor([0.0479, 0.0539, 0.0540, 0.0715, 0.0817, 0.0518, 0.0491, 0.1003], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0136, 0.0140, 0.0120, 0.0125, 0.0139, 0.0140, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:38:29,128 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3017, 1.8022, 2.3009, 2.1898, 1.9690, 1.9641, 2.1334, 2.0963], + device='cuda:5'), covar=tensor([0.4121, 0.4245, 0.3097, 0.3830, 0.5025, 0.3881, 0.4761, 0.3109], + device='cuda:5'), in_proj_covar=tensor([0.0254, 0.0242, 0.0263, 0.0281, 0.0279, 0.0254, 0.0290, 0.0244], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:38:30,666 INFO [finetune.py:976] (5/7) Epoch 20, batch 3850, loss[loss=0.1713, simple_loss=0.2404, pruned_loss=0.05115, over 4818.00 frames. ], tot_loss[loss=0.1783, simple_loss=0.2489, pruned_loss=0.05384, over 954716.63 frames. 
], batch size: 33, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:38:31,367 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=112678.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 00:38:46,322 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4532, 2.4834, 2.3274, 1.8870, 2.2180, 2.6856, 2.7683, 2.0778], + device='cuda:5'), covar=tensor([0.0685, 0.0584, 0.0726, 0.0853, 0.0972, 0.0635, 0.0544, 0.1062], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0136, 0.0140, 0.0120, 0.0125, 0.0139, 0.0140, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:38:59,764 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=112722.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:38:59,788 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1827, 2.0037, 2.0368, 0.9116, 2.2529, 2.5024, 2.1509, 1.8322], + device='cuda:5'), covar=tensor([0.0813, 0.0688, 0.0578, 0.0654, 0.0526, 0.0598, 0.0498, 0.0690], + device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0150, 0.0125, 0.0124, 0.0130, 0.0129, 0.0141, 0.0148], + device='cuda:5'), out_proj_covar=tensor([9.0292e-05, 1.0846e-04, 8.9019e-05, 8.7496e-05, 9.1263e-05, 9.2405e-05, + 1.0119e-04, 1.0580e-04], device='cuda:5') +2023-03-27 00:39:03,156 INFO [finetune.py:976] (5/7) Epoch 20, batch 3900, loss[loss=0.1494, simple_loss=0.2266, pruned_loss=0.03605, over 4781.00 frames. ], tot_loss[loss=0.1783, simple_loss=0.2474, pruned_loss=0.05456, over 953085.55 frames. ], batch size: 26, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:39:15,049 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112745.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:39:21,554 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.403e+01 1.612e+02 1.959e+02 2.410e+02 4.123e+02, threshold=3.918e+02, percent-clipped=1.0 +2023-03-27 00:39:32,077 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=112770.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:39:36,268 INFO [finetune.py:976] (5/7) Epoch 20, batch 3950, loss[loss=0.1429, simple_loss=0.2128, pruned_loss=0.03649, over 4773.00 frames. ], tot_loss[loss=0.1757, simple_loss=0.2445, pruned_loss=0.05344, over 953831.81 frames. ], batch size: 54, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:39:51,981 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7099, 2.5462, 2.2028, 1.0786, 2.3953, 2.0394, 1.9130, 2.3588], + device='cuda:5'), covar=tensor([0.0788, 0.0702, 0.1380, 0.2012, 0.1186, 0.2262, 0.2027, 0.0805], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0191, 0.0198, 0.0181, 0.0210, 0.0208, 0.0221, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:40:19,040 INFO [finetune.py:976] (5/7) Epoch 20, batch 4000, loss[loss=0.1668, simple_loss=0.2413, pruned_loss=0.04619, over 4899.00 frames. ], tot_loss[loss=0.1746, simple_loss=0.2433, pruned_loss=0.05295, over 955094.59 frames. 
], batch size: 35, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:40:48,439 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.088e+02 1.638e+02 1.904e+02 2.320e+02 3.891e+02, threshold=3.808e+02, percent-clipped=0.0 +2023-03-27 00:40:50,436 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9944, 1.8652, 1.6169, 1.8576, 1.8354, 1.8148, 1.8578, 2.4606], + device='cuda:5'), covar=tensor([0.3570, 0.4327, 0.3225, 0.3704, 0.3845, 0.2414, 0.3702, 0.1675], + device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0263, 0.0231, 0.0276, 0.0253, 0.0222, 0.0251, 0.0233], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:40:56,977 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.11 vs. limit=2.0 +2023-03-27 00:41:04,951 INFO [finetune.py:976] (5/7) Epoch 20, batch 4050, loss[loss=0.2097, simple_loss=0.2848, pruned_loss=0.06731, over 4736.00 frames. ], tot_loss[loss=0.1775, simple_loss=0.2471, pruned_loss=0.05392, over 955679.18 frames. ], batch size: 59, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:41:23,153 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5250, 3.4834, 3.3146, 1.6344, 3.6557, 2.7174, 0.9325, 2.4887], + device='cuda:5'), covar=tensor([0.2913, 0.2073, 0.1568, 0.3252, 0.1168, 0.1032, 0.4068, 0.1459], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0176, 0.0160, 0.0129, 0.0161, 0.0123, 0.0147, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 00:41:40,692 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9483, 2.6772, 2.4884, 2.9296, 2.7073, 2.6606, 2.6623, 3.5197], + device='cuda:5'), covar=tensor([0.3147, 0.4349, 0.2894, 0.3437, 0.3485, 0.2221, 0.3790, 0.1496], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0263, 0.0232, 0.0277, 0.0253, 0.0223, 0.0252, 0.0234], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:41:41,260 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112918.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:41:43,124 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=112921.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:41:47,214 INFO [finetune.py:976] (5/7) Epoch 20, batch 4100, loss[loss=0.1505, simple_loss=0.2256, pruned_loss=0.03764, over 4355.00 frames. ], tot_loss[loss=0.179, simple_loss=0.2492, pruned_loss=0.05439, over 954992.56 frames. 
], batch size: 19, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:41:51,357 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112933.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 00:42:06,078 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.082e+02 1.570e+02 1.869e+02 2.355e+02 4.214e+02, threshold=3.739e+02, percent-clipped=0.0 +2023-03-27 00:42:07,467 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6963, 1.6093, 1.4709, 1.7709, 2.0978, 1.8478, 1.3865, 1.4000], + device='cuda:5'), covar=tensor([0.2207, 0.2077, 0.2005, 0.1554, 0.1634, 0.1202, 0.2451, 0.1928], + device='cuda:5'), in_proj_covar=tensor([0.0244, 0.0209, 0.0211, 0.0193, 0.0241, 0.0187, 0.0216, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:42:17,470 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112973.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 00:42:19,818 INFO [finetune.py:976] (5/7) Epoch 20, batch 4150, loss[loss=0.1915, simple_loss=0.2553, pruned_loss=0.06384, over 4880.00 frames. ], tot_loss[loss=0.1807, simple_loss=0.2512, pruned_loss=0.05512, over 957158.57 frames. ], batch size: 32, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:42:24,006 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=112982.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:43:03,966 INFO [finetune.py:976] (5/7) Epoch 20, batch 4200, loss[loss=0.1451, simple_loss=0.2179, pruned_loss=0.03615, over 4847.00 frames. ], tot_loss[loss=0.18, simple_loss=0.2508, pruned_loss=0.05459, over 955213.10 frames. ], batch size: 44, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:43:16,406 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113045.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:43:23,434 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.027e+02 1.578e+02 1.900e+02 2.258e+02 5.235e+02, threshold=3.800e+02, percent-clipped=4.0 +2023-03-27 00:43:36,994 INFO [finetune.py:976] (5/7) Epoch 20, batch 4250, loss[loss=0.1362, simple_loss=0.1989, pruned_loss=0.0368, over 4808.00 frames. ], tot_loss[loss=0.1777, simple_loss=0.2479, pruned_loss=0.0537, over 954925.97 frames. ], batch size: 25, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:43:47,717 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=113093.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:44:08,930 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-03-27 00:44:10,288 INFO [finetune.py:976] (5/7) Epoch 20, batch 4300, loss[loss=0.122, simple_loss=0.2, pruned_loss=0.02203, over 4825.00 frames. ], tot_loss[loss=0.1754, simple_loss=0.2452, pruned_loss=0.05276, over 954984.90 frames. 
], batch size: 25, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:44:26,974 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([5.0460, 4.4341, 4.6443, 4.8366, 4.8139, 4.4354, 5.1328, 1.6771], + device='cuda:5'), covar=tensor([0.0687, 0.0740, 0.0863, 0.0965, 0.0969, 0.1611, 0.0527, 0.5547], + device='cuda:5'), in_proj_covar=tensor([0.0349, 0.0243, 0.0278, 0.0292, 0.0331, 0.0283, 0.0304, 0.0297], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:44:30,856 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.679e+01 1.396e+02 1.741e+02 2.023e+02 4.034e+02, threshold=3.482e+02, percent-clipped=1.0 +2023-03-27 00:44:43,637 INFO [finetune.py:976] (5/7) Epoch 20, batch 4350, loss[loss=0.1653, simple_loss=0.2357, pruned_loss=0.04747, over 4868.00 frames. ], tot_loss[loss=0.1733, simple_loss=0.243, pruned_loss=0.05184, over 955694.15 frames. ], batch size: 31, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:45:12,196 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113218.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:45:17,619 INFO [finetune.py:976] (5/7) Epoch 20, batch 4400, loss[loss=0.1723, simple_loss=0.2423, pruned_loss=0.05121, over 4907.00 frames. ], tot_loss[loss=0.1735, simple_loss=0.2431, pruned_loss=0.05199, over 951944.67 frames. ], batch size: 28, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:45:21,835 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113233.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:45:22,473 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113234.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:45:49,234 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.729e+01 1.603e+02 1.846e+02 2.173e+02 5.642e+02, threshold=3.692e+02, percent-clipped=4.0 +2023-03-27 00:46:00,754 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=113266.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:46:08,367 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113273.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 00:46:10,701 INFO [finetune.py:976] (5/7) Epoch 20, batch 4450, loss[loss=0.1924, simple_loss=0.2676, pruned_loss=0.05864, over 4818.00 frames. ], tot_loss[loss=0.1754, simple_loss=0.2462, pruned_loss=0.05232, over 952571.91 frames. 
], batch size: 33, lr: 3.23e-03, grad_scale: 32.0 +2023-03-27 00:46:10,769 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=113277.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:46:13,204 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=113281.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:46:23,186 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7805, 2.5323, 2.0622, 1.0231, 2.1850, 2.0515, 2.0928, 2.2233], + device='cuda:5'), covar=tensor([0.0711, 0.0744, 0.1520, 0.2047, 0.1308, 0.2164, 0.1842, 0.0931], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0192, 0.0199, 0.0182, 0.0210, 0.0209, 0.0223, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:46:23,197 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113295.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:46:50,259 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=113321.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:46:53,838 INFO [finetune.py:976] (5/7) Epoch 20, batch 4500, loss[loss=0.1835, simple_loss=0.2314, pruned_loss=0.06774, over 4694.00 frames. ], tot_loss[loss=0.1755, simple_loss=0.2469, pruned_loss=0.05209, over 951426.69 frames. ], batch size: 23, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:47:13,422 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.617e+02 2.027e+02 2.372e+02 5.258e+02, threshold=4.055e+02, percent-clipped=2.0 +2023-03-27 00:47:27,582 INFO [finetune.py:976] (5/7) Epoch 20, batch 4550, loss[loss=0.1853, simple_loss=0.2528, pruned_loss=0.05891, over 4730.00 frames. ], tot_loss[loss=0.1764, simple_loss=0.2479, pruned_loss=0.05242, over 952843.72 frames. ], batch size: 54, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:48:03,324 INFO [finetune.py:976] (5/7) Epoch 20, batch 4600, loss[loss=0.1895, simple_loss=0.2479, pruned_loss=0.06555, over 4821.00 frames. ], tot_loss[loss=0.1769, simple_loss=0.2485, pruned_loss=0.05266, over 953164.57 frames. ], batch size: 38, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:48:07,498 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0 +2023-03-27 00:48:31,337 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.757e+01 1.614e+02 1.832e+02 2.145e+02 3.668e+02, threshold=3.664e+02, percent-clipped=0.0 +2023-03-27 00:48:45,586 INFO [finetune.py:976] (5/7) Epoch 20, batch 4650, loss[loss=0.1244, simple_loss=0.1862, pruned_loss=0.03132, over 4113.00 frames. ], tot_loss[loss=0.1745, simple_loss=0.2453, pruned_loss=0.05185, over 953310.58 frames. ], batch size: 17, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:48:50,006 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. limit=2.0 +2023-03-27 00:49:19,012 INFO [finetune.py:976] (5/7) Epoch 20, batch 4700, loss[loss=0.1051, simple_loss=0.1847, pruned_loss=0.01275, over 4721.00 frames. ], tot_loss[loss=0.1713, simple_loss=0.2419, pruned_loss=0.05041, over 954619.71 frames. 
], batch size: 23, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:49:37,210 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.713e+01 1.518e+02 1.819e+02 2.172e+02 5.123e+02, threshold=3.639e+02, percent-clipped=2.0 +2023-03-27 00:49:51,494 INFO [finetune.py:976] (5/7) Epoch 20, batch 4750, loss[loss=0.1541, simple_loss=0.2214, pruned_loss=0.04337, over 4925.00 frames. ], tot_loss[loss=0.17, simple_loss=0.2399, pruned_loss=0.05001, over 954225.75 frames. ], batch size: 38, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:49:52,070 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113577.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:50:00,073 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=113590.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:50:07,355 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.85 vs. limit=5.0 +2023-03-27 00:50:10,343 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.67 vs. limit=2.0 +2023-03-27 00:50:18,366 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113617.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:50:23,694 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=113625.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:50:24,846 INFO [finetune.py:976] (5/7) Epoch 20, batch 4800, loss[loss=0.1693, simple_loss=0.2502, pruned_loss=0.04422, over 4898.00 frames. ], tot_loss[loss=0.1732, simple_loss=0.2431, pruned_loss=0.05163, over 954163.90 frames. ], batch size: 35, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:50:30,820 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3741, 1.4491, 1.2061, 1.4207, 1.6906, 1.5344, 1.4383, 1.2228], + device='cuda:5'), covar=tensor([0.0379, 0.0246, 0.0568, 0.0268, 0.0193, 0.0445, 0.0291, 0.0381], + device='cuda:5'), in_proj_covar=tensor([0.0097, 0.0107, 0.0144, 0.0111, 0.0100, 0.0111, 0.0100, 0.0112], + device='cuda:5'), out_proj_covar=tensor([7.5126e-05, 8.2368e-05, 1.1318e-04, 8.5082e-05, 7.7839e-05, 8.2069e-05, + 7.4501e-05, 8.5448e-05], device='cuda:5') +2023-03-27 00:50:37,456 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113646.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:50:43,816 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.144e+02 1.583e+02 1.918e+02 2.188e+02 4.674e+02, threshold=3.836e+02, percent-clipped=2.0 +2023-03-27 00:51:01,327 INFO [finetune.py:976] (5/7) Epoch 20, batch 4850, loss[loss=0.174, simple_loss=0.2398, pruned_loss=0.05407, over 4693.00 frames. ], tot_loss[loss=0.1782, simple_loss=0.2481, pruned_loss=0.05414, over 952687.39 frames. 
], batch size: 23, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:51:02,042 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113678.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:51:14,819 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5695, 1.3790, 1.8204, 2.9869, 1.9805, 2.2197, 0.9468, 2.5053], + device='cuda:5'), covar=tensor([0.1771, 0.1507, 0.1343, 0.0664, 0.0844, 0.1252, 0.1861, 0.0540], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0117, 0.0134, 0.0165, 0.0101, 0.0137, 0.0125, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 00:51:34,433 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113707.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:51:57,945 INFO [finetune.py:976] (5/7) Epoch 20, batch 4900, loss[loss=0.1343, simple_loss=0.2215, pruned_loss=0.02351, over 4769.00 frames. ], tot_loss[loss=0.1795, simple_loss=0.2498, pruned_loss=0.05463, over 954017.49 frames. ], batch size: 26, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:52:20,125 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.649e+02 2.053e+02 2.498e+02 4.513e+02, threshold=4.105e+02, percent-clipped=3.0 +2023-03-27 00:52:34,815 INFO [finetune.py:976] (5/7) Epoch 20, batch 4950, loss[loss=0.1358, simple_loss=0.2186, pruned_loss=0.02649, over 4744.00 frames. ], tot_loss[loss=0.18, simple_loss=0.2507, pruned_loss=0.05462, over 954292.58 frames. ], batch size: 27, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:53:07,554 INFO [finetune.py:976] (5/7) Epoch 20, batch 5000, loss[loss=0.1573, simple_loss=0.2227, pruned_loss=0.04595, over 4820.00 frames. ], tot_loss[loss=0.1779, simple_loss=0.2488, pruned_loss=0.05349, over 954619.71 frames. ], batch size: 33, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:53:26,538 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.026e+02 1.516e+02 1.779e+02 2.023e+02 5.156e+02, threshold=3.559e+02, percent-clipped=2.0 +2023-03-27 00:53:39,284 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2827, 1.8712, 2.4135, 1.5572, 2.2151, 2.4028, 1.7627, 2.5891], + device='cuda:5'), covar=tensor([0.1186, 0.1894, 0.1328, 0.1947, 0.0876, 0.1375, 0.2671, 0.0762], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0206, 0.0190, 0.0189, 0.0175, 0.0213, 0.0219, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:53:42,070 INFO [finetune.py:976] (5/7) Epoch 20, batch 5050, loss[loss=0.1613, simple_loss=0.2383, pruned_loss=0.04213, over 4903.00 frames. ], tot_loss[loss=0.1757, simple_loss=0.2461, pruned_loss=0.05268, over 956217.43 frames. 
], batch size: 28, lr: 3.22e-03, grad_scale: 64.0 +2023-03-27 00:53:51,007 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113890.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:54:08,485 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3849, 1.5488, 1.2991, 1.5857, 1.8580, 1.7180, 1.5053, 1.3811], + device='cuda:5'), covar=tensor([0.0369, 0.0263, 0.0568, 0.0247, 0.0158, 0.0422, 0.0285, 0.0366], + device='cuda:5'), in_proj_covar=tensor([0.0097, 0.0107, 0.0143, 0.0111, 0.0100, 0.0111, 0.0100, 0.0112], + device='cuda:5'), out_proj_covar=tensor([7.4825e-05, 8.2160e-05, 1.1248e-04, 8.4814e-05, 7.7456e-05, 8.1871e-05, + 7.4082e-05, 8.5356e-05], device='cuda:5') +2023-03-27 00:54:14,771 INFO [finetune.py:976] (5/7) Epoch 20, batch 5100, loss[loss=0.1752, simple_loss=0.244, pruned_loss=0.05318, over 4826.00 frames. ], tot_loss[loss=0.1726, simple_loss=0.2426, pruned_loss=0.05128, over 956832.16 frames. ], batch size: 33, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:54:23,012 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=113938.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:54:35,767 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.073e+02 1.567e+02 1.826e+02 2.193e+02 3.507e+02, threshold=3.652e+02, percent-clipped=0.0 +2023-03-27 00:54:38,926 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5252, 1.3843, 1.7526, 1.7215, 1.4718, 3.2519, 1.3210, 1.4200], + device='cuda:5'), covar=tensor([0.0961, 0.1962, 0.1099, 0.0973, 0.1746, 0.0284, 0.1612, 0.1825], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0082, 0.0075, 0.0077, 0.0092, 0.0081, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 00:54:40,130 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3889, 1.9341, 2.5833, 4.3791, 3.1719, 2.8579, 0.8484, 3.6996], + device='cuda:5'), covar=tensor([0.1462, 0.1385, 0.1373, 0.0477, 0.0654, 0.1299, 0.2082, 0.0305], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0117, 0.0135, 0.0166, 0.0101, 0.0138, 0.0126, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 00:54:46,070 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=113973.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:54:48,408 INFO [finetune.py:976] (5/7) Epoch 20, batch 5150, loss[loss=0.1444, simple_loss=0.2116, pruned_loss=0.0386, over 4724.00 frames. ], tot_loss[loss=0.1732, simple_loss=0.2425, pruned_loss=0.05193, over 955949.16 frames. 
], batch size: 23, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:55:07,782 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=114002.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:55:07,875 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1296, 1.4040, 2.1112, 1.9965, 1.8748, 1.8236, 1.9075, 1.9806], + device='cuda:5'), covar=tensor([0.3442, 0.3902, 0.3402, 0.3345, 0.4916, 0.3773, 0.4395, 0.2907], + device='cuda:5'), in_proj_covar=tensor([0.0254, 0.0243, 0.0263, 0.0282, 0.0279, 0.0256, 0.0290, 0.0245], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:55:23,297 INFO [finetune.py:976] (5/7) Epoch 20, batch 5200, loss[loss=0.191, simple_loss=0.2636, pruned_loss=0.05922, over 4818.00 frames. ], tot_loss[loss=0.1761, simple_loss=0.2458, pruned_loss=0.05319, over 954619.60 frames. ], batch size: 38, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:55:43,820 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.106e+02 1.581e+02 1.814e+02 2.243e+02 3.815e+02, threshold=3.628e+02, percent-clipped=1.0 +2023-03-27 00:55:56,414 INFO [finetune.py:976] (5/7) Epoch 20, batch 5250, loss[loss=0.1951, simple_loss=0.2733, pruned_loss=0.05846, over 4919.00 frames. ], tot_loss[loss=0.1775, simple_loss=0.2479, pruned_loss=0.05358, over 955766.68 frames. ], batch size: 42, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:56:07,012 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8229, 1.5631, 1.8657, 1.2226, 1.7468, 1.9162, 1.4266, 2.1286], + device='cuda:5'), covar=tensor([0.1085, 0.2106, 0.1311, 0.1722, 0.0928, 0.1211, 0.3256, 0.0802], + device='cuda:5'), in_proj_covar=tensor([0.0194, 0.0207, 0.0191, 0.0191, 0.0175, 0.0214, 0.0220, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:56:11,909 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0 +2023-03-27 00:56:36,235 INFO [finetune.py:976] (5/7) Epoch 20, batch 5300, loss[loss=0.2102, simple_loss=0.2927, pruned_loss=0.06383, over 4890.00 frames. ], tot_loss[loss=0.1785, simple_loss=0.2494, pruned_loss=0.05385, over 956443.57 frames. ], batch size: 46, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:56:36,357 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=114127.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:57:12,913 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8488, 1.2335, 1.8650, 1.8359, 1.6581, 1.6182, 1.7311, 1.7225], + device='cuda:5'), covar=tensor([0.3821, 0.3984, 0.3354, 0.3737, 0.4793, 0.3889, 0.4472, 0.3047], + device='cuda:5'), in_proj_covar=tensor([0.0253, 0.0241, 0.0261, 0.0280, 0.0278, 0.0255, 0.0288, 0.0244], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:57:13,341 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.589e+01 1.501e+02 1.834e+02 2.338e+02 4.244e+02, threshold=3.669e+02, percent-clipped=1.0 +2023-03-27 00:57:30,111 INFO [finetune.py:976] (5/7) Epoch 20, batch 5350, loss[loss=0.1436, simple_loss=0.2137, pruned_loss=0.03671, over 4753.00 frames. ], tot_loss[loss=0.1787, simple_loss=0.2497, pruned_loss=0.05383, over 956778.35 frames. 
], batch size: 26, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:57:37,353 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=114188.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:57:57,960 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=5.16 vs. limit=5.0 +2023-03-27 00:58:01,648 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.77 vs. limit=5.0 +2023-03-27 00:58:03,281 INFO [finetune.py:976] (5/7) Epoch 20, batch 5400, loss[loss=0.2007, simple_loss=0.2612, pruned_loss=0.07012, over 4848.00 frames. ], tot_loss[loss=0.1784, simple_loss=0.2485, pruned_loss=0.05422, over 957572.77 frames. ], batch size: 31, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:58:23,337 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.275e+01 1.521e+02 1.786e+02 2.103e+02 5.074e+02, threshold=3.573e+02, percent-clipped=1.0 +2023-03-27 00:58:33,642 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=114273.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:58:34,877 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8654, 1.7964, 1.9082, 1.1616, 1.9674, 2.0255, 1.9418, 1.5524], + device='cuda:5'), covar=tensor([0.0639, 0.0708, 0.0632, 0.0919, 0.0690, 0.0612, 0.0575, 0.1198], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0136, 0.0140, 0.0121, 0.0125, 0.0139, 0.0140, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:58:35,980 INFO [finetune.py:976] (5/7) Epoch 20, batch 5450, loss[loss=0.1629, simple_loss=0.2366, pruned_loss=0.04459, over 4912.00 frames. ], tot_loss[loss=0.1773, simple_loss=0.2466, pruned_loss=0.05395, over 956635.43 frames. ], batch size: 36, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:58:51,606 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=114302.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:58:53,296 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.3185, 2.9179, 3.0865, 3.2426, 3.1270, 2.8940, 3.3651, 0.9574], + device='cuda:5'), covar=tensor([0.1089, 0.1018, 0.1136, 0.1148, 0.1573, 0.1881, 0.1065, 0.5458], + device='cuda:5'), in_proj_covar=tensor([0.0348, 0.0242, 0.0278, 0.0291, 0.0330, 0.0284, 0.0303, 0.0297], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:59:05,065 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=114321.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:59:08,646 INFO [finetune.py:976] (5/7) Epoch 20, batch 5500, loss[loss=0.12, simple_loss=0.1947, pruned_loss=0.02267, over 4795.00 frames. ], tot_loss[loss=0.1741, simple_loss=0.2428, pruned_loss=0.05267, over 953758.22 frames. 
], batch size: 26, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:59:16,052 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5813, 2.2916, 1.7487, 0.9488, 2.0083, 2.1135, 1.9132, 2.0641], + device='cuda:5'), covar=tensor([0.0752, 0.0713, 0.1508, 0.1772, 0.1246, 0.2170, 0.1812, 0.0808], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0191, 0.0199, 0.0182, 0.0209, 0.0209, 0.0223, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 00:59:23,095 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=114350.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 00:59:27,722 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.653e+01 1.524e+02 1.755e+02 2.254e+02 4.886e+02, threshold=3.510e+02, percent-clipped=3.0 +2023-03-27 00:59:42,372 INFO [finetune.py:976] (5/7) Epoch 20, batch 5550, loss[loss=0.1858, simple_loss=0.2623, pruned_loss=0.0547, over 4739.00 frames. ], tot_loss[loss=0.1763, simple_loss=0.2451, pruned_loss=0.05374, over 953529.32 frames. ], batch size: 54, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 00:59:49,172 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7136, 3.6948, 3.5796, 1.4226, 3.7870, 2.9082, 0.8330, 2.5662], + device='cuda:5'), covar=tensor([0.2237, 0.2070, 0.1473, 0.3820, 0.1070, 0.0935, 0.4530, 0.1549], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0177, 0.0160, 0.0130, 0.0161, 0.0123, 0.0148, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 01:00:00,271 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-03-27 01:00:14,062 INFO [finetune.py:976] (5/7) Epoch 20, batch 5600, loss[loss=0.1537, simple_loss=0.225, pruned_loss=0.04118, over 4890.00 frames. ], tot_loss[loss=0.1775, simple_loss=0.2475, pruned_loss=0.05381, over 954122.81 frames. ], batch size: 32, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 01:00:15,090 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-03-27 01:00:31,887 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.005e+02 1.587e+02 1.911e+02 2.256e+02 4.682e+02, threshold=3.822e+02, percent-clipped=4.0 +2023-03-27 01:00:43,490 INFO [finetune.py:976] (5/7) Epoch 20, batch 5650, loss[loss=0.1711, simple_loss=0.2351, pruned_loss=0.05357, over 4709.00 frames. ], tot_loss[loss=0.1787, simple_loss=0.2497, pruned_loss=0.05384, over 954755.34 frames. ], batch size: 23, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 01:00:43,563 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4801, 1.3073, 2.0215, 1.8937, 1.5813, 3.7627, 1.2359, 1.4270], + device='cuda:5'), covar=tensor([0.1286, 0.2458, 0.1669, 0.1165, 0.1977, 0.0274, 0.2058, 0.2549], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0074, 0.0077, 0.0091, 0.0080, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 01:00:46,984 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=114483.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:00:50,084 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. 
limit=2.0 +2023-03-27 01:00:53,855 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0549, 1.3712, 2.0793, 1.9850, 1.8554, 1.7993, 1.8981, 1.9801], + device='cuda:5'), covar=tensor([0.3784, 0.3977, 0.3200, 0.3497, 0.4764, 0.3565, 0.4388, 0.2904], + device='cuda:5'), in_proj_covar=tensor([0.0254, 0.0242, 0.0262, 0.0281, 0.0279, 0.0255, 0.0290, 0.0245], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:00:56,332 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6321, 1.0528, 0.7355, 1.3791, 2.0766, 0.7715, 1.2625, 1.4029], + device='cuda:5'), covar=tensor([0.1497, 0.2169, 0.1755, 0.1172, 0.1853, 0.1974, 0.1474, 0.2157], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0094, 0.0110, 0.0091, 0.0120, 0.0093, 0.0097, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 01:00:57,508 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7168, 1.5525, 2.2592, 3.4980, 2.3489, 2.5525, 1.3375, 2.8826], + device='cuda:5'), covar=tensor([0.1746, 0.1486, 0.1319, 0.0497, 0.0816, 0.1151, 0.1735, 0.0452], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0133, 0.0163, 0.0100, 0.0135, 0.0123, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 01:01:13,265 INFO [finetune.py:976] (5/7) Epoch 20, batch 5700, loss[loss=0.1385, simple_loss=0.1996, pruned_loss=0.03867, over 4107.00 frames. ], tot_loss[loss=0.1762, simple_loss=0.2464, pruned_loss=0.05305, over 942398.20 frames. ], batch size: 18, lr: 3.22e-03, grad_scale: 32.0 +2023-03-27 01:01:24,541 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1159, 2.7049, 2.6871, 1.1756, 2.8051, 2.0879, 0.6834, 1.8801], + device='cuda:5'), covar=tensor([0.2091, 0.2387, 0.1575, 0.3325, 0.1354, 0.1070, 0.3779, 0.1405], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0177, 0.0160, 0.0129, 0.0161, 0.0123, 0.0148, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 01:01:39,091 INFO [finetune.py:976] (5/7) Epoch 21, batch 0, loss[loss=0.1375, simple_loss=0.215, pruned_loss=0.02997, over 4700.00 frames. ], tot_loss[loss=0.1375, simple_loss=0.215, pruned_loss=0.02997, over 4700.00 frames. ], batch size: 23, lr: 3.21e-03, grad_scale: 32.0 +2023-03-27 01:01:39,091 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-27 01:01:46,311 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.8741, 3.4597, 3.5771, 3.7835, 3.6270, 3.4649, 3.9291, 1.2868], + device='cuda:5'), covar=tensor([0.0833, 0.0770, 0.0795, 0.0886, 0.1339, 0.1534, 0.0694, 0.5233], + device='cuda:5'), in_proj_covar=tensor([0.0346, 0.0241, 0.0277, 0.0289, 0.0330, 0.0283, 0.0301, 0.0296], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:01:52,342 INFO [finetune.py:1010] (5/7) Epoch 21, validation: loss=0.1598, simple_loss=0.2277, pruned_loss=0.0459, over 2265189.00 frames. 
+2023-03-27 01:01:52,342 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-27 01:01:56,943 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.573e+01 1.356e+02 1.658e+02 2.014e+02 3.472e+02, threshold=3.316e+02, percent-clipped=0.0 +2023-03-27 01:02:29,067 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8328, 1.4046, 1.9541, 1.8791, 1.6472, 1.6256, 1.8085, 1.7920], + device='cuda:5'), covar=tensor([0.3654, 0.3473, 0.2812, 0.3168, 0.4315, 0.3528, 0.3786, 0.2632], + device='cuda:5'), in_proj_covar=tensor([0.0254, 0.0243, 0.0263, 0.0281, 0.0280, 0.0255, 0.0290, 0.0245], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:02:47,784 INFO [finetune.py:976] (5/7) Epoch 21, batch 50, loss[loss=0.1494, simple_loss=0.2275, pruned_loss=0.03564, over 4883.00 frames. ], tot_loss[loss=0.1805, simple_loss=0.2516, pruned_loss=0.05476, over 217287.26 frames. ], batch size: 32, lr: 3.21e-03, grad_scale: 32.0 +2023-03-27 01:03:04,152 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0877, 1.9183, 1.6958, 1.6957, 1.8813, 1.8222, 1.8655, 2.5483], + device='cuda:5'), covar=tensor([0.3718, 0.4056, 0.3180, 0.3671, 0.3732, 0.2401, 0.3745, 0.1640], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0262, 0.0231, 0.0276, 0.0252, 0.0222, 0.0252, 0.0234], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:03:12,722 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.64 vs. limit=2.0 +2023-03-27 01:03:21,580 INFO [finetune.py:976] (5/7) Epoch 21, batch 100, loss[loss=0.1834, simple_loss=0.2538, pruned_loss=0.05653, over 4866.00 frames. ], tot_loss[loss=0.1763, simple_loss=0.2466, pruned_loss=0.05301, over 382620.07 frames. ], batch size: 31, lr: 3.21e-03, grad_scale: 32.0 +2023-03-27 01:03:23,374 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.865e+01 1.560e+02 1.971e+02 2.354e+02 5.080e+02, threshold=3.943e+02, percent-clipped=2.0 +2023-03-27 01:03:23,568 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-03-27 01:03:54,255 INFO [finetune.py:976] (5/7) Epoch 21, batch 150, loss[loss=0.1446, simple_loss=0.2143, pruned_loss=0.03745, over 4856.00 frames. ], tot_loss[loss=0.1701, simple_loss=0.2398, pruned_loss=0.0502, over 511882.63 frames. ], batch size: 44, lr: 3.21e-03, grad_scale: 32.0 +2023-03-27 01:04:02,533 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=114716.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:04:03,493 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-03-27 01:04:16,926 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5962, 2.2872, 1.8592, 0.9737, 2.0788, 2.0158, 1.8556, 2.1059], + device='cuda:5'), covar=tensor([0.0751, 0.0858, 0.1679, 0.2078, 0.1390, 0.2325, 0.2268, 0.1013], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0191, 0.0198, 0.0182, 0.0209, 0.0209, 0.0223, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:04:26,909 INFO [finetune.py:976] (5/7) Epoch 21, batch 200, loss[loss=0.1271, simple_loss=0.2019, pruned_loss=0.02611, over 4830.00 frames. 
], tot_loss[loss=0.1695, simple_loss=0.2387, pruned_loss=0.05014, over 611347.12 frames. ], batch size: 33, lr: 3.21e-03, grad_scale: 32.0 +2023-03-27 01:04:29,189 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.002e+02 1.562e+02 1.886e+02 2.300e+02 5.249e+02, threshold=3.772e+02, percent-clipped=1.0 +2023-03-27 01:04:42,938 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=114777.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:04:46,593 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=114783.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:05:00,771 INFO [finetune.py:976] (5/7) Epoch 21, batch 250, loss[loss=0.2012, simple_loss=0.2829, pruned_loss=0.05972, over 4880.00 frames. ], tot_loss[loss=0.1729, simple_loss=0.242, pruned_loss=0.05188, over 687295.65 frames. ], batch size: 34, lr: 3.21e-03, grad_scale: 32.0 +2023-03-27 01:05:19,224 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=114831.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:05:33,145 INFO [finetune.py:976] (5/7) Epoch 21, batch 300, loss[loss=0.1866, simple_loss=0.2581, pruned_loss=0.05759, over 4835.00 frames. ], tot_loss[loss=0.1747, simple_loss=0.2449, pruned_loss=0.05226, over 745936.20 frames. ], batch size: 33, lr: 3.21e-03, grad_scale: 32.0 +2023-03-27 01:05:36,362 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.093e+02 1.500e+02 1.787e+02 2.137e+02 3.935e+02, threshold=3.575e+02, percent-clipped=3.0 +2023-03-27 01:05:45,145 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5785, 1.4603, 1.4687, 1.5331, 0.9697, 2.9829, 1.1078, 1.5384], + device='cuda:5'), covar=tensor([0.3380, 0.2689, 0.2236, 0.2532, 0.1956, 0.0265, 0.2898, 0.1353], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0120, 0.0123, 0.0113, 0.0096, 0.0095, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 01:05:46,345 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=114871.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:05:51,804 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6921, 1.9362, 1.5823, 1.5649, 2.2984, 2.1395, 1.8271, 1.8704], + device='cuda:5'), covar=tensor([0.0435, 0.0346, 0.0598, 0.0371, 0.0244, 0.0640, 0.0394, 0.0391], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0108, 0.0145, 0.0112, 0.0100, 0.0111, 0.0100, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.5750e-05, 8.2828e-05, 1.1374e-04, 8.5712e-05, 7.8034e-05, 8.2437e-05, + 7.4557e-05, 8.5994e-05], device='cuda:5') +2023-03-27 01:06:06,529 INFO [finetune.py:976] (5/7) Epoch 21, batch 350, loss[loss=0.2237, simple_loss=0.2829, pruned_loss=0.08225, over 4820.00 frames. ], tot_loss[loss=0.178, simple_loss=0.2484, pruned_loss=0.05377, over 790774.80 frames. ], batch size: 33, lr: 3.21e-03, grad_scale: 32.0 +2023-03-27 01:06:26,475 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=114932.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:06:40,074 INFO [finetune.py:976] (5/7) Epoch 21, batch 400, loss[loss=0.2037, simple_loss=0.2756, pruned_loss=0.06593, over 4889.00 frames. ], tot_loss[loss=0.1811, simple_loss=0.2515, pruned_loss=0.05536, over 825343.14 frames. 
], batch size: 37, lr: 3.21e-03, grad_scale: 32.0 +2023-03-27 01:06:41,874 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.016e+02 1.627e+02 1.950e+02 2.383e+02 4.205e+02, threshold=3.900e+02, percent-clipped=3.0 +2023-03-27 01:07:20,655 INFO [finetune.py:976] (5/7) Epoch 21, batch 450, loss[loss=0.1746, simple_loss=0.2397, pruned_loss=0.05471, over 4869.00 frames. ], tot_loss[loss=0.1795, simple_loss=0.2499, pruned_loss=0.05451, over 853824.20 frames. ], batch size: 31, lr: 3.21e-03, grad_scale: 32.0 +2023-03-27 01:08:02,752 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.0964, 3.5381, 3.7081, 3.9273, 3.8678, 3.5808, 4.1710, 1.3682], + device='cuda:5'), covar=tensor([0.0895, 0.0906, 0.0960, 0.1090, 0.1324, 0.1735, 0.0768, 0.5337], + device='cuda:5'), in_proj_covar=tensor([0.0347, 0.0242, 0.0277, 0.0291, 0.0330, 0.0282, 0.0302, 0.0297], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:08:06,670 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.92 vs. limit=5.0 +2023-03-27 01:08:11,194 INFO [finetune.py:976] (5/7) Epoch 21, batch 500, loss[loss=0.1613, simple_loss=0.2297, pruned_loss=0.0464, over 4908.00 frames. ], tot_loss[loss=0.1762, simple_loss=0.2464, pruned_loss=0.05299, over 875781.06 frames. ], batch size: 37, lr: 3.21e-03, grad_scale: 32.0 +2023-03-27 01:08:13,016 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.154e+02 1.445e+02 1.728e+02 2.124e+02 2.919e+02, threshold=3.456e+02, percent-clipped=0.0 +2023-03-27 01:08:24,213 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115072.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:08:33,767 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=115086.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:08:45,014 INFO [finetune.py:976] (5/7) Epoch 21, batch 550, loss[loss=0.1746, simple_loss=0.2374, pruned_loss=0.05589, over 4875.00 frames. ], tot_loss[loss=0.1739, simple_loss=0.2438, pruned_loss=0.05197, over 893740.21 frames. ], batch size: 34, lr: 3.21e-03, grad_scale: 32.0 +2023-03-27 01:08:52,893 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9968, 1.9154, 1.7059, 2.0469, 2.5312, 2.2449, 1.7127, 1.6591], + device='cuda:5'), covar=tensor([0.2042, 0.1801, 0.1758, 0.1523, 0.1395, 0.1035, 0.2155, 0.1778], + device='cuda:5'), in_proj_covar=tensor([0.0244, 0.0210, 0.0212, 0.0194, 0.0242, 0.0187, 0.0217, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:09:14,150 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=115147.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:09:18,262 INFO [finetune.py:976] (5/7) Epoch 21, batch 600, loss[loss=0.1578, simple_loss=0.2299, pruned_loss=0.04283, over 4817.00 frames. ], tot_loss[loss=0.1738, simple_loss=0.2436, pruned_loss=0.05197, over 906700.95 frames. ], batch size: 30, lr: 3.21e-03, grad_scale: 32.0 +2023-03-27 01:09:19,199 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. 
limit=2.0 +2023-03-27 01:09:20,112 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.082e+02 1.558e+02 1.835e+02 2.263e+02 4.639e+02, threshold=3.670e+02, percent-clipped=5.0 +2023-03-27 01:09:51,870 INFO [finetune.py:976] (5/7) Epoch 21, batch 650, loss[loss=0.2027, simple_loss=0.286, pruned_loss=0.05974, over 4757.00 frames. ], tot_loss[loss=0.1774, simple_loss=0.2476, pruned_loss=0.05358, over 915577.64 frames. ], batch size: 59, lr: 3.21e-03, grad_scale: 32.0 +2023-03-27 01:10:02,769 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5254, 1.3567, 1.9948, 1.8571, 1.5239, 3.3664, 1.3119, 1.4442], + device='cuda:5'), covar=tensor([0.1006, 0.2059, 0.1339, 0.1044, 0.1797, 0.0278, 0.1775, 0.2100], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0074, 0.0076, 0.0092, 0.0080, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 01:10:07,432 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115227.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:10:25,139 INFO [finetune.py:976] (5/7) Epoch 21, batch 700, loss[loss=0.1717, simple_loss=0.254, pruned_loss=0.04469, over 4897.00 frames. ], tot_loss[loss=0.1787, simple_loss=0.2497, pruned_loss=0.05383, over 924205.26 frames. ], batch size: 32, lr: 3.21e-03, grad_scale: 32.0 +2023-03-27 01:10:26,915 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.090e+02 1.664e+02 1.957e+02 2.283e+02 3.730e+02, threshold=3.914e+02, percent-clipped=1.0 +2023-03-27 01:10:58,921 INFO [finetune.py:976] (5/7) Epoch 21, batch 750, loss[loss=0.2014, simple_loss=0.2444, pruned_loss=0.07921, over 4218.00 frames. ], tot_loss[loss=0.1784, simple_loss=0.2498, pruned_loss=0.05354, over 930813.81 frames. ], batch size: 18, lr: 3.21e-03, grad_scale: 32.0 +2023-03-27 01:10:59,656 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9830, 1.8721, 1.5881, 1.7498, 1.9244, 1.7429, 2.1959, 1.9906], + device='cuda:5'), covar=tensor([0.1488, 0.2080, 0.3090, 0.2786, 0.2809, 0.1737, 0.3048, 0.1968], + device='cuda:5'), in_proj_covar=tensor([0.0187, 0.0189, 0.0237, 0.0255, 0.0249, 0.0205, 0.0217, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:10:59,967 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-03-27 01:11:31,743 INFO [finetune.py:976] (5/7) Epoch 21, batch 800, loss[loss=0.171, simple_loss=0.2401, pruned_loss=0.05095, over 4860.00 frames. ], tot_loss[loss=0.1782, simple_loss=0.2499, pruned_loss=0.05328, over 937185.74 frames. 
], batch size: 31, lr: 3.21e-03, grad_scale: 32.0 +2023-03-27 01:11:33,561 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.112e+02 1.426e+02 1.720e+02 2.044e+02 3.360e+02, threshold=3.441e+02, percent-clipped=0.0 +2023-03-27 01:11:41,499 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=115370.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:11:42,134 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=115371.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:11:42,700 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115372.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:12:04,585 INFO [finetune.py:976] (5/7) Epoch 21, batch 850, loss[loss=0.1715, simple_loss=0.2491, pruned_loss=0.04697, over 4900.00 frames. ], tot_loss[loss=0.178, simple_loss=0.2489, pruned_loss=0.05358, over 940153.55 frames. ], batch size: 36, lr: 3.21e-03, grad_scale: 32.0 +2023-03-27 01:12:14,865 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=115420.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:12:16,192 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.32 vs. limit=5.0 +2023-03-27 01:12:24,434 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=115431.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:12:29,883 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=115432.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:12:32,195 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6251, 1.5971, 1.5330, 1.6231, 1.1750, 3.5522, 1.3791, 1.7104], + device='cuda:5'), covar=tensor([0.3576, 0.2609, 0.2188, 0.2524, 0.1810, 0.0213, 0.2395, 0.1296], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0120, 0.0123, 0.0113, 0.0096, 0.0095, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 01:12:40,127 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1288, 1.7429, 2.1629, 1.4505, 2.0659, 2.1177, 1.6437, 2.3758], + device='cuda:5'), covar=tensor([0.1335, 0.2125, 0.1458, 0.1998, 0.1126, 0.1601, 0.2993, 0.0891], + device='cuda:5'), in_proj_covar=tensor([0.0194, 0.0206, 0.0192, 0.0191, 0.0176, 0.0214, 0.0220, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:12:41,979 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115442.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:12:54,665 INFO [finetune.py:976] (5/7) Epoch 21, batch 900, loss[loss=0.1472, simple_loss=0.2267, pruned_loss=0.03378, over 4726.00 frames. ], tot_loss[loss=0.1774, simple_loss=0.2476, pruned_loss=0.05358, over 944852.98 frames. ], batch size: 23, lr: 3.20e-03, grad_scale: 32.0 +2023-03-27 01:13:00,780 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.046e+02 1.494e+02 1.776e+02 2.140e+02 4.219e+02, threshold=3.551e+02, percent-clipped=3.0 +2023-03-27 01:13:37,348 INFO [finetune.py:976] (5/7) Epoch 21, batch 950, loss[loss=0.1516, simple_loss=0.2156, pruned_loss=0.04387, over 4828.00 frames. ], tot_loss[loss=0.1764, simple_loss=0.2459, pruned_loss=0.05341, over 948458.30 frames. 
], batch size: 30, lr: 3.20e-03, grad_scale: 32.0 +2023-03-27 01:13:51,939 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115527.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:14:11,294 INFO [finetune.py:976] (5/7) Epoch 21, batch 1000, loss[loss=0.1542, simple_loss=0.2264, pruned_loss=0.041, over 4828.00 frames. ], tot_loss[loss=0.1776, simple_loss=0.2475, pruned_loss=0.05382, over 949526.49 frames. ], batch size: 25, lr: 3.20e-03, grad_scale: 32.0 +2023-03-27 01:14:13,114 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.049e+02 1.550e+02 1.849e+02 2.159e+02 3.452e+02, threshold=3.698e+02, percent-clipped=0.0 +2023-03-27 01:14:21,536 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.21 vs. limit=5.0 +2023-03-27 01:14:24,471 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=115575.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:14:25,770 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9504, 1.9154, 1.5180, 1.7295, 1.8153, 1.7870, 1.8393, 2.4737], + device='cuda:5'), covar=tensor([0.3799, 0.3744, 0.3270, 0.3818, 0.4116, 0.2394, 0.3508, 0.1949], + device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0261, 0.0231, 0.0275, 0.0251, 0.0221, 0.0251, 0.0233], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:14:30,024 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3694, 1.4979, 1.1887, 1.5033, 1.7503, 1.5717, 1.4864, 1.3199], + device='cuda:5'), covar=tensor([0.0347, 0.0314, 0.0624, 0.0278, 0.0192, 0.0589, 0.0321, 0.0415], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0108, 0.0146, 0.0113, 0.0101, 0.0111, 0.0101, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.6214e-05, 8.3330e-05, 1.1471e-04, 8.6393e-05, 7.8309e-05, 8.2489e-05, + 7.5151e-05, 8.6716e-05], device='cuda:5') +2023-03-27 01:14:44,024 INFO [finetune.py:976] (5/7) Epoch 21, batch 1050, loss[loss=0.2005, simple_loss=0.2777, pruned_loss=0.06164, over 4825.00 frames. ], tot_loss[loss=0.1782, simple_loss=0.2489, pruned_loss=0.05378, over 952354.62 frames. ], batch size: 39, lr: 3.20e-03, grad_scale: 32.0 +2023-03-27 01:14:47,124 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1817, 1.4944, 0.7422, 1.9591, 2.3568, 1.6036, 1.8485, 1.8673], + device='cuda:5'), covar=tensor([0.1357, 0.1932, 0.2054, 0.1131, 0.1898, 0.2039, 0.1314, 0.1893], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0094, 0.0110, 0.0091, 0.0119, 0.0093, 0.0097, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 01:14:53,174 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-03-27 01:15:16,666 INFO [finetune.py:976] (5/7) Epoch 21, batch 1100, loss[loss=0.1752, simple_loss=0.2459, pruned_loss=0.05221, over 4891.00 frames. ], tot_loss[loss=0.1777, simple_loss=0.2487, pruned_loss=0.05333, over 953319.81 frames. ], batch size: 37, lr: 3.20e-03, grad_scale: 32.0 +2023-03-27 01:15:19,443 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.043e+02 1.582e+02 1.822e+02 2.328e+02 4.675e+02, threshold=3.643e+02, percent-clipped=4.0 +2023-03-27 01:15:42,298 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.65 vs. 
limit=5.0 +2023-03-27 01:15:50,437 INFO [finetune.py:976] (5/7) Epoch 21, batch 1150, loss[loss=0.231, simple_loss=0.2936, pruned_loss=0.08426, over 4145.00 frames. ], tot_loss[loss=0.1792, simple_loss=0.2505, pruned_loss=0.05394, over 955279.45 frames. ], batch size: 65, lr: 3.20e-03, grad_scale: 32.0 +2023-03-27 01:16:01,890 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0350, 2.0496, 1.6489, 0.8351, 1.6921, 1.7271, 1.6083, 1.8578], + device='cuda:5'), covar=tensor([0.0821, 0.0547, 0.1192, 0.1698, 0.1174, 0.1868, 0.1946, 0.0772], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0191, 0.0198, 0.0182, 0.0210, 0.0208, 0.0222, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:16:05,318 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115726.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:16:05,929 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115727.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:16:14,926 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115742.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:16:24,030 INFO [finetune.py:976] (5/7) Epoch 21, batch 1200, loss[loss=0.1625, simple_loss=0.2389, pruned_loss=0.04308, over 4865.00 frames. ], tot_loss[loss=0.1781, simple_loss=0.2491, pruned_loss=0.05358, over 952686.42 frames. ], batch size: 31, lr: 3.20e-03, grad_scale: 32.0 +2023-03-27 01:16:25,821 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.936e+01 1.465e+02 1.737e+02 2.048e+02 4.574e+02, threshold=3.475e+02, percent-clipped=2.0 +2023-03-27 01:16:36,136 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.04 vs. limit=5.0 +2023-03-27 01:16:40,220 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9253, 1.5334, 2.0289, 1.9774, 1.7965, 1.7109, 1.9736, 1.8659], + device='cuda:5'), covar=tensor([0.4112, 0.3807, 0.3205, 0.3418, 0.4467, 0.3813, 0.4218, 0.2965], + device='cuda:5'), in_proj_covar=tensor([0.0256, 0.0244, 0.0264, 0.0283, 0.0281, 0.0257, 0.0291, 0.0246], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:16:47,357 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=115790.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:16:56,823 INFO [finetune.py:976] (5/7) Epoch 21, batch 1250, loss[loss=0.1982, simple_loss=0.264, pruned_loss=0.06615, over 4935.00 frames. ], tot_loss[loss=0.1764, simple_loss=0.2467, pruned_loss=0.05303, over 953251.63 frames. ], batch size: 38, lr: 3.20e-03, grad_scale: 32.0 +2023-03-27 01:17:09,242 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8029, 1.6979, 2.2330, 3.5104, 2.4576, 2.5093, 1.0765, 2.9286], + device='cuda:5'), covar=tensor([0.1720, 0.1367, 0.1366, 0.0547, 0.0743, 0.1409, 0.1945, 0.0455], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0116, 0.0133, 0.0163, 0.0100, 0.0136, 0.0124, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 01:17:29,534 INFO [finetune.py:976] (5/7) Epoch 21, batch 1300, loss[loss=0.1712, simple_loss=0.2363, pruned_loss=0.05303, over 4865.00 frames. ], tot_loss[loss=0.1738, simple_loss=0.2437, pruned_loss=0.05197, over 953975.57 frames. 
], batch size: 31, lr: 3.20e-03, grad_scale: 32.0 +2023-03-27 01:17:32,371 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.105e+02 1.563e+02 1.759e+02 2.181e+02 4.124e+02, threshold=3.519e+02, percent-clipped=1.0 +2023-03-27 01:18:12,458 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=115892.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 01:18:23,979 INFO [finetune.py:976] (5/7) Epoch 21, batch 1350, loss[loss=0.2152, simple_loss=0.2862, pruned_loss=0.07209, over 4855.00 frames. ], tot_loss[loss=0.1741, simple_loss=0.244, pruned_loss=0.05212, over 955052.50 frames. ], batch size: 49, lr: 3.20e-03, grad_scale: 64.0 +2023-03-27 01:18:45,070 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-03-27 01:19:00,575 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=115953.0, num_to_drop=1, layers_to_drop={3} +2023-03-27 01:19:01,047 INFO [finetune.py:976] (5/7) Epoch 21, batch 1400, loss[loss=0.2069, simple_loss=0.2779, pruned_loss=0.06798, over 4892.00 frames. ], tot_loss[loss=0.1768, simple_loss=0.2472, pruned_loss=0.05323, over 953600.24 frames. ], batch size: 32, lr: 3.20e-03, grad_scale: 64.0 +2023-03-27 01:19:02,863 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.014e+02 1.541e+02 1.818e+02 2.071e+02 3.575e+02, threshold=3.635e+02, percent-clipped=1.0 +2023-03-27 01:19:35,504 INFO [finetune.py:976] (5/7) Epoch 21, batch 1450, loss[loss=0.1899, simple_loss=0.26, pruned_loss=0.05988, over 4846.00 frames. ], tot_loss[loss=0.1809, simple_loss=0.2514, pruned_loss=0.05516, over 951426.79 frames. ], batch size: 31, lr: 3.20e-03, grad_scale: 64.0 +2023-03-27 01:19:42,987 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116014.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:19:43,102 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-03-27 01:19:51,745 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116026.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:19:52,356 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116027.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:20:09,070 INFO [finetune.py:976] (5/7) Epoch 21, batch 1500, loss[loss=0.194, simple_loss=0.2579, pruned_loss=0.06508, over 4911.00 frames. ], tot_loss[loss=0.1814, simple_loss=0.2525, pruned_loss=0.05513, over 952031.13 frames. ], batch size: 36, lr: 3.20e-03, grad_scale: 64.0 +2023-03-27 01:20:10,898 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.231e+02 1.719e+02 2.058e+02 2.312e+02 4.180e+02, threshold=4.116e+02, percent-clipped=2.0 +2023-03-27 01:20:23,169 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=116074.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:20:23,793 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=116075.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:20:23,857 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116075.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 01:20:42,655 INFO [finetune.py:976] (5/7) Epoch 21, batch 1550, loss[loss=0.1698, simple_loss=0.2404, pruned_loss=0.04962, over 4739.00 frames. ], tot_loss[loss=0.1809, simple_loss=0.2525, pruned_loss=0.05464, over 951820.57 frames. 
], batch size: 27, lr: 3.20e-03, grad_scale: 64.0 +2023-03-27 01:20:43,386 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116105.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:20:50,842 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116116.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:21:03,636 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3366, 2.3721, 1.8756, 2.6682, 2.3082, 1.9988, 2.8987, 2.4385], + device='cuda:5'), covar=tensor([0.1261, 0.2234, 0.2846, 0.2277, 0.2515, 0.1576, 0.2870, 0.1697], + device='cuda:5'), in_proj_covar=tensor([0.0186, 0.0188, 0.0235, 0.0254, 0.0248, 0.0203, 0.0215, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:21:14,839 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6991, 1.6367, 1.5333, 1.6781, 1.0440, 3.7715, 1.4145, 1.8365], + device='cuda:5'), covar=tensor([0.3276, 0.2442, 0.2178, 0.2397, 0.1861, 0.0167, 0.2523, 0.1271], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0115, 0.0120, 0.0122, 0.0113, 0.0096, 0.0094, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 01:21:15,952 INFO [finetune.py:976] (5/7) Epoch 21, batch 1600, loss[loss=0.129, simple_loss=0.2062, pruned_loss=0.02595, over 4766.00 frames. ], tot_loss[loss=0.1792, simple_loss=0.2502, pruned_loss=0.0541, over 953414.95 frames. ], batch size: 27, lr: 3.20e-03, grad_scale: 64.0 +2023-03-27 01:21:17,776 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.061e+01 1.555e+02 1.841e+02 2.223e+02 4.654e+02, threshold=3.683e+02, percent-clipped=1.0 +2023-03-27 01:21:23,358 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116166.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:21:24,671 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-03-27 01:21:31,971 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116177.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:21:49,853 INFO [finetune.py:976] (5/7) Epoch 21, batch 1650, loss[loss=0.1765, simple_loss=0.2566, pruned_loss=0.04824, over 4918.00 frames. ], tot_loss[loss=0.1768, simple_loss=0.2476, pruned_loss=0.05299, over 955867.36 frames. ], batch size: 37, lr: 3.20e-03, grad_scale: 64.0 +2023-03-27 01:22:15,302 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5360, 1.4303, 1.3190, 1.4917, 1.7902, 1.7071, 1.4298, 1.3025], + device='cuda:5'), covar=tensor([0.0310, 0.0335, 0.0610, 0.0295, 0.0220, 0.0405, 0.0368, 0.0413], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0108, 0.0145, 0.0113, 0.0101, 0.0111, 0.0101, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.5948e-05, 8.2893e-05, 1.1437e-04, 8.6396e-05, 7.8250e-05, 8.2325e-05, + 7.4981e-05, 8.6639e-05], device='cuda:5') +2023-03-27 01:22:19,470 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116248.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 01:22:23,486 INFO [finetune.py:976] (5/7) Epoch 21, batch 1700, loss[loss=0.148, simple_loss=0.2146, pruned_loss=0.04066, over 4898.00 frames. ], tot_loss[loss=0.1746, simple_loss=0.245, pruned_loss=0.05217, over 954611.92 frames. 
], batch size: 35, lr: 3.20e-03, grad_scale: 64.0 +2023-03-27 01:22:25,327 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.524e+01 1.502e+02 1.774e+02 2.116e+02 3.203e+02, threshold=3.548e+02, percent-clipped=0.0 +2023-03-27 01:22:40,111 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8619, 2.5389, 2.1137, 1.1203, 2.3088, 2.1880, 1.9391, 2.2874], + device='cuda:5'), covar=tensor([0.0638, 0.0810, 0.1467, 0.2017, 0.1406, 0.1835, 0.1988, 0.0926], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0192, 0.0198, 0.0182, 0.0210, 0.0209, 0.0223, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:22:43,170 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1045, 1.9567, 2.1897, 2.1160, 1.8880, 1.9361, 2.0837, 2.0607], + device='cuda:5'), covar=tensor([0.3867, 0.3522, 0.2805, 0.3781, 0.4741, 0.3762, 0.4196, 0.2823], + device='cuda:5'), in_proj_covar=tensor([0.0254, 0.0242, 0.0262, 0.0281, 0.0280, 0.0256, 0.0290, 0.0245], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:22:59,248 INFO [finetune.py:976] (5/7) Epoch 21, batch 1750, loss[loss=0.1208, simple_loss=0.1959, pruned_loss=0.02287, over 4690.00 frames. ], tot_loss[loss=0.176, simple_loss=0.2464, pruned_loss=0.05282, over 953849.74 frames. ], batch size: 23, lr: 3.20e-03, grad_scale: 64.0 +2023-03-27 01:23:19,662 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116323.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:23:30,989 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4138, 1.3737, 1.2866, 1.4635, 0.9707, 2.8532, 1.0529, 1.4707], + device='cuda:5'), covar=tensor([0.3239, 0.2513, 0.2190, 0.2298, 0.1841, 0.0271, 0.2894, 0.1278], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0115, 0.0120, 0.0123, 0.0113, 0.0096, 0.0095, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 01:23:58,747 INFO [finetune.py:976] (5/7) Epoch 21, batch 1800, loss[loss=0.16, simple_loss=0.2253, pruned_loss=0.04734, over 4805.00 frames. ], tot_loss[loss=0.1789, simple_loss=0.2503, pruned_loss=0.05372, over 956567.96 frames. ], batch size: 25, lr: 3.20e-03, grad_scale: 64.0 +2023-03-27 01:24:00,588 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.248e+02 1.627e+02 1.938e+02 2.363e+02 5.057e+02, threshold=3.876e+02, percent-clipped=3.0 +2023-03-27 01:24:04,029 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-03-27 01:24:08,546 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116370.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 01:24:18,616 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116384.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:24:30,117 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-03-27 01:24:31,788 INFO [finetune.py:976] (5/7) Epoch 21, batch 1850, loss[loss=0.176, simple_loss=0.2509, pruned_loss=0.05059, over 4886.00 frames. ], tot_loss[loss=0.1793, simple_loss=0.2511, pruned_loss=0.05376, over 959355.60 frames. 
], batch size: 43, lr: 3.20e-03, grad_scale: 64.0 +2023-03-27 01:24:34,766 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.5917, 3.9932, 4.2283, 4.4436, 4.4021, 4.1101, 4.6748, 1.5243], + device='cuda:5'), covar=tensor([0.0753, 0.0851, 0.1045, 0.0933, 0.1191, 0.1588, 0.0647, 0.5440], + device='cuda:5'), in_proj_covar=tensor([0.0347, 0.0239, 0.0277, 0.0288, 0.0329, 0.0282, 0.0300, 0.0295], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:24:59,656 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8675, 1.6411, 2.1856, 1.4818, 2.0755, 2.0468, 1.4793, 2.2601], + device='cuda:5'), covar=tensor([0.1518, 0.2292, 0.1600, 0.2284, 0.0951, 0.1655, 0.3233, 0.0853], + device='cuda:5'), in_proj_covar=tensor([0.0191, 0.0204, 0.0190, 0.0189, 0.0173, 0.0213, 0.0219, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:25:00,257 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116446.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:25:05,404 INFO [finetune.py:976] (5/7) Epoch 21, batch 1900, loss[loss=0.1705, simple_loss=0.2458, pruned_loss=0.04759, over 4817.00 frames. ], tot_loss[loss=0.1796, simple_loss=0.2517, pruned_loss=0.05373, over 958816.40 frames. ], batch size: 38, lr: 3.20e-03, grad_scale: 64.0 +2023-03-27 01:25:07,232 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.037e+02 1.572e+02 1.880e+02 2.123e+02 3.861e+02, threshold=3.760e+02, percent-clipped=0.0 +2023-03-27 01:25:10,211 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116461.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:25:16,988 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116472.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:25:35,769 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-03-27 01:25:38,739 INFO [finetune.py:976] (5/7) Epoch 21, batch 1950, loss[loss=0.1368, simple_loss=0.2155, pruned_loss=0.02903, over 4728.00 frames. ], tot_loss[loss=0.1768, simple_loss=0.249, pruned_loss=0.05226, over 958788.13 frames. ], batch size: 54, lr: 3.20e-03, grad_scale: 64.0 +2023-03-27 01:25:39,461 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116505.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:25:40,696 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116507.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 01:25:49,115 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.5269, 1.7075, 1.6594, 0.8563, 1.7781, 2.0034, 1.8915, 1.4738], + device='cuda:5'), covar=tensor([0.0993, 0.0632, 0.0545, 0.0671, 0.0495, 0.0533, 0.0409, 0.0731], + device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0149, 0.0125, 0.0124, 0.0130, 0.0129, 0.0141, 0.0148], + device='cuda:5'), out_proj_covar=tensor([9.0070e-05, 1.0789e-04, 8.9464e-05, 8.7321e-05, 9.1531e-05, 9.2018e-05, + 1.0150e-04, 1.0587e-04], device='cuda:5') +2023-03-27 01:26:07,966 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116548.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 01:26:11,459 INFO [finetune.py:976] (5/7) Epoch 21, batch 2000, loss[loss=0.1966, simple_loss=0.2609, pruned_loss=0.06609, over 4882.00 frames. 
], tot_loss[loss=0.1752, simple_loss=0.2467, pruned_loss=0.05189, over 959177.41 frames. ], batch size: 43, lr: 3.20e-03, grad_scale: 64.0 +2023-03-27 01:26:13,787 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 7.123e+01 1.433e+02 1.690e+02 2.067e+02 3.885e+02, threshold=3.380e+02, percent-clipped=2.0 +2023-03-27 01:26:19,755 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116566.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:26:39,353 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=116596.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 01:26:39,413 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.2805, 1.5236, 1.5537, 0.8840, 1.5333, 1.7912, 1.7692, 1.3909], + device='cuda:5'), covar=tensor([0.0892, 0.0596, 0.0546, 0.0529, 0.0531, 0.0568, 0.0362, 0.0707], + device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0149, 0.0125, 0.0124, 0.0130, 0.0128, 0.0142, 0.0148], + device='cuda:5'), out_proj_covar=tensor([8.9837e-05, 1.0803e-04, 8.9403e-05, 8.7394e-05, 9.1482e-05, 9.1759e-05, + 1.0159e-04, 1.0590e-04], device='cuda:5') +2023-03-27 01:26:44,684 INFO [finetune.py:976] (5/7) Epoch 21, batch 2050, loss[loss=0.1534, simple_loss=0.2371, pruned_loss=0.0349, over 4794.00 frames. ], tot_loss[loss=0.1726, simple_loss=0.2435, pruned_loss=0.05089, over 960156.13 frames. ], batch size: 29, lr: 3.20e-03, grad_scale: 64.0 +2023-03-27 01:26:54,230 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116618.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:26:56,684 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0508, 2.7581, 2.5167, 1.3901, 2.7524, 2.1357, 1.9697, 2.4523], + device='cuda:5'), covar=tensor([0.0885, 0.0785, 0.1757, 0.2120, 0.1481, 0.2293, 0.2315, 0.1109], + device='cuda:5'), in_proj_covar=tensor([0.0168, 0.0191, 0.0197, 0.0181, 0.0207, 0.0207, 0.0221, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:27:11,612 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.10 vs. limit=2.0 +2023-03-27 01:27:18,449 INFO [finetune.py:976] (5/7) Epoch 21, batch 2100, loss[loss=0.2419, simple_loss=0.3144, pruned_loss=0.0847, over 4843.00 frames. ], tot_loss[loss=0.1728, simple_loss=0.2434, pruned_loss=0.05116, over 958697.91 frames. ], batch size: 49, lr: 3.20e-03, grad_scale: 32.0 +2023-03-27 01:27:20,846 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.015e+02 1.617e+02 1.783e+02 2.198e+02 6.495e+02, threshold=3.567e+02, percent-clipped=4.0 +2023-03-27 01:27:29,057 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116670.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 01:27:34,428 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116679.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:27:34,470 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116679.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:27:51,925 INFO [finetune.py:976] (5/7) Epoch 21, batch 2150, loss[loss=0.1804, simple_loss=0.2627, pruned_loss=0.04904, over 4928.00 frames. ], tot_loss[loss=0.1752, simple_loss=0.2462, pruned_loss=0.05214, over 958086.46 frames. 
], batch size: 42, lr: 3.20e-03, grad_scale: 32.0 +2023-03-27 01:27:52,036 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116704.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:28:00,966 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=116718.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:28:26,698 INFO [finetune.py:976] (5/7) Epoch 21, batch 2200, loss[loss=0.1733, simple_loss=0.2454, pruned_loss=0.05053, over 4867.00 frames. ], tot_loss[loss=0.1779, simple_loss=0.249, pruned_loss=0.05339, over 959660.25 frames. ], batch size: 34, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:28:30,714 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.107e+02 1.636e+02 2.054e+02 2.505e+02 6.138e+02, threshold=4.108e+02, percent-clipped=5.0 +2023-03-27 01:28:32,679 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116761.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:28:39,683 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116765.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:28:44,465 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116772.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:28:53,697 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2501, 1.2990, 1.5468, 1.5806, 1.4477, 2.9460, 1.2987, 1.4832], + device='cuda:5'), covar=tensor([0.1042, 0.1918, 0.1141, 0.0975, 0.1712, 0.0284, 0.1593, 0.1829], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0075, 0.0077, 0.0092, 0.0081, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 01:29:09,222 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.51 vs. limit=5.0 +2023-03-27 01:29:19,242 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116802.0, num_to_drop=1, layers_to_drop={3} +2023-03-27 01:29:19,274 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116802.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:29:20,384 INFO [finetune.py:976] (5/7) Epoch 21, batch 2250, loss[loss=0.1944, simple_loss=0.2642, pruned_loss=0.06226, over 4916.00 frames. ], tot_loss[loss=0.179, simple_loss=0.2503, pruned_loss=0.05382, over 958970.48 frames. ], batch size: 41, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:29:28,970 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=116809.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:29:39,990 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=116820.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:30:01,469 INFO [finetune.py:976] (5/7) Epoch 21, batch 2300, loss[loss=0.1519, simple_loss=0.2259, pruned_loss=0.03891, over 4738.00 frames. ], tot_loss[loss=0.1788, simple_loss=0.2509, pruned_loss=0.05336, over 959326.95 frames. 
], batch size: 27, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:30:04,886 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.477e+02 1.740e+02 2.172e+02 4.454e+02, threshold=3.479e+02, percent-clipped=1.0 +2023-03-27 01:30:06,812 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116861.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:30:08,069 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116863.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:30:34,135 INFO [finetune.py:976] (5/7) Epoch 21, batch 2350, loss[loss=0.1898, simple_loss=0.2534, pruned_loss=0.06311, over 4815.00 frames. ], tot_loss[loss=0.1779, simple_loss=0.2492, pruned_loss=0.0533, over 960163.16 frames. ], batch size: 39, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:31:07,400 INFO [finetune.py:976] (5/7) Epoch 21, batch 2400, loss[loss=0.1884, simple_loss=0.2477, pruned_loss=0.06461, over 4928.00 frames. ], tot_loss[loss=0.175, simple_loss=0.2457, pruned_loss=0.05213, over 956640.81 frames. ], batch size: 38, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:31:09,767 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.097e+02 1.566e+02 1.863e+02 2.219e+02 3.648e+02, threshold=3.726e+02, percent-clipped=1.0 +2023-03-27 01:31:16,933 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6771, 1.5861, 1.5550, 1.6323, 1.0029, 2.7612, 1.2019, 1.5752], + device='cuda:5'), covar=tensor([0.3004, 0.2232, 0.1938, 0.2218, 0.1761, 0.0322, 0.2188, 0.1106], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0115, 0.0120, 0.0123, 0.0113, 0.0096, 0.0094, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 01:31:21,637 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116974.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:31:25,171 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116979.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:31:36,699 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.4697, 2.9505, 2.7242, 1.3928, 2.9113, 2.5210, 2.3558, 2.6357], + device='cuda:5'), covar=tensor([0.0820, 0.0890, 0.1842, 0.2139, 0.1546, 0.1919, 0.2068, 0.1085], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0192, 0.0199, 0.0183, 0.0209, 0.0208, 0.0223, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:31:40,996 INFO [finetune.py:976] (5/7) Epoch 21, batch 2450, loss[loss=0.1794, simple_loss=0.2463, pruned_loss=0.05629, over 4919.00 frames. ], tot_loss[loss=0.1722, simple_loss=0.2426, pruned_loss=0.05086, over 955170.28 frames. ], batch size: 36, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:31:57,424 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=117027.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:32:14,705 INFO [finetune.py:976] (5/7) Epoch 21, batch 2500, loss[loss=0.125, simple_loss=0.2058, pruned_loss=0.0221, over 4775.00 frames. ], tot_loss[loss=0.1736, simple_loss=0.244, pruned_loss=0.05164, over 955413.98 frames. 
], batch size: 26, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:32:17,116 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.025e+02 1.510e+02 1.797e+02 2.340e+02 3.968e+02, threshold=3.593e+02, percent-clipped=1.0 +2023-03-27 01:32:18,390 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=117060.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:32:40,304 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-03-27 01:32:46,785 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117102.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:32:47,907 INFO [finetune.py:976] (5/7) Epoch 21, batch 2550, loss[loss=0.173, simple_loss=0.25, pruned_loss=0.048, over 4932.00 frames. ], tot_loss[loss=0.1751, simple_loss=0.2465, pruned_loss=0.05191, over 955521.48 frames. ], batch size: 38, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:32:52,336 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8346, 2.8307, 2.7053, 1.9355, 2.6650, 2.9419, 2.9925, 2.4920], + device='cuda:5'), covar=tensor([0.0546, 0.0543, 0.0620, 0.0809, 0.0580, 0.0693, 0.0539, 0.0957], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0134, 0.0138, 0.0119, 0.0124, 0.0137, 0.0139, 0.0160], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:33:07,516 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7432, 3.8191, 3.5498, 1.7024, 3.9722, 3.0152, 0.9018, 2.6361], + device='cuda:5'), covar=tensor([0.2413, 0.1847, 0.1434, 0.3456, 0.1063, 0.0940, 0.4181, 0.1577], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0178, 0.0158, 0.0130, 0.0161, 0.0123, 0.0148, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 01:33:19,399 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=117150.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:33:21,772 INFO [finetune.py:976] (5/7) Epoch 21, batch 2600, loss[loss=0.1701, simple_loss=0.2526, pruned_loss=0.04384, over 4888.00 frames. ], tot_loss[loss=0.1756, simple_loss=0.2477, pruned_loss=0.0518, over 955861.37 frames. ], batch size: 32, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:33:24,216 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.102e+02 1.533e+02 1.830e+02 2.226e+02 4.351e+02, threshold=3.661e+02, percent-clipped=3.0 +2023-03-27 01:33:24,301 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=117158.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:33:26,120 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117161.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:33:38,972 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5717, 1.9708, 1.4916, 1.6617, 2.3258, 2.1755, 1.9252, 1.8658], + device='cuda:5'), covar=tensor([0.0567, 0.0327, 0.0640, 0.0340, 0.0245, 0.0665, 0.0313, 0.0394], + device='cuda:5'), in_proj_covar=tensor([0.0097, 0.0107, 0.0143, 0.0112, 0.0099, 0.0110, 0.0099, 0.0112], + device='cuda:5'), out_proj_covar=tensor([7.5503e-05, 8.1804e-05, 1.1277e-04, 8.5667e-05, 7.6888e-05, 8.1206e-05, + 7.3981e-05, 8.5809e-05], device='cuda:5') +2023-03-27 01:33:48,241 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. 
limit=2.0 +2023-03-27 01:34:06,272 INFO [finetune.py:976] (5/7) Epoch 21, batch 2650, loss[loss=0.1838, simple_loss=0.2615, pruned_loss=0.05306, over 4902.00 frames. ], tot_loss[loss=0.1768, simple_loss=0.2488, pruned_loss=0.05241, over 952646.22 frames. ], batch size: 37, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:34:09,392 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=117209.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:34:18,226 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2892, 1.3397, 1.8091, 1.7055, 1.5526, 3.3049, 1.2214, 1.4220], + device='cuda:5'), covar=tensor([0.1341, 0.2570, 0.1321, 0.1258, 0.2127, 0.0276, 0.2215, 0.2651], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0074, 0.0076, 0.0091, 0.0080, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 01:34:26,104 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117221.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:35:01,505 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.29 vs. limit=5.0 +2023-03-27 01:35:03,767 INFO [finetune.py:976] (5/7) Epoch 21, batch 2700, loss[loss=0.1714, simple_loss=0.2438, pruned_loss=0.04949, over 4736.00 frames. ], tot_loss[loss=0.1762, simple_loss=0.2484, pruned_loss=0.05203, over 950724.60 frames. ], batch size: 54, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:35:06,201 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.522e+02 1.732e+02 2.127e+02 4.053e+02, threshold=3.464e+02, percent-clipped=3.0 +2023-03-27 01:35:23,764 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117274.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:35:30,659 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117282.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:35:45,281 INFO [finetune.py:976] (5/7) Epoch 21, batch 2750, loss[loss=0.1666, simple_loss=0.2438, pruned_loss=0.04475, over 4764.00 frames. ], tot_loss[loss=0.1734, simple_loss=0.245, pruned_loss=0.05085, over 950326.68 frames. ], batch size: 26, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:35:52,670 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3444, 2.3126, 2.1058, 1.2823, 2.2655, 1.9623, 1.8366, 2.2132], + device='cuda:5'), covar=tensor([0.0809, 0.0572, 0.1259, 0.1637, 0.0921, 0.1675, 0.1798, 0.0748], + device='cuda:5'), in_proj_covar=tensor([0.0167, 0.0189, 0.0194, 0.0180, 0.0206, 0.0205, 0.0219, 0.0192], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:35:56,249 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=117322.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:36:01,563 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117329.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:36:18,645 INFO [finetune.py:976] (5/7) Epoch 21, batch 2800, loss[loss=0.1687, simple_loss=0.2311, pruned_loss=0.0531, over 4263.00 frames. ], tot_loss[loss=0.1715, simple_loss=0.2423, pruned_loss=0.05037, over 950395.46 frames. 
], batch size: 18, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:36:21,563 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.020e+02 1.495e+02 1.752e+02 2.115e+02 2.888e+02, threshold=3.503e+02, percent-clipped=0.0 +2023-03-27 01:36:22,905 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117360.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:36:42,939 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117390.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:36:48,594 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.48 vs. limit=5.0 +2023-03-27 01:36:49,651 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6004, 1.2576, 0.7754, 1.4750, 1.9594, 1.0347, 1.3599, 1.4671], + device='cuda:5'), covar=tensor([0.1513, 0.1998, 0.1910, 0.1210, 0.2072, 0.2025, 0.1457, 0.1863], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0094, 0.0110, 0.0092, 0.0119, 0.0093, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 01:36:52,478 INFO [finetune.py:976] (5/7) Epoch 21, batch 2850, loss[loss=0.1834, simple_loss=0.251, pruned_loss=0.05791, over 4836.00 frames. ], tot_loss[loss=0.1715, simple_loss=0.2417, pruned_loss=0.05059, over 953015.64 frames. ], batch size: 33, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:36:54,200 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-03-27 01:36:54,973 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=117408.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:37:25,543 INFO [finetune.py:976] (5/7) Epoch 21, batch 2900, loss[loss=0.1729, simple_loss=0.2597, pruned_loss=0.04303, over 4810.00 frames. ], tot_loss[loss=0.1748, simple_loss=0.2458, pruned_loss=0.05192, over 953657.16 frames. ], batch size: 38, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:37:28,392 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.016e+02 1.554e+02 1.875e+02 2.295e+02 6.888e+02, threshold=3.749e+02, percent-clipped=2.0 +2023-03-27 01:37:28,516 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117458.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:37:32,140 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-27 01:37:57,558 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7525, 1.6828, 1.4637, 1.8914, 2.3965, 1.8996, 1.6861, 1.4235], + device='cuda:5'), covar=tensor([0.2452, 0.2142, 0.2204, 0.1634, 0.1625, 0.1303, 0.2361, 0.2048], + device='cuda:5'), in_proj_covar=tensor([0.0245, 0.0210, 0.0213, 0.0195, 0.0243, 0.0188, 0.0218, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:37:59,205 INFO [finetune.py:976] (5/7) Epoch 21, batch 2950, loss[loss=0.2247, simple_loss=0.2885, pruned_loss=0.08044, over 4824.00 frames. ], tot_loss[loss=0.1771, simple_loss=0.249, pruned_loss=0.05263, over 955273.65 frames. 
], batch size: 39, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:38:00,492 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=117506.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:38:00,544 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8454, 1.8142, 1.6742, 1.7856, 1.3917, 4.4409, 1.7224, 2.1441], + device='cuda:5'), covar=tensor([0.3293, 0.2401, 0.2098, 0.2299, 0.1599, 0.0121, 0.2343, 0.1142], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0121, 0.0123, 0.0114, 0.0096, 0.0095, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 01:38:32,248 INFO [finetune.py:976] (5/7) Epoch 21, batch 3000, loss[loss=0.1788, simple_loss=0.2424, pruned_loss=0.05758, over 4863.00 frames. ], tot_loss[loss=0.1775, simple_loss=0.2492, pruned_loss=0.05295, over 955288.88 frames. ], batch size: 31, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:38:32,248 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-27 01:38:34,025 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9057, 1.1123, 1.9997, 1.8583, 1.7448, 1.6535, 1.7242, 1.8628], + device='cuda:5'), covar=tensor([0.3871, 0.4198, 0.3556, 0.4433, 0.5217, 0.4012, 0.4789, 0.3194], + device='cuda:5'), in_proj_covar=tensor([0.0255, 0.0243, 0.0264, 0.0283, 0.0281, 0.0257, 0.0291, 0.0245], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:38:42,800 INFO [finetune.py:1010] (5/7) Epoch 21, validation: loss=0.1567, simple_loss=0.2253, pruned_loss=0.04408, over 2265189.00 frames. +2023-03-27 01:38:42,801 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-27 01:38:45,673 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.002e+02 1.534e+02 1.924e+02 2.362e+02 3.621e+02, threshold=3.849e+02, percent-clipped=0.0 +2023-03-27 01:38:48,300 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-03-27 01:38:54,034 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-03-27 01:39:00,140 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=117577.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:39:17,644 INFO [finetune.py:976] (5/7) Epoch 21, batch 3050, loss[loss=0.1597, simple_loss=0.2324, pruned_loss=0.04357, over 4818.00 frames. ], tot_loss[loss=0.1775, simple_loss=0.2493, pruned_loss=0.05284, over 954500.47 frames. ], batch size: 25, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:39:45,055 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-03-27 01:40:13,668 INFO [finetune.py:976] (5/7) Epoch 21, batch 3100, loss[loss=0.1488, simple_loss=0.225, pruned_loss=0.03624, over 4897.00 frames. ], tot_loss[loss=0.1764, simple_loss=0.2478, pruned_loss=0.05246, over 954699.89 frames. 
], batch size: 32, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:40:19,667 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.886e+01 1.482e+02 1.759e+02 2.208e+02 4.258e+02, threshold=3.518e+02, percent-clipped=1.0 +2023-03-27 01:40:46,224 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9576, 1.7067, 2.2576, 1.3886, 2.0360, 2.2094, 1.5471, 2.3047], + device='cuda:5'), covar=tensor([0.1383, 0.2101, 0.1449, 0.2091, 0.0929, 0.1301, 0.3090, 0.0907], + device='cuda:5'), in_proj_covar=tensor([0.0193, 0.0205, 0.0191, 0.0190, 0.0174, 0.0214, 0.0218, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:40:46,800 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=117685.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:40:58,310 INFO [finetune.py:976] (5/7) Epoch 21, batch 3150, loss[loss=0.2452, simple_loss=0.2995, pruned_loss=0.09549, over 4824.00 frames. ], tot_loss[loss=0.1759, simple_loss=0.2461, pruned_loss=0.05286, over 952461.20 frames. ], batch size: 39, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:41:31,634 INFO [finetune.py:976] (5/7) Epoch 21, batch 3200, loss[loss=0.1433, simple_loss=0.2033, pruned_loss=0.04163, over 4802.00 frames. ], tot_loss[loss=0.1736, simple_loss=0.2433, pruned_loss=0.05193, over 952882.58 frames. ], batch size: 25, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:41:34,035 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.069e+02 1.569e+02 1.801e+02 2.101e+02 4.822e+02, threshold=3.602e+02, percent-clipped=2.0 +2023-03-27 01:42:02,037 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2151, 2.8789, 2.7993, 1.3466, 3.0216, 2.2290, 0.8493, 1.8816], + device='cuda:5'), covar=tensor([0.2478, 0.2543, 0.1651, 0.3468, 0.1390, 0.1111, 0.4021, 0.1789], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0178, 0.0157, 0.0130, 0.0160, 0.0122, 0.0147, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 01:42:05,173 INFO [finetune.py:976] (5/7) Epoch 21, batch 3250, loss[loss=0.2127, simple_loss=0.282, pruned_loss=0.07171, over 4845.00 frames. ], tot_loss[loss=0.1744, simple_loss=0.2439, pruned_loss=0.05245, over 951568.27 frames. ], batch size: 49, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:42:23,380 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2780, 2.8805, 2.6846, 1.1662, 2.9574, 2.2270, 0.7090, 1.7733], + device='cuda:5'), covar=tensor([0.2389, 0.2106, 0.1760, 0.3733, 0.1473, 0.1145, 0.4343, 0.1791], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0177, 0.0157, 0.0129, 0.0160, 0.0122, 0.0146, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 01:42:38,404 INFO [finetune.py:976] (5/7) Epoch 21, batch 3300, loss[loss=0.1978, simple_loss=0.266, pruned_loss=0.06475, over 4934.00 frames. ], tot_loss[loss=0.1775, simple_loss=0.2476, pruned_loss=0.05377, over 951828.88 frames. 
], batch size: 38, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:42:40,848 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.148e+02 1.638e+02 1.917e+02 2.241e+02 9.038e+02, threshold=3.833e+02, percent-clipped=2.0 +2023-03-27 01:42:54,838 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117877.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:43:11,570 INFO [finetune.py:976] (5/7) Epoch 21, batch 3350, loss[loss=0.1904, simple_loss=0.2571, pruned_loss=0.06178, over 4120.00 frames. ], tot_loss[loss=0.1805, simple_loss=0.2506, pruned_loss=0.05522, over 950334.56 frames. ], batch size: 65, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:43:25,777 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=117925.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:43:45,045 INFO [finetune.py:976] (5/7) Epoch 21, batch 3400, loss[loss=0.1626, simple_loss=0.2465, pruned_loss=0.0393, over 4929.00 frames. ], tot_loss[loss=0.181, simple_loss=0.2518, pruned_loss=0.05516, over 952991.18 frames. ], batch size: 33, lr: 3.19e-03, grad_scale: 32.0 +2023-03-27 01:43:47,452 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.189e+02 1.619e+02 1.880e+02 2.233e+02 5.629e+02, threshold=3.761e+02, percent-clipped=2.0 +2023-03-27 01:44:05,873 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117985.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:44:18,635 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4317, 1.5112, 1.2558, 1.4658, 1.7911, 1.6592, 1.4535, 1.3319], + device='cuda:5'), covar=tensor([0.0338, 0.0297, 0.0642, 0.0295, 0.0198, 0.0478, 0.0297, 0.0378], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0107, 0.0144, 0.0112, 0.0099, 0.0110, 0.0101, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.5933e-05, 8.2125e-05, 1.1337e-04, 8.5943e-05, 7.7362e-05, 8.1266e-05, + 7.4969e-05, 8.6007e-05], device='cuda:5') +2023-03-27 01:44:19,702 INFO [finetune.py:976] (5/7) Epoch 21, batch 3450, loss[loss=0.1562, simple_loss=0.2225, pruned_loss=0.04494, over 4773.00 frames. ], tot_loss[loss=0.179, simple_loss=0.2503, pruned_loss=0.05386, over 954306.24 frames. ], batch size: 59, lr: 3.18e-03, grad_scale: 32.0 +2023-03-27 01:44:21,227 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. 
limit=2.0 +2023-03-27 01:44:39,326 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=118033.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:44:39,415 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3874, 2.2783, 1.8209, 2.2304, 2.3178, 2.0766, 2.6385, 2.3945], + device='cuda:5'), covar=tensor([0.1251, 0.1989, 0.2932, 0.2583, 0.2302, 0.1571, 0.2523, 0.1802], + device='cuda:5'), in_proj_covar=tensor([0.0187, 0.0188, 0.0235, 0.0253, 0.0247, 0.0204, 0.0215, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:44:42,940 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1534, 1.9526, 1.4429, 0.6064, 1.6588, 1.7758, 1.6004, 1.8152], + device='cuda:5'), covar=tensor([0.0781, 0.0775, 0.1381, 0.1963, 0.1285, 0.2302, 0.2211, 0.0832], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0190, 0.0197, 0.0182, 0.0208, 0.0208, 0.0220, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:44:44,132 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=118040.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:44:59,762 INFO [finetune.py:976] (5/7) Epoch 21, batch 3500, loss[loss=0.1675, simple_loss=0.2439, pruned_loss=0.04554, over 4907.00 frames. ], tot_loss[loss=0.1779, simple_loss=0.2488, pruned_loss=0.05347, over 953915.24 frames. ], batch size: 36, lr: 3.18e-03, grad_scale: 32.0 +2023-03-27 01:45:02,212 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.127e+01 1.501e+02 1.833e+02 2.184e+02 3.839e+02, threshold=3.666e+02, percent-clipped=2.0 +2023-03-27 01:45:37,292 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5345, 2.4419, 2.1389, 2.5289, 2.3795, 2.3141, 2.3708, 3.0054], + device='cuda:5'), covar=tensor([0.3099, 0.3495, 0.2965, 0.3214, 0.3247, 0.2232, 0.3560, 0.1605], + device='cuda:5'), in_proj_covar=tensor([0.0289, 0.0263, 0.0234, 0.0278, 0.0254, 0.0225, 0.0254, 0.0235], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:45:52,332 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=118101.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:45:57,921 INFO [finetune.py:976] (5/7) Epoch 21, batch 3550, loss[loss=0.1525, simple_loss=0.2325, pruned_loss=0.0363, over 4836.00 frames. ], tot_loss[loss=0.1749, simple_loss=0.2451, pruned_loss=0.05234, over 953635.62 frames. ], batch size: 33, lr: 3.18e-03, grad_scale: 32.0 +2023-03-27 01:46:27,271 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.11 vs. limit=2.0 +2023-03-27 01:46:30,093 INFO [finetune.py:976] (5/7) Epoch 21, batch 3600, loss[loss=0.2253, simple_loss=0.2829, pruned_loss=0.08383, over 4888.00 frames. ], tot_loss[loss=0.1737, simple_loss=0.2432, pruned_loss=0.05212, over 955011.60 frames. 
], batch size: 32, lr: 3.18e-03, grad_scale: 32.0 +2023-03-27 01:46:33,041 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.102e+02 1.558e+02 1.902e+02 2.180e+02 3.976e+02, threshold=3.804e+02, percent-clipped=2.0 +2023-03-27 01:46:39,822 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=118168.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:46:51,073 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7130, 1.2615, 0.8751, 1.5857, 2.0664, 1.3155, 1.3971, 1.6455], + device='cuda:5'), covar=tensor([0.1484, 0.2016, 0.1885, 0.1186, 0.1941, 0.1872, 0.1500, 0.1874], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0111, 0.0093, 0.0121, 0.0094, 0.0099, 0.0090], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 01:46:54,040 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5930, 1.5456, 1.3171, 1.7284, 1.8764, 1.7474, 1.2229, 1.3306], + device='cuda:5'), covar=tensor([0.2323, 0.2069, 0.2185, 0.1697, 0.1762, 0.1321, 0.2793, 0.2131], + device='cuda:5'), in_proj_covar=tensor([0.0244, 0.0210, 0.0213, 0.0195, 0.0244, 0.0189, 0.0218, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:47:03,626 INFO [finetune.py:976] (5/7) Epoch 21, batch 3650, loss[loss=0.1391, simple_loss=0.2164, pruned_loss=0.03088, over 4776.00 frames. ], tot_loss[loss=0.1752, simple_loss=0.2449, pruned_loss=0.05276, over 955056.51 frames. ], batch size: 26, lr: 3.18e-03, grad_scale: 32.0 +2023-03-27 01:47:19,808 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5014, 2.2720, 1.8488, 0.9436, 1.9797, 1.8730, 1.7300, 2.0309], + device='cuda:5'), covar=tensor([0.0947, 0.0906, 0.1954, 0.2401, 0.1665, 0.2596, 0.2567, 0.1090], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0191, 0.0197, 0.0182, 0.0208, 0.0208, 0.0221, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:47:19,819 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=118229.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 01:47:36,723 INFO [finetune.py:976] (5/7) Epoch 21, batch 3700, loss[loss=0.1536, simple_loss=0.2394, pruned_loss=0.03387, over 4806.00 frames. ], tot_loss[loss=0.1768, simple_loss=0.2477, pruned_loss=0.05301, over 953753.55 frames. 
], batch size: 38, lr: 3.18e-03, grad_scale: 32.0 +2023-03-27 01:47:37,981 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1614, 2.1028, 2.0410, 1.3926, 2.0251, 2.1907, 2.1399, 1.7173], + device='cuda:5'), covar=tensor([0.0600, 0.0636, 0.0768, 0.0953, 0.0710, 0.0721, 0.0678, 0.1197], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0135, 0.0139, 0.0120, 0.0125, 0.0138, 0.0139, 0.0160], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:47:39,056 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.015e+02 1.607e+02 1.941e+02 2.377e+02 3.454e+02, threshold=3.882e+02, percent-clipped=0.0 +2023-03-27 01:47:46,805 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=118269.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:48:00,441 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=118289.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:48:10,300 INFO [finetune.py:976] (5/7) Epoch 21, batch 3750, loss[loss=0.2218, simple_loss=0.2951, pruned_loss=0.07421, over 4861.00 frames. ], tot_loss[loss=0.1781, simple_loss=0.249, pruned_loss=0.0536, over 951535.94 frames. ], batch size: 34, lr: 3.18e-03, grad_scale: 32.0 +2023-03-27 01:48:26,335 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=118329.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:48:26,957 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=118330.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:48:40,955 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=118350.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:48:40,974 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0063, 1.8899, 1.6150, 1.7341, 1.8103, 1.7838, 1.8310, 2.5089], + device='cuda:5'), covar=tensor([0.3546, 0.3966, 0.2970, 0.3683, 0.3868, 0.2282, 0.3566, 0.1613], + device='cuda:5'), in_proj_covar=tensor([0.0288, 0.0262, 0.0233, 0.0277, 0.0253, 0.0223, 0.0252, 0.0235], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:48:43,715 INFO [finetune.py:976] (5/7) Epoch 21, batch 3800, loss[loss=0.1758, simple_loss=0.2605, pruned_loss=0.04552, over 4914.00 frames. ], tot_loss[loss=0.1782, simple_loss=0.2495, pruned_loss=0.05344, over 953299.65 frames. ], batch size: 46, lr: 3.18e-03, grad_scale: 32.0 +2023-03-27 01:48:46,090 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.364e+01 1.561e+02 1.815e+02 2.293e+02 4.441e+02, threshold=3.631e+02, percent-clipped=1.0 +2023-03-27 01:49:06,633 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=118390.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:49:09,003 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-03-27 01:49:11,214 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118396.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:49:17,067 INFO [finetune.py:976] (5/7) Epoch 21, batch 3850, loss[loss=0.1876, simple_loss=0.2507, pruned_loss=0.06231, over 4843.00 frames. ], tot_loss[loss=0.1778, simple_loss=0.2489, pruned_loss=0.05339, over 953185.29 frames. 
], batch size: 47, lr: 3.18e-03, grad_scale: 32.0 +2023-03-27 01:49:41,131 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1547, 2.1986, 1.9005, 2.4171, 2.8962, 2.2487, 2.2872, 1.6533], + device='cuda:5'), covar=tensor([0.2110, 0.1835, 0.1808, 0.1414, 0.1488, 0.1082, 0.1885, 0.1866], + device='cuda:5'), in_proj_covar=tensor([0.0244, 0.0210, 0.0212, 0.0194, 0.0243, 0.0188, 0.0217, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:49:50,287 INFO [finetune.py:976] (5/7) Epoch 21, batch 3900, loss[loss=0.147, simple_loss=0.2215, pruned_loss=0.03618, over 4756.00 frames. ], tot_loss[loss=0.1762, simple_loss=0.2469, pruned_loss=0.05275, over 954120.94 frames. ], batch size: 26, lr: 3.18e-03, grad_scale: 32.0 +2023-03-27 01:49:52,685 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.028e+02 1.530e+02 1.819e+02 2.327e+02 4.856e+02, threshold=3.639e+02, percent-clipped=2.0 +2023-03-27 01:50:25,012 INFO [finetune.py:976] (5/7) Epoch 21, batch 3950, loss[loss=0.1992, simple_loss=0.2538, pruned_loss=0.07231, over 4760.00 frames. ], tot_loss[loss=0.1728, simple_loss=0.2432, pruned_loss=0.05122, over 955619.95 frames. ], batch size: 27, lr: 3.18e-03, grad_scale: 32.0 +2023-03-27 01:50:28,581 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7024, 1.1983, 0.8821, 1.5824, 2.2254, 1.0675, 1.4562, 1.6027], + device='cuda:5'), covar=tensor([0.1431, 0.2135, 0.1815, 0.1210, 0.1828, 0.1989, 0.1429, 0.1967], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0111, 0.0092, 0.0120, 0.0093, 0.0098, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 01:50:48,086 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118524.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 01:51:16,191 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-03-27 01:51:19,654 INFO [finetune.py:976] (5/7) Epoch 21, batch 4000, loss[loss=0.2257, simple_loss=0.2905, pruned_loss=0.08044, over 4101.00 frames. ], tot_loss[loss=0.1733, simple_loss=0.2432, pruned_loss=0.05168, over 953214.76 frames. ], batch size: 66, lr: 3.18e-03, grad_scale: 32.0 +2023-03-27 01:51:26,596 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.742e+01 1.543e+02 1.809e+02 2.201e+02 4.154e+02, threshold=3.618e+02, percent-clipped=2.0 +2023-03-27 01:51:56,641 INFO [finetune.py:976] (5/7) Epoch 21, batch 4050, loss[loss=0.2084, simple_loss=0.2705, pruned_loss=0.07313, over 4929.00 frames. ], tot_loss[loss=0.1761, simple_loss=0.2468, pruned_loss=0.05269, over 953242.82 frames. ], batch size: 42, lr: 3.18e-03, grad_scale: 32.0 +2023-03-27 01:52:11,486 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118625.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:52:15,696 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=118631.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:52:24,638 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118645.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:52:30,076 INFO [finetune.py:976] (5/7) Epoch 21, batch 4100, loss[loss=0.2082, simple_loss=0.2778, pruned_loss=0.06928, over 4884.00 frames. ], tot_loss[loss=0.1789, simple_loss=0.2505, pruned_loss=0.05371, over 955190.22 frames. 
], batch size: 32, lr: 3.18e-03, grad_scale: 64.0 +2023-03-27 01:52:33,039 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4532, 1.6078, 1.2060, 1.5342, 1.8533, 1.6384, 1.4592, 1.3458], + device='cuda:5'), covar=tensor([0.0354, 0.0270, 0.0641, 0.0306, 0.0186, 0.0565, 0.0285, 0.0402], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0106, 0.0144, 0.0111, 0.0099, 0.0110, 0.0100, 0.0112], + device='cuda:5'), out_proj_covar=tensor([7.5877e-05, 8.1451e-05, 1.1289e-04, 8.5525e-05, 7.6895e-05, 8.1479e-05, + 7.4523e-05, 8.5365e-05], device='cuda:5') +2023-03-27 01:52:33,508 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.828e+01 1.601e+02 1.824e+02 2.338e+02 3.980e+02, threshold=3.647e+02, percent-clipped=1.0 +2023-03-27 01:52:45,580 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7913, 2.1120, 1.6289, 1.8653, 2.5136, 2.4362, 2.0629, 1.9693], + device='cuda:5'), covar=tensor([0.0447, 0.0320, 0.0661, 0.0341, 0.0251, 0.0571, 0.0385, 0.0389], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0106, 0.0143, 0.0111, 0.0099, 0.0110, 0.0100, 0.0112], + device='cuda:5'), out_proj_covar=tensor([7.5695e-05, 8.1213e-05, 1.1248e-04, 8.5251e-05, 7.6701e-05, 8.1238e-05, + 7.4281e-05, 8.5111e-05], device='cuda:5') +2023-03-27 01:52:51,559 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118685.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:52:56,324 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=118692.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:52:58,684 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118696.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:53:03,515 INFO [finetune.py:976] (5/7) Epoch 21, batch 4150, loss[loss=0.2093, simple_loss=0.2747, pruned_loss=0.07192, over 4853.00 frames. ], tot_loss[loss=0.179, simple_loss=0.2506, pruned_loss=0.05368, over 955396.70 frames. ], batch size: 44, lr: 3.18e-03, grad_scale: 64.0 +2023-03-27 01:53:04,881 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3506, 2.2747, 1.9495, 0.9936, 2.1327, 1.8812, 1.7446, 2.1025], + device='cuda:5'), covar=tensor([0.1146, 0.0779, 0.1870, 0.2149, 0.1487, 0.2293, 0.2171, 0.1158], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0191, 0.0198, 0.0182, 0.0209, 0.0208, 0.0222, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:53:13,829 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-03-27 01:53:31,224 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=118744.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:53:37,402 INFO [finetune.py:976] (5/7) Epoch 21, batch 4200, loss[loss=0.1755, simple_loss=0.2464, pruned_loss=0.05231, over 4908.00 frames. ], tot_loss[loss=0.1777, simple_loss=0.25, pruned_loss=0.05274, over 957420.95 frames. ], batch size: 46, lr: 3.18e-03, grad_scale: 64.0 +2023-03-27 01:53:39,821 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.932e+01 1.492e+02 1.761e+02 2.066e+02 3.902e+02, threshold=3.521e+02, percent-clipped=1.0 +2023-03-27 01:54:11,371 INFO [finetune.py:976] (5/7) Epoch 21, batch 4250, loss[loss=0.1597, simple_loss=0.2394, pruned_loss=0.03998, over 4818.00 frames. ], tot_loss[loss=0.1749, simple_loss=0.2469, pruned_loss=0.05143, over 957432.38 frames. 
], batch size: 33, lr: 3.18e-03, grad_scale: 64.0 +2023-03-27 01:54:25,986 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118824.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:54:28,375 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6412, 3.5869, 3.4184, 1.6447, 3.6820, 2.7836, 0.7971, 2.5387], + device='cuda:5'), covar=tensor([0.2516, 0.1855, 0.1612, 0.3440, 0.1075, 0.1038, 0.4418, 0.1564], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0177, 0.0158, 0.0130, 0.0160, 0.0122, 0.0147, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 01:54:45,138 INFO [finetune.py:976] (5/7) Epoch 21, batch 4300, loss[loss=0.1479, simple_loss=0.2248, pruned_loss=0.03551, over 4852.00 frames. ], tot_loss[loss=0.1722, simple_loss=0.2435, pruned_loss=0.05047, over 957004.48 frames. ], batch size: 47, lr: 3.18e-03, grad_scale: 64.0 +2023-03-27 01:54:47,577 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.108e+02 1.563e+02 1.855e+02 2.179e+02 3.656e+02, threshold=3.709e+02, percent-clipped=1.0 +2023-03-27 01:54:57,553 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=118872.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:55:14,849 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1628, 1.9307, 1.8125, 2.1952, 2.5344, 2.1684, 2.1503, 1.6882], + device='cuda:5'), covar=tensor([0.2173, 0.2123, 0.1894, 0.1608, 0.1929, 0.1119, 0.1960, 0.1903], + device='cuda:5'), in_proj_covar=tensor([0.0245, 0.0210, 0.0214, 0.0196, 0.0244, 0.0189, 0.0218, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:55:18,873 INFO [finetune.py:976] (5/7) Epoch 21, batch 4350, loss[loss=0.1397, simple_loss=0.228, pruned_loss=0.02571, over 4925.00 frames. ], tot_loss[loss=0.1697, simple_loss=0.2408, pruned_loss=0.0493, over 956748.95 frames. ], batch size: 38, lr: 3.18e-03, grad_scale: 64.0 +2023-03-27 01:55:20,318 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.08 vs. limit=5.0 +2023-03-27 01:55:33,258 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118925.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:55:48,467 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118945.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:55:59,297 INFO [finetune.py:976] (5/7) Epoch 21, batch 4400, loss[loss=0.1837, simple_loss=0.236, pruned_loss=0.06569, over 4205.00 frames. ], tot_loss[loss=0.1717, simple_loss=0.2422, pruned_loss=0.05064, over 956154.04 frames. 
], batch size: 65, lr: 3.18e-03, grad_scale: 64.0 +2023-03-27 01:56:01,713 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.753e+01 1.466e+02 1.745e+02 2.136e+02 3.634e+02, threshold=3.490e+02, percent-clipped=0.0 +2023-03-27 01:56:18,414 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=118973.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:56:18,470 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5306, 1.4279, 1.3945, 1.4091, 0.8115, 2.3359, 0.7462, 1.2036], + device='cuda:5'), covar=tensor([0.3499, 0.2679, 0.2253, 0.2602, 0.2038, 0.0384, 0.2797, 0.1398], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0117, 0.0121, 0.0124, 0.0114, 0.0097, 0.0095, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 01:56:31,289 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118985.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:56:36,932 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118987.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:56:40,527 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=118993.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:56:51,293 INFO [finetune.py:976] (5/7) Epoch 21, batch 4450, loss[loss=0.2126, simple_loss=0.2999, pruned_loss=0.0626, over 4806.00 frames. ], tot_loss[loss=0.1753, simple_loss=0.2465, pruned_loss=0.0521, over 956067.58 frames. ], batch size: 51, lr: 3.18e-03, grad_scale: 64.0 +2023-03-27 01:57:11,420 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=119033.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:57:16,764 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1573, 1.9268, 1.8683, 0.8635, 2.1486, 2.3632, 2.1345, 1.7620], + device='cuda:5'), covar=tensor([0.0850, 0.0643, 0.0584, 0.0650, 0.0504, 0.0588, 0.0397, 0.0733], + device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0149, 0.0126, 0.0123, 0.0130, 0.0128, 0.0141, 0.0147], + device='cuda:5'), out_proj_covar=tensor([8.9306e-05, 1.0747e-04, 8.9880e-05, 8.6632e-05, 9.1639e-05, 9.1562e-05, + 1.0119e-04, 1.0565e-04], device='cuda:5') +2023-03-27 01:57:20,386 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-27 01:57:25,005 INFO [finetune.py:976] (5/7) Epoch 21, batch 4500, loss[loss=0.1479, simple_loss=0.2085, pruned_loss=0.04359, over 4107.00 frames. ], tot_loss[loss=0.1776, simple_loss=0.2489, pruned_loss=0.05312, over 955288.87 frames. 
], batch size: 17, lr: 3.18e-03, grad_scale: 64.0 +2023-03-27 01:57:27,418 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.079e+02 1.548e+02 1.910e+02 2.429e+02 4.520e+02, threshold=3.820e+02, percent-clipped=3.0 +2023-03-27 01:57:37,164 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7902, 0.9829, 1.7922, 1.7604, 1.5692, 1.5356, 1.6543, 1.7276], + device='cuda:5'), covar=tensor([0.3315, 0.3646, 0.3033, 0.3260, 0.4406, 0.3599, 0.3983, 0.2785], + device='cuda:5'), in_proj_covar=tensor([0.0254, 0.0241, 0.0263, 0.0281, 0.0279, 0.0256, 0.0289, 0.0244], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:57:38,336 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119075.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:57:49,668 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4650, 2.2630, 1.9321, 0.9412, 2.1799, 1.8608, 1.6556, 2.1261], + device='cuda:5'), covar=tensor([0.1029, 0.0886, 0.1898, 0.2201, 0.1542, 0.2403, 0.2369, 0.1121], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0191, 0.0199, 0.0183, 0.0210, 0.0209, 0.0223, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:57:51,483 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119093.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:57:58,454 INFO [finetune.py:976] (5/7) Epoch 21, batch 4550, loss[loss=0.2109, simple_loss=0.2816, pruned_loss=0.07008, over 4927.00 frames. ], tot_loss[loss=0.1796, simple_loss=0.2511, pruned_loss=0.05402, over 954296.34 frames. ], batch size: 42, lr: 3.18e-03, grad_scale: 64.0 +2023-03-27 01:58:19,746 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119136.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 01:58:31,554 INFO [finetune.py:976] (5/7) Epoch 21, batch 4600, loss[loss=0.2309, simple_loss=0.2742, pruned_loss=0.09387, over 4826.00 frames. ], tot_loss[loss=0.1796, simple_loss=0.2514, pruned_loss=0.05393, over 956410.96 frames. ], batch size: 38, lr: 3.18e-03, grad_scale: 64.0 +2023-03-27 01:58:31,664 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119154.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 01:58:34,459 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.081e+02 1.653e+02 1.869e+02 2.317e+02 3.451e+02, threshold=3.738e+02, percent-clipped=0.0 +2023-03-27 01:58:59,855 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1239, 1.9510, 1.7298, 1.7583, 1.8581, 1.8730, 1.8831, 2.5533], + device='cuda:5'), covar=tensor([0.3626, 0.4058, 0.3285, 0.3605, 0.3873, 0.2402, 0.3511, 0.1785], + device='cuda:5'), in_proj_covar=tensor([0.0288, 0.0262, 0.0232, 0.0277, 0.0254, 0.0224, 0.0253, 0.0236], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:59:05,265 INFO [finetune.py:976] (5/7) Epoch 21, batch 4650, loss[loss=0.2008, simple_loss=0.2606, pruned_loss=0.07051, over 4932.00 frames. ], tot_loss[loss=0.1776, simple_loss=0.2484, pruned_loss=0.05342, over 954945.23 frames. 
], batch size: 33, lr: 3.18e-03, grad_scale: 64.0 +2023-03-27 01:59:05,382 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5158, 2.4154, 1.9563, 0.9881, 2.1725, 1.8864, 1.7893, 2.1296], + device='cuda:5'), covar=tensor([0.0934, 0.0781, 0.1767, 0.2088, 0.1412, 0.2315, 0.2255, 0.1058], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0192, 0.0200, 0.0183, 0.0211, 0.0210, 0.0224, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 01:59:38,317 INFO [finetune.py:976] (5/7) Epoch 21, batch 4700, loss[loss=0.1442, simple_loss=0.2171, pruned_loss=0.03562, over 4912.00 frames. ], tot_loss[loss=0.1752, simple_loss=0.2452, pruned_loss=0.05259, over 955741.92 frames. ], batch size: 46, lr: 3.18e-03, grad_scale: 64.0 +2023-03-27 01:59:40,728 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.102e+02 1.521e+02 1.791e+02 2.382e+02 6.096e+02, threshold=3.583e+02, percent-clipped=7.0 +2023-03-27 01:59:59,469 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119287.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 02:00:11,568 INFO [finetune.py:976] (5/7) Epoch 21, batch 4750, loss[loss=0.1713, simple_loss=0.2442, pruned_loss=0.04917, over 4819.00 frames. ], tot_loss[loss=0.1731, simple_loss=0.243, pruned_loss=0.05163, over 956650.22 frames. ], batch size: 33, lr: 3.17e-03, grad_scale: 64.0 +2023-03-27 02:00:31,335 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=119335.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 02:00:32,002 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1666, 2.1404, 2.0257, 2.4469, 2.6320, 2.4584, 2.2053, 1.8597], + device='cuda:5'), covar=tensor([0.2160, 0.1669, 0.1725, 0.1530, 0.1677, 0.1017, 0.2009, 0.1941], + device='cuda:5'), in_proj_covar=tensor([0.0247, 0.0212, 0.0216, 0.0197, 0.0245, 0.0190, 0.0220, 0.0205], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 02:00:42,925 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1969, 1.6372, 2.4759, 3.8445, 2.6954, 2.6693, 0.7215, 3.2845], + device='cuda:5'), covar=tensor([0.1539, 0.1440, 0.1335, 0.0559, 0.0735, 0.1466, 0.2017, 0.0371], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0117, 0.0134, 0.0166, 0.0102, 0.0139, 0.0126, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 02:00:44,658 INFO [finetune.py:976] (5/7) Epoch 21, batch 4800, loss[loss=0.2281, simple_loss=0.2941, pruned_loss=0.08104, over 4849.00 frames. ], tot_loss[loss=0.1771, simple_loss=0.2469, pruned_loss=0.05371, over 956936.86 frames. 
], batch size: 44, lr: 3.17e-03, grad_scale: 64.0 +2023-03-27 02:00:47,502 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.052e+02 1.488e+02 1.781e+02 2.195e+02 3.360e+02, threshold=3.562e+02, percent-clipped=0.0 +2023-03-27 02:00:48,225 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7166, 1.2488, 0.8981, 1.6462, 2.1057, 1.5356, 1.4773, 1.5647], + device='cuda:5'), covar=tensor([0.1498, 0.2142, 0.1867, 0.1167, 0.1840, 0.1849, 0.1394, 0.2009], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0095, 0.0111, 0.0092, 0.0120, 0.0093, 0.0099, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 02:00:55,396 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0471, 1.8870, 1.5749, 1.5915, 1.7473, 1.7517, 1.8331, 2.4774], + device='cuda:5'), covar=tensor([0.3810, 0.3683, 0.3402, 0.3569, 0.3856, 0.2413, 0.3637, 0.1815], + device='cuda:5'), in_proj_covar=tensor([0.0288, 0.0262, 0.0233, 0.0277, 0.0254, 0.0223, 0.0253, 0.0235], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 02:01:22,406 INFO [finetune.py:976] (5/7) Epoch 21, batch 4850, loss[loss=0.1511, simple_loss=0.2251, pruned_loss=0.03849, over 4916.00 frames. ], tot_loss[loss=0.1786, simple_loss=0.2493, pruned_loss=0.05394, over 956303.03 frames. ], batch size: 42, lr: 3.17e-03, grad_scale: 64.0 +2023-03-27 02:01:55,526 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119431.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 02:01:57,237 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-03-27 02:02:15,273 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119449.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 02:02:19,174 INFO [finetune.py:976] (5/7) Epoch 21, batch 4900, loss[loss=0.1507, simple_loss=0.2284, pruned_loss=0.03648, over 4821.00 frames. ], tot_loss[loss=0.1801, simple_loss=0.2513, pruned_loss=0.05447, over 954993.13 frames. ], batch size: 30, lr: 3.17e-03, grad_scale: 32.0 +2023-03-27 02:02:19,295 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.2321, 2.8694, 3.0098, 3.2049, 2.9767, 2.8511, 3.2870, 0.9758], + device='cuda:5'), covar=tensor([0.1147, 0.1136, 0.1215, 0.1180, 0.1915, 0.1950, 0.1306, 0.6063], + device='cuda:5'), in_proj_covar=tensor([0.0353, 0.0246, 0.0283, 0.0295, 0.0337, 0.0287, 0.0306, 0.0301], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 02:02:25,216 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.094e+02 1.684e+02 1.937e+02 2.365e+02 4.201e+02, threshold=3.874e+02, percent-clipped=2.0 +2023-03-27 02:02:39,181 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-03-27 02:02:48,988 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-03-27 02:02:55,581 INFO [finetune.py:976] (5/7) Epoch 21, batch 4950, loss[loss=0.1891, simple_loss=0.2542, pruned_loss=0.06197, over 4838.00 frames. ], tot_loss[loss=0.1798, simple_loss=0.2515, pruned_loss=0.05406, over 955135.69 frames. 
], batch size: 30, lr: 3.17e-03, grad_scale: 32.0 +2023-03-27 02:03:02,770 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119514.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 02:03:07,103 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3817, 1.2059, 1.1152, 1.3032, 1.5712, 1.5340, 1.2983, 1.1976], + device='cuda:5'), covar=tensor([0.0472, 0.0371, 0.0787, 0.0368, 0.0319, 0.0492, 0.0379, 0.0474], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0107, 0.0144, 0.0112, 0.0099, 0.0111, 0.0101, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.6305e-05, 8.1821e-05, 1.1324e-04, 8.6060e-05, 7.7405e-05, 8.2212e-05, + 7.5109e-05, 8.5922e-05], device='cuda:5') +2023-03-27 02:03:14,882 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0456, 1.0149, 1.0123, 0.4535, 0.9321, 1.1681, 1.2055, 1.0223], + device='cuda:5'), covar=tensor([0.0956, 0.0604, 0.0560, 0.0598, 0.0568, 0.0828, 0.0412, 0.0718], + device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0150, 0.0127, 0.0123, 0.0131, 0.0129, 0.0142, 0.0148], + device='cuda:5'), out_proj_covar=tensor([9.0032e-05, 1.0818e-04, 9.0608e-05, 8.7033e-05, 9.2100e-05, 9.1989e-05, + 1.0212e-04, 1.0626e-04], device='cuda:5') +2023-03-27 02:03:25,022 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-03-27 02:03:29,004 INFO [finetune.py:976] (5/7) Epoch 21, batch 5000, loss[loss=0.1798, simple_loss=0.2433, pruned_loss=0.05818, over 4900.00 frames. ], tot_loss[loss=0.177, simple_loss=0.2484, pruned_loss=0.05278, over 951788.68 frames. ], batch size: 32, lr: 3.17e-03, grad_scale: 32.0 +2023-03-27 02:03:32,980 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.061e+02 1.554e+02 1.853e+02 2.138e+02 3.358e+02, threshold=3.705e+02, percent-clipped=0.0 +2023-03-27 02:03:43,303 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119575.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 02:04:00,506 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6677, 1.5008, 1.0505, 0.2441, 1.2731, 1.4809, 1.4455, 1.4828], + device='cuda:5'), covar=tensor([0.0869, 0.0843, 0.1317, 0.1999, 0.1284, 0.2255, 0.2309, 0.0797], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0190, 0.0198, 0.0182, 0.0209, 0.0208, 0.0223, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 02:04:02,228 INFO [finetune.py:976] (5/7) Epoch 21, batch 5050, loss[loss=0.1422, simple_loss=0.2078, pruned_loss=0.03825, over 4272.00 frames. ], tot_loss[loss=0.1748, simple_loss=0.2457, pruned_loss=0.052, over 953585.90 frames. ], batch size: 18, lr: 3.17e-03, grad_scale: 32.0 +2023-03-27 02:04:23,692 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-03-27 02:04:35,255 INFO [finetune.py:976] (5/7) Epoch 21, batch 5100, loss[loss=0.1672, simple_loss=0.2289, pruned_loss=0.05273, over 4763.00 frames. ], tot_loss[loss=0.172, simple_loss=0.2424, pruned_loss=0.05085, over 954849.61 frames. 
], batch size: 54, lr: 3.17e-03, grad_scale: 32.0 +2023-03-27 02:04:38,280 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6272, 1.2127, 0.8964, 1.6774, 2.1089, 1.4326, 1.3731, 1.5552], + device='cuda:5'), covar=tensor([0.1528, 0.2058, 0.2014, 0.1261, 0.1919, 0.2037, 0.1552, 0.2026], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0095, 0.0110, 0.0092, 0.0119, 0.0093, 0.0099, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 02:04:39,201 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.052e+02 1.479e+02 1.750e+02 2.173e+02 3.976e+02, threshold=3.500e+02, percent-clipped=1.0 +2023-03-27 02:04:43,508 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119665.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 02:05:08,856 INFO [finetune.py:976] (5/7) Epoch 21, batch 5150, loss[loss=0.1876, simple_loss=0.2684, pruned_loss=0.05335, over 4855.00 frames. ], tot_loss[loss=0.171, simple_loss=0.2417, pruned_loss=0.05019, over 952016.39 frames. ], batch size: 44, lr: 3.17e-03, grad_scale: 32.0 +2023-03-27 02:05:09,520 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5935, 3.3475, 3.2380, 1.5138, 3.5194, 2.5870, 0.8278, 2.2161], + device='cuda:5'), covar=tensor([0.2281, 0.2381, 0.1557, 0.3360, 0.1198, 0.1068, 0.4068, 0.1623], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0177, 0.0158, 0.0129, 0.0160, 0.0122, 0.0147, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 02:05:24,653 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119726.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 02:05:27,584 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119731.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 02:05:38,884 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119749.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 02:05:42,228 INFO [finetune.py:976] (5/7) Epoch 21, batch 5200, loss[loss=0.1506, simple_loss=0.2155, pruned_loss=0.04281, over 3920.00 frames. ], tot_loss[loss=0.1762, simple_loss=0.2471, pruned_loss=0.05261, over 952089.89 frames. ], batch size: 17, lr: 3.17e-03, grad_scale: 32.0 +2023-03-27 02:05:45,719 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.011e+02 1.656e+02 1.877e+02 2.270e+02 4.720e+02, threshold=3.754e+02, percent-clipped=1.0 +2023-03-27 02:05:59,326 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=119779.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 02:06:08,383 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9204, 1.8266, 2.0044, 1.1282, 1.9879, 2.0402, 1.9219, 1.6494], + device='cuda:5'), covar=tensor([0.0534, 0.0657, 0.0561, 0.0880, 0.0658, 0.0643, 0.0593, 0.1077], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0136, 0.0138, 0.0120, 0.0125, 0.0138, 0.0139, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 02:06:10,788 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=119797.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 02:06:15,131 INFO [finetune.py:976] (5/7) Epoch 21, batch 5250, loss[loss=0.1517, simple_loss=0.2233, pruned_loss=0.04004, over 4747.00 frames. 
], tot_loss[loss=0.1769, simple_loss=0.2486, pruned_loss=0.05266, over 953166.52 frames. ], batch size: 59, lr: 3.17e-03, grad_scale: 32.0 +2023-03-27 02:06:18,112 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0323, 1.8742, 1.6240, 1.7655, 1.8389, 1.8093, 1.8368, 2.5198], + device='cuda:5'), covar=tensor([0.3348, 0.3765, 0.2988, 0.3631, 0.3742, 0.2257, 0.3769, 0.1633], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0260, 0.0231, 0.0275, 0.0252, 0.0222, 0.0252, 0.0233], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 02:06:59,461 INFO [finetune.py:976] (5/7) Epoch 21, batch 5300, loss[loss=0.2603, simple_loss=0.3119, pruned_loss=0.1044, over 4898.00 frames. ], tot_loss[loss=0.1797, simple_loss=0.2511, pruned_loss=0.05417, over 954054.40 frames. ], batch size: 37, lr: 3.17e-03, grad_scale: 32.0 +2023-03-27 02:07:07,166 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.978e+01 1.512e+02 1.747e+02 2.069e+02 4.039e+02, threshold=3.495e+02, percent-clipped=1.0 +2023-03-27 02:07:19,029 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119870.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 02:07:54,831 INFO [finetune.py:976] (5/7) Epoch 21, batch 5350, loss[loss=0.1668, simple_loss=0.2434, pruned_loss=0.04506, over 4871.00 frames. ], tot_loss[loss=0.1791, simple_loss=0.2508, pruned_loss=0.05373, over 953016.88 frames. ], batch size: 31, lr: 3.17e-03, grad_scale: 32.0 +2023-03-27 02:08:28,088 INFO [finetune.py:976] (5/7) Epoch 21, batch 5400, loss[loss=0.1406, simple_loss=0.2112, pruned_loss=0.03494, over 4786.00 frames. ], tot_loss[loss=0.1766, simple_loss=0.2479, pruned_loss=0.05266, over 953888.63 frames. ], batch size: 29, lr: 3.17e-03, grad_scale: 32.0 +2023-03-27 02:08:31,174 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.038e+02 1.474e+02 1.662e+02 2.015e+02 3.492e+02, threshold=3.324e+02, percent-clipped=0.0 +2023-03-27 02:08:36,043 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6447, 1.5201, 1.4921, 1.6454, 1.1503, 3.2672, 1.2924, 1.6452], + device='cuda:5'), covar=tensor([0.3092, 0.2456, 0.2186, 0.2243, 0.1766, 0.0215, 0.2469, 0.1205], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0120, 0.0123, 0.0114, 0.0096, 0.0095, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 02:09:01,966 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-03-27 02:09:02,905 INFO [finetune.py:976] (5/7) Epoch 21, batch 5450, loss[loss=0.1489, simple_loss=0.2263, pruned_loss=0.03573, over 4910.00 frames. ], tot_loss[loss=0.1737, simple_loss=0.2446, pruned_loss=0.05143, over 954309.97 frames. ], batch size: 36, lr: 3.17e-03, grad_scale: 32.0 +2023-03-27 02:09:13,780 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=120021.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 02:09:36,195 INFO [finetune.py:976] (5/7) Epoch 21, batch 5500, loss[loss=0.1661, simple_loss=0.2479, pruned_loss=0.04213, over 4820.00 frames. ], tot_loss[loss=0.1712, simple_loss=0.2415, pruned_loss=0.05048, over 953430.51 frames. 
], batch size: 38, lr: 3.17e-03, grad_scale: 32.0 +2023-03-27 02:09:39,680 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.953e+01 1.422e+02 1.683e+02 2.099e+02 3.794e+02, threshold=3.366e+02, percent-clipped=2.0 +2023-03-27 02:10:09,946 INFO [finetune.py:976] (5/7) Epoch 21, batch 5550, loss[loss=0.1648, simple_loss=0.2408, pruned_loss=0.04437, over 4925.00 frames. ], tot_loss[loss=0.1717, simple_loss=0.2425, pruned_loss=0.05046, over 954318.20 frames. ], batch size: 38, lr: 3.17e-03, grad_scale: 32.0 +2023-03-27 02:10:35,880 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5431, 1.3702, 2.1134, 3.1300, 2.0303, 2.1323, 1.0871, 2.5862], + device='cuda:5'), covar=tensor([0.1704, 0.1508, 0.1153, 0.0668, 0.0874, 0.1665, 0.1723, 0.0558], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0117, 0.0134, 0.0166, 0.0102, 0.0139, 0.0126, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 02:10:42,282 INFO [finetune.py:976] (5/7) Epoch 21, batch 5600, loss[loss=0.1788, simple_loss=0.2606, pruned_loss=0.04853, over 4904.00 frames. ], tot_loss[loss=0.1743, simple_loss=0.2464, pruned_loss=0.05108, over 955569.54 frames. ], batch size: 36, lr: 3.17e-03, grad_scale: 32.0 +2023-03-27 02:10:45,197 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.030e+02 1.550e+02 1.831e+02 2.203e+02 3.727e+02, threshold=3.662e+02, percent-clipped=1.0 +2023-03-27 02:10:51,654 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.13 vs. limit=2.0 +2023-03-27 02:10:52,154 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=120170.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 02:11:12,067 INFO [finetune.py:976] (5/7) Epoch 21, batch 5650, loss[loss=0.1594, simple_loss=0.2458, pruned_loss=0.03649, over 4902.00 frames. ], tot_loss[loss=0.1757, simple_loss=0.2484, pruned_loss=0.05152, over 953629.00 frames. ], batch size: 35, lr: 3.17e-03, grad_scale: 32.0 +2023-03-27 02:11:12,770 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0197, 1.8995, 1.6291, 1.8209, 1.7967, 1.7863, 1.7775, 2.5376], + device='cuda:5'), covar=tensor([0.3663, 0.3876, 0.3218, 0.3901, 0.4108, 0.2298, 0.3821, 0.1781], + device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0262, 0.0232, 0.0277, 0.0253, 0.0222, 0.0252, 0.0234], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 02:11:20,666 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=120218.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 02:11:30,715 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7668, 1.3122, 0.9408, 1.6791, 2.2243, 1.2489, 1.5148, 1.6961], + device='cuda:5'), covar=tensor([0.1357, 0.1894, 0.1728, 0.1121, 0.1584, 0.1761, 0.1416, 0.1872], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0095, 0.0110, 0.0092, 0.0120, 0.0093, 0.0099, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 02:11:41,961 INFO [finetune.py:976] (5/7) Epoch 21, batch 5700, loss[loss=0.1461, simple_loss=0.2047, pruned_loss=0.04376, over 4244.00 frames. ], tot_loss[loss=0.1728, simple_loss=0.244, pruned_loss=0.05076, over 934299.07 frames. 
], batch size: 18, lr: 3.17e-03, grad_scale: 32.0 +2023-03-27 02:11:44,947 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.012e+02 1.458e+02 1.705e+02 2.128e+02 3.595e+02, threshold=3.409e+02, percent-clipped=0.0 +2023-03-27 02:12:12,192 INFO [finetune.py:976] (5/7) Epoch 22, batch 0, loss[loss=0.1915, simple_loss=0.2632, pruned_loss=0.05994, over 4921.00 frames. ], tot_loss[loss=0.1915, simple_loss=0.2632, pruned_loss=0.05994, over 4921.00 frames. ], batch size: 42, lr: 3.16e-03, grad_scale: 32.0 +2023-03-27 02:12:12,192 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-27 02:12:31,960 INFO [finetune.py:1010] (5/7) Epoch 22, validation: loss=0.1597, simple_loss=0.228, pruned_loss=0.04574, over 2265189.00 frames. +2023-03-27 02:12:31,961 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-27 02:12:35,246 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3642, 2.5120, 2.2844, 1.7632, 2.3067, 2.5762, 2.7183, 2.0364], + device='cuda:5'), covar=tensor([0.0733, 0.0680, 0.0835, 0.0936, 0.0969, 0.0856, 0.0668, 0.1318], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0137, 0.0139, 0.0121, 0.0125, 0.0139, 0.0140, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 02:13:15,375 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=120321.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 02:13:26,046 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. limit=2.0 +2023-03-27 02:13:27,846 INFO [finetune.py:976] (5/7) Epoch 22, batch 50, loss[loss=0.1646, simple_loss=0.2365, pruned_loss=0.04633, over 4701.00 frames. ], tot_loss[loss=0.1809, simple_loss=0.2515, pruned_loss=0.05514, over 215334.85 frames. 
], batch size: 54, lr: 3.16e-03, grad_scale: 32.0 +2023-03-27 02:13:45,818 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7736, 1.0151, 1.8405, 1.7417, 1.5709, 1.5158, 1.6789, 1.7355], + device='cuda:5'), covar=tensor([0.3405, 0.3466, 0.2675, 0.3026, 0.3926, 0.3183, 0.3664, 0.2627], + device='cuda:5'), in_proj_covar=tensor([0.0256, 0.0243, 0.0264, 0.0284, 0.0282, 0.0259, 0.0292, 0.0247], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 02:13:47,645 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2021, 2.0743, 1.7094, 2.0547, 2.0930, 1.8272, 2.3891, 2.1100], + device='cuda:5'), covar=tensor([0.1317, 0.2051, 0.3236, 0.2638, 0.2606, 0.1804, 0.2949, 0.1955], + device='cuda:5'), in_proj_covar=tensor([0.0187, 0.0188, 0.0235, 0.0253, 0.0246, 0.0203, 0.0215, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 02:13:48,216 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5901, 2.3801, 3.1766, 4.5617, 3.2965, 3.2094, 1.2189, 3.8392], + device='cuda:5'), covar=tensor([0.1480, 0.1245, 0.1174, 0.0387, 0.0638, 0.1159, 0.1992, 0.0344], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0116, 0.0134, 0.0165, 0.0101, 0.0138, 0.0125, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 02:13:48,726 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.043e+02 1.631e+02 1.972e+02 2.363e+02 4.295e+02, threshold=3.943e+02, percent-clipped=3.0 +2023-03-27 02:13:55,432 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=120369.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 02:14:04,335 INFO [finetune.py:976] (5/7) Epoch 22, batch 100, loss[loss=0.1689, simple_loss=0.2362, pruned_loss=0.05085, over 4887.00 frames. ], tot_loss[loss=0.1759, simple_loss=0.2462, pruned_loss=0.05281, over 379711.10 frames. ], batch size: 32, lr: 3.16e-03, grad_scale: 32.0 +2023-03-27 02:14:35,065 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-03-27 02:14:36,539 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4624, 1.3500, 1.3307, 1.3398, 0.8243, 2.2512, 0.7373, 1.1441], + device='cuda:5'), covar=tensor([0.3094, 0.2556, 0.2219, 0.2342, 0.1886, 0.0337, 0.2756, 0.1307], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0121, 0.0124, 0.0114, 0.0096, 0.0095, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 02:14:37,019 INFO [finetune.py:976] (5/7) Epoch 22, batch 150, loss[loss=0.1915, simple_loss=0.2536, pruned_loss=0.06465, over 4847.00 frames. ], tot_loss[loss=0.174, simple_loss=0.2428, pruned_loss=0.05263, over 508086.59 frames. ], batch size: 44, lr: 3.16e-03, grad_scale: 32.0 +2023-03-27 02:14:54,532 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-03-27 02:14:55,336 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.054e+02 1.445e+02 1.737e+02 2.098e+02 4.550e+02, threshold=3.473e+02, percent-clipped=2.0 +2023-03-27 02:15:10,229 INFO [finetune.py:976] (5/7) Epoch 22, batch 200, loss[loss=0.1654, simple_loss=0.2394, pruned_loss=0.04567, over 4838.00 frames. 
], tot_loss[loss=0.1722, simple_loss=0.2406, pruned_loss=0.0519, over 607783.89 frames. ], batch size: 30, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:15:42,764 INFO [finetune.py:976] (5/7) Epoch 22, batch 250, loss[loss=0.1648, simple_loss=0.257, pruned_loss=0.03636, over 4851.00 frames. ], tot_loss[loss=0.1755, simple_loss=0.2446, pruned_loss=0.05315, over 685843.07 frames. ], batch size: 49, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:15:58,088 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-03-27 02:16:01,906 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.094e+02 1.519e+02 1.843e+02 2.180e+02 3.548e+02, threshold=3.686e+02, percent-clipped=1.0
+2023-03-27 02:16:16,409 INFO [finetune.py:976] (5/7) Epoch 22, batch 300, loss[loss=0.1693, simple_loss=0.2439, pruned_loss=0.04736, over 4922.00 frames. ], tot_loss[loss=0.1787, simple_loss=0.2494, pruned_loss=0.05397, over 747475.11 frames. ], batch size: 38, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:16:42,755 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2779, 2.0359, 1.5317, 0.6459, 1.6564, 1.9088, 1.8744, 1.8507],
+ device='cuda:5'), covar=tensor([0.0779, 0.0781, 0.1395, 0.1895, 0.1297, 0.1986, 0.1925, 0.0831],
+ device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0191, 0.0198, 0.0183, 0.0209, 0.0207, 0.0222, 0.0195],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:16:50,473 INFO [finetune.py:976] (5/7) Epoch 22, batch 350, loss[loss=0.205, simple_loss=0.2783, pruned_loss=0.06583, over 4862.00 frames. ], tot_loss[loss=0.1792, simple_loss=0.25, pruned_loss=0.05417, over 790633.59 frames. ], batch size: 34, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:17:09,292 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.085e+02 1.647e+02 1.881e+02 2.280e+02 4.594e+02, threshold=3.762e+02, percent-clipped=3.0
+2023-03-27 02:17:19,808 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=120676.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:17:23,372 INFO [finetune.py:976] (5/7) Epoch 22, batch 400, loss[loss=0.1802, simple_loss=0.2637, pruned_loss=0.04838, over 4903.00 frames. ], tot_loss[loss=0.1792, simple_loss=0.2509, pruned_loss=0.05374, over 826731.62 frames. ], batch size: 46, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:17:41,254 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7595, 1.7294, 1.4894, 1.7773, 2.3165, 1.8725, 1.5664, 1.4175],
+ device='cuda:5'), covar=tensor([0.2643, 0.2358, 0.2396, 0.2015, 0.1807, 0.1508, 0.2788, 0.2496],
+ device='cuda:5'), in_proj_covar=tensor([0.0244, 0.0209, 0.0213, 0.0195, 0.0243, 0.0189, 0.0217, 0.0203],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:18:12,315 INFO [finetune.py:976] (5/7) Epoch 22, batch 450, loss[loss=0.1765, simple_loss=0.2569, pruned_loss=0.04806, over 4822.00 frames. ], tot_loss[loss=0.1778, simple_loss=0.2494, pruned_loss=0.05308, over 856482.59 frames. ], batch size: 38, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:18:20,749 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=120737.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:18:39,827 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0892, 2.0336, 2.0769, 1.5819, 2.0890, 2.2002, 2.2024, 1.6574],
+ device='cuda:5'), covar=tensor([0.0531, 0.0544, 0.0623, 0.0729, 0.0657, 0.0559, 0.0469, 0.1100],
+ device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0137, 0.0141, 0.0121, 0.0126, 0.0140, 0.0140, 0.0163],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:18:43,318 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.117e+02 1.581e+02 1.866e+02 2.243e+02 3.725e+02, threshold=3.731e+02, percent-clipped=0.0
+2023-03-27 02:18:47,369 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9725, 1.8416, 1.6044, 1.6200, 1.7509, 1.7556, 1.7748, 2.4392],
+ device='cuda:5'), covar=tensor([0.3286, 0.3630, 0.2815, 0.3250, 0.3397, 0.2145, 0.3262, 0.1463],
+ device='cuda:5'), in_proj_covar=tensor([0.0290, 0.0264, 0.0234, 0.0278, 0.0256, 0.0225, 0.0254, 0.0235],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:19:07,337 INFO [finetune.py:976] (5/7) Epoch 22, batch 500, loss[loss=0.1862, simple_loss=0.2549, pruned_loss=0.05872, over 4715.00 frames. ], tot_loss[loss=0.1749, simple_loss=0.2466, pruned_loss=0.05161, over 876766.06 frames. ], batch size: 59, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:19:14,671 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0518, 1.5938, 1.6910, 0.7926, 2.0010, 2.1703, 1.9423, 1.6243],
+ device='cuda:5'), covar=tensor([0.1071, 0.1041, 0.0698, 0.0773, 0.0605, 0.0665, 0.0654, 0.0877],
+ device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0149, 0.0125, 0.0123, 0.0130, 0.0128, 0.0140, 0.0147],
+ device='cuda:5'), out_proj_covar=tensor([8.9041e-05, 1.0732e-04, 8.9627e-05, 8.6422e-05, 9.1631e-05, 9.1369e-05,
+ 1.0067e-04, 1.0563e-04], device='cuda:5')
+2023-03-27 02:19:20,816 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0
+2023-03-27 02:19:40,279 INFO [finetune.py:976] (5/7) Epoch 22, batch 550, loss[loss=0.1505, simple_loss=0.2126, pruned_loss=0.04416, over 4928.00 frames. ], tot_loss[loss=0.1727, simple_loss=0.2433, pruned_loss=0.05105, over 894693.89 frames. ], batch size: 33, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:19:49,975 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.8263, 3.3586, 3.4874, 3.6979, 3.5892, 3.3728, 3.9139, 1.2349],
+ device='cuda:5'), covar=tensor([0.0941, 0.0903, 0.0956, 0.1148, 0.1467, 0.1634, 0.0854, 0.5885],
+ device='cuda:5'), in_proj_covar=tensor([0.0347, 0.0241, 0.0277, 0.0290, 0.0330, 0.0281, 0.0301, 0.0297],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:19:58,119 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.063e+02 1.544e+02 1.778e+02 2.172e+02 3.720e+02, threshold=3.555e+02, percent-clipped=0.0
+2023-03-27 02:20:13,118 INFO [finetune.py:976] (5/7) Epoch 22, batch 600, loss[loss=0.1516, simple_loss=0.2264, pruned_loss=0.0384, over 4819.00 frames. ], tot_loss[loss=0.1721, simple_loss=0.2428, pruned_loss=0.0507, over 908563.54 frames. ], batch size: 39, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:20:46,528 INFO [finetune.py:976] (5/7) Epoch 22, batch 650, loss[loss=0.2386, simple_loss=0.3102, pruned_loss=0.08347, over 4809.00 frames. ], tot_loss[loss=0.1736, simple_loss=0.2447, pruned_loss=0.05126, over 915927.84 frames. ], batch size: 45, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:21:04,770 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.113e+02 1.580e+02 1.877e+02 2.237e+02 3.344e+02, threshold=3.754e+02, percent-clipped=0.0
+2023-03-27 02:21:20,033 INFO [finetune.py:976] (5/7) Epoch 22, batch 700, loss[loss=0.165, simple_loss=0.2447, pruned_loss=0.04272, over 4861.00 frames. ], tot_loss[loss=0.1778, simple_loss=0.249, pruned_loss=0.05329, over 923403.30 frames. ], batch size: 31, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:21:32,894 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=121003.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:21:43,175 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6629, 1.3318, 2.1023, 3.2325, 2.1745, 2.4180, 1.0924, 2.6670],
+ device='cuda:5'), covar=tensor([0.1841, 0.1834, 0.1517, 0.0744, 0.0910, 0.1780, 0.1903, 0.0565],
+ device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0117, 0.0135, 0.0166, 0.0102, 0.0138, 0.0126, 0.0101],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-27 02:21:53,221 INFO [finetune.py:976] (5/7) Epoch 22, batch 750, loss[loss=0.1536, simple_loss=0.2244, pruned_loss=0.04145, over 4791.00 frames. ], tot_loss[loss=0.1793, simple_loss=0.2505, pruned_loss=0.05399, over 931306.67 frames. ], batch size: 25, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:21:53,297 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121032.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:22:04,359 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0910, 1.9553, 2.5697, 3.8064, 2.7261, 2.8356, 1.2225, 3.1048],
+ device='cuda:5'), covar=tensor([0.1604, 0.1249, 0.1262, 0.0556, 0.0714, 0.1112, 0.1912, 0.0458],
+ device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0118, 0.0136, 0.0166, 0.0102, 0.0139, 0.0126, 0.0101],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-27 02:22:09,807 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.017e+02 1.617e+02 1.906e+02 2.410e+02 4.829e+02, threshold=3.812e+02, percent-clipped=5.0
+2023-03-27 02:22:12,356 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.05 vs. limit=5.0
+2023-03-27 02:22:14,839 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=121064.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:22:23,412 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0
+2023-03-27 02:22:26,766 INFO [finetune.py:976] (5/7) Epoch 22, batch 800, loss[loss=0.1605, simple_loss=0.2337, pruned_loss=0.04368, over 4928.00 frames. ], tot_loss[loss=0.1783, simple_loss=0.2498, pruned_loss=0.05336, over 935936.05 frames. ], batch size: 38, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:22:47,251 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0824, 1.3137, 0.9550, 1.9682, 2.4346, 1.8722, 1.5474, 1.9281],
+ device='cuda:5'), covar=tensor([0.1334, 0.1973, 0.1906, 0.1037, 0.1745, 0.1805, 0.1349, 0.1780],
+ device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0111, 0.0092, 0.0120, 0.0094, 0.0099, 0.0089],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003],
+ device='cuda:5')
+2023-03-27 02:23:10,265 INFO [finetune.py:976] (5/7) Epoch 22, batch 850, loss[loss=0.2021, simple_loss=0.2595, pruned_loss=0.0723, over 4911.00 frames. ], tot_loss[loss=0.1764, simple_loss=0.2475, pruned_loss=0.05271, over 939331.75 frames. ], batch size: 46, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:23:28,703 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.269e+01 1.509e+02 1.830e+02 2.168e+02 4.982e+02, threshold=3.659e+02, percent-clipped=1.0
+2023-03-27 02:23:37,212 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=121164.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:23:58,239 INFO [finetune.py:976] (5/7) Epoch 22, batch 900, loss[loss=0.1568, simple_loss=0.2296, pruned_loss=0.04194, over 4749.00 frames. ], tot_loss[loss=0.1744, simple_loss=0.2448, pruned_loss=0.05199, over 943626.90 frames. ], batch size: 27, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:24:15,618 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1569, 1.7719, 1.7943, 0.8824, 2.0487, 2.1700, 2.0143, 1.6814],
+ device='cuda:5'), covar=tensor([0.0800, 0.0651, 0.0537, 0.0654, 0.0548, 0.0654, 0.0408, 0.0783],
+ device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0149, 0.0125, 0.0123, 0.0130, 0.0129, 0.0141, 0.0147],
+ device='cuda:5'), out_proj_covar=tensor([8.9121e-05, 1.0800e-04, 8.9650e-05, 8.6456e-05, 9.1731e-05, 9.1723e-05,
+ 1.0083e-04, 1.0541e-04], device='cuda:5')
+2023-03-27 02:24:28,377 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6334, 1.9129, 1.5389, 1.6922, 2.2950, 2.2335, 1.9242, 1.8170],
+ device='cuda:5'), covar=tensor([0.0500, 0.0341, 0.0632, 0.0321, 0.0296, 0.0615, 0.0365, 0.0422],
+ device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0106, 0.0143, 0.0111, 0.0099, 0.0110, 0.0100, 0.0112],
+ device='cuda:5'), out_proj_covar=tensor([7.5820e-05, 8.1667e-05, 1.1225e-04, 8.5052e-05, 7.6635e-05, 8.1623e-05,
+ 7.4332e-05, 8.5275e-05], device='cuda:5')
+2023-03-27 02:24:38,344 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=121225.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:24:42,941 INFO [finetune.py:976] (5/7) Epoch 22, batch 950, loss[loss=0.195, simple_loss=0.272, pruned_loss=0.059, over 4801.00 frames. ], tot_loss[loss=0.1733, simple_loss=0.2433, pruned_loss=0.05169, over 948204.24 frames. ], batch size: 51, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:24:59,304 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.960e+01 1.496e+02 1.804e+02 2.225e+02 4.174e+02, threshold=3.608e+02, percent-clipped=1.0
+2023-03-27 02:25:15,678 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5708, 3.9136, 3.7520, 1.7983, 4.0643, 2.9859, 0.7776, 2.7326],
+ device='cuda:5'), covar=tensor([0.2501, 0.2260, 0.1355, 0.3369, 0.0871, 0.1066, 0.4574, 0.1517],
+ device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0177, 0.0158, 0.0130, 0.0159, 0.0122, 0.0147, 0.0124],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-27 02:25:16,229 INFO [finetune.py:976] (5/7) Epoch 22, batch 1000, loss[loss=0.1896, simple_loss=0.2604, pruned_loss=0.05937, over 4835.00 frames. ], tot_loss[loss=0.1759, simple_loss=0.2458, pruned_loss=0.05301, over 950903.02 frames. ], batch size: 30, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:25:19,344 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4765, 1.4153, 2.2140, 1.7138, 1.7468, 3.8387, 1.3686, 1.5977],
+ device='cuda:5'), covar=tensor([0.0935, 0.1835, 0.1228, 0.0985, 0.1551, 0.0225, 0.1589, 0.1821],
+ device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0081, 0.0073, 0.0076, 0.0091, 0.0080, 0.0085, 0.0079],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 02:25:29,305 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.83 vs. limit=5.0
+2023-03-27 02:25:33,746 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=121311.0, num_to_drop=1, layers_to_drop={0}
+2023-03-27 02:25:45,363 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8758, 1.6861, 1.6258, 1.9404, 2.0428, 1.9603, 1.4107, 1.5934],
+ device='cuda:5'), covar=tensor([0.1791, 0.1724, 0.1610, 0.1420, 0.1319, 0.0989, 0.2341, 0.1670],
+ device='cuda:5'), in_proj_covar=tensor([0.0245, 0.0211, 0.0215, 0.0196, 0.0243, 0.0190, 0.0218, 0.0204],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:25:49,277 INFO [finetune.py:976] (5/7) Epoch 22, batch 1050, loss[loss=0.1243, simple_loss=0.1859, pruned_loss=0.03135, over 4417.00 frames. ], tot_loss[loss=0.1767, simple_loss=0.2475, pruned_loss=0.0529, over 951756.85 frames. ], batch size: 19, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:25:49,381 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121332.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:25:57,932 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0
+2023-03-27 02:26:05,530 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.073e+02 1.613e+02 2.056e+02 2.633e+02 6.948e+02, threshold=4.113e+02, percent-clipped=5.0
+2023-03-27 02:26:05,624 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121359.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:26:09,271 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6780, 2.4193, 2.9808, 1.8176, 2.5768, 2.9291, 2.0957, 3.0118],
+ device='cuda:5'), covar=tensor([0.1281, 0.1731, 0.1540, 0.2310, 0.1067, 0.1515, 0.2633, 0.0930],
+ device='cuda:5'), in_proj_covar=tensor([0.0191, 0.0204, 0.0191, 0.0189, 0.0174, 0.0214, 0.0216, 0.0198],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:26:09,894 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0615, 1.7709, 2.3639, 1.6110, 2.0825, 2.2595, 1.6393, 2.3876],
+ device='cuda:5'), covar=tensor([0.1004, 0.1685, 0.1363, 0.1866, 0.0722, 0.1235, 0.2533, 0.0674],
+ device='cuda:5'), in_proj_covar=tensor([0.0191, 0.0204, 0.0191, 0.0189, 0.0174, 0.0214, 0.0216, 0.0198],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:26:13,979 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=121372.0, num_to_drop=1, layers_to_drop={1}
+2023-03-27 02:26:19,241 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=121380.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:26:20,878 INFO [finetune.py:976] (5/7) Epoch 22, batch 1100, loss[loss=0.1585, simple_loss=0.2322, pruned_loss=0.04243, over 4789.00 frames. ], tot_loss[loss=0.1763, simple_loss=0.2468, pruned_loss=0.05288, over 950238.54 frames. ], batch size: 51, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:26:26,916 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9045, 1.8093, 1.9773, 1.1612, 1.8801, 1.9771, 1.8368, 1.4906],
+ device='cuda:5'), covar=tensor([0.0526, 0.0695, 0.0618, 0.0919, 0.0748, 0.0657, 0.0693, 0.1283],
+ device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0137, 0.0141, 0.0121, 0.0127, 0.0140, 0.0141, 0.0164],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:26:36,783 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=121407.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:26:53,106 INFO [finetune.py:976] (5/7) Epoch 22, batch 1150, loss[loss=0.2172, simple_loss=0.2889, pruned_loss=0.0728, over 4923.00 frames. ], tot_loss[loss=0.1769, simple_loss=0.2479, pruned_loss=0.05297, over 951684.58 frames. ], batch size: 42, lr: 3.16e-03, grad_scale: 32.0
+2023-03-27 02:27:10,452 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.055e+01 1.683e+02 1.964e+02 2.424e+02 3.625e+02, threshold=3.928e+02, percent-clipped=0.0
+2023-03-27 02:27:15,966 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=121468.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:27:25,673 INFO [finetune.py:976] (5/7) Epoch 22, batch 1200, loss[loss=0.1582, simple_loss=0.2255, pruned_loss=0.04547, over 4895.00 frames. ], tot_loss[loss=0.1764, simple_loss=0.2473, pruned_loss=0.05281, over 953088.33 frames. ], batch size: 32, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:27:30,873 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6926, 1.7352, 2.4488, 1.9042, 2.0772, 4.4128, 1.7692, 1.8041],
+ device='cuda:5'), covar=tensor([0.0985, 0.1818, 0.0999, 0.1037, 0.1483, 0.0232, 0.1425, 0.1897],
+ device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0081, 0.0073, 0.0076, 0.0091, 0.0081, 0.0085, 0.0079],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 02:27:49,399 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0
+2023-03-27 02:27:49,693 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121520.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:27:57,413 INFO [finetune.py:976] (5/7) Epoch 22, batch 1250, loss[loss=0.1375, simple_loss=0.2002, pruned_loss=0.03745, over 4230.00 frames. ], tot_loss[loss=0.174, simple_loss=0.2449, pruned_loss=0.05155, over 952701.81 frames. ], batch size: 18, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:28:26,676 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.015e+02 1.487e+02 1.790e+02 2.203e+02 4.512e+02, threshold=3.581e+02, percent-clipped=1.0
+2023-03-27 02:28:36,128 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.68 vs. limit=2.0
+2023-03-27 02:28:40,602 INFO [finetune.py:976] (5/7) Epoch 22, batch 1300, loss[loss=0.1046, simple_loss=0.1869, pruned_loss=0.01115, over 4763.00 frames. ], tot_loss[loss=0.1719, simple_loss=0.2426, pruned_loss=0.05059, over 954747.16 frames. ], batch size: 28, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:29:39,005 INFO [finetune.py:976] (5/7) Epoch 22, batch 1350, loss[loss=0.1873, simple_loss=0.258, pruned_loss=0.05833, over 4850.00 frames. ], tot_loss[loss=0.1723, simple_loss=0.2431, pruned_loss=0.05077, over 954633.76 frames. ], batch size: 47, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:30:02,100 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.135e+02 1.546e+02 1.913e+02 2.289e+02 6.231e+02, threshold=3.826e+02, percent-clipped=1.0
+2023-03-27 02:30:02,212 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121659.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:30:07,024 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121667.0, num_to_drop=1, layers_to_drop={3}
+2023-03-27 02:30:07,713 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1535, 2.0931, 1.7456, 2.1506, 1.9762, 1.9565, 1.9578, 2.7686],
+ device='cuda:5'), covar=tensor([0.3879, 0.4441, 0.3363, 0.3684, 0.4241, 0.2471, 0.4079, 0.1685],
+ device='cuda:5'), in_proj_covar=tensor([0.0289, 0.0263, 0.0233, 0.0277, 0.0255, 0.0225, 0.0254, 0.0235],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:30:11,370 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5594, 2.4301, 2.0330, 2.4724, 2.4636, 2.2078, 2.6872, 2.5211],
+ device='cuda:5'), covar=tensor([0.1314, 0.1882, 0.2916, 0.2358, 0.2310, 0.1626, 0.2305, 0.1695],
+ device='cuda:5'), in_proj_covar=tensor([0.0188, 0.0190, 0.0237, 0.0256, 0.0249, 0.0205, 0.0216, 0.0203],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:30:16,103 INFO [finetune.py:976] (5/7) Epoch 22, batch 1400, loss[loss=0.1496, simple_loss=0.2351, pruned_loss=0.03203, over 4829.00 frames. ], tot_loss[loss=0.1745, simple_loss=0.2458, pruned_loss=0.05158, over 953516.43 frames. ], batch size: 51, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:30:34,338 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=121707.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:30:47,435 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0
+2023-03-27 02:30:49,372 INFO [finetune.py:976] (5/7) Epoch 22, batch 1450, loss[loss=0.1643, simple_loss=0.2428, pruned_loss=0.04286, over 4828.00 frames. ], tot_loss[loss=0.1745, simple_loss=0.2462, pruned_loss=0.05146, over 952592.25 frames. ], batch size: 47, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:31:08,591 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.158e+02 1.585e+02 1.838e+02 2.176e+02 4.319e+02, threshold=3.676e+02, percent-clipped=1.0
+2023-03-27 02:31:11,108 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121763.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:31:22,464 INFO [finetune.py:976] (5/7) Epoch 22, batch 1500, loss[loss=0.1668, simple_loss=0.2527, pruned_loss=0.04048, over 4821.00 frames. ], tot_loss[loss=0.1751, simple_loss=0.2473, pruned_loss=0.0515, over 952997.54 frames. ], batch size: 39, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:31:40,333 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0
+2023-03-27 02:31:48,543 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9346, 3.0818, 2.7091, 2.0635, 2.8360, 3.0638, 3.2091, 2.7050],
+ device='cuda:5'), covar=tensor([0.0603, 0.0566, 0.0696, 0.0810, 0.0626, 0.0663, 0.0564, 0.0903],
+ device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0137, 0.0141, 0.0121, 0.0127, 0.0139, 0.0141, 0.0164],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:31:48,892 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0
+2023-03-27 02:31:49,156 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121820.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:31:56,352 INFO [finetune.py:976] (5/7) Epoch 22, batch 1550, loss[loss=0.165, simple_loss=0.2511, pruned_loss=0.03944, over 4916.00 frames. ], tot_loss[loss=0.1752, simple_loss=0.2472, pruned_loss=0.0516, over 953249.06 frames. ], batch size: 38, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:32:15,624 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.105e+02 1.581e+02 1.856e+02 2.152e+02 3.350e+02, threshold=3.712e+02, percent-clipped=0.0
+2023-03-27 02:32:21,172 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=121868.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:32:29,537 INFO [finetune.py:976] (5/7) Epoch 22, batch 1600, loss[loss=0.1697, simple_loss=0.242, pruned_loss=0.04865, over 4898.00 frames. ], tot_loss[loss=0.1747, simple_loss=0.2461, pruned_loss=0.05164, over 952573.16 frames. ], batch size: 32, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:32:51,332 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.70 vs. limit=5.0
+2023-03-27 02:33:02,675 INFO [finetune.py:976] (5/7) Epoch 22, batch 1650, loss[loss=0.1568, simple_loss=0.206, pruned_loss=0.05386, over 4299.00 frames. ], tot_loss[loss=0.1734, simple_loss=0.2441, pruned_loss=0.05133, over 951905.17 frames. ], batch size: 65, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:33:09,518 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3270, 2.2329, 1.7501, 2.2495, 2.2352, 1.8990, 2.6087, 2.2747],
+ device='cuda:5'), covar=tensor([0.1259, 0.2071, 0.2865, 0.2494, 0.2349, 0.1527, 0.2905, 0.1648],
+ device='cuda:5'), in_proj_covar=tensor([0.0187, 0.0188, 0.0236, 0.0255, 0.0248, 0.0204, 0.0215, 0.0202],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:33:13,713 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=121950.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:33:22,468 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.023e+02 1.553e+02 1.837e+02 2.107e+02 3.976e+02, threshold=3.675e+02, percent-clipped=1.0
+2023-03-27 02:33:33,831 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121967.0, num_to_drop=1, layers_to_drop={2}
+2023-03-27 02:33:46,461 INFO [finetune.py:976] (5/7) Epoch 22, batch 1700, loss[loss=0.1599, simple_loss=0.2296, pruned_loss=0.04504, over 4939.00 frames. ], tot_loss[loss=0.171, simple_loss=0.2412, pruned_loss=0.0504, over 953025.43 frames. ], batch size: 33, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:33:48,501 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.46 vs. limit=5.0
+2023-03-27 02:34:13,898 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122011.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:34:16,848 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=122015.0, num_to_drop=1, layers_to_drop={1}
+2023-03-27 02:34:36,865 INFO [finetune.py:976] (5/7) Epoch 22, batch 1750, loss[loss=0.1764, simple_loss=0.2511, pruned_loss=0.05083, over 4896.00 frames. ], tot_loss[loss=0.1733, simple_loss=0.2436, pruned_loss=0.0515, over 951719.68 frames. ], batch size: 35, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:34:44,368 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8568, 1.7896, 1.7394, 1.8249, 1.5432, 3.3256, 1.5514, 1.9465],
+ device='cuda:5'), covar=tensor([0.2801, 0.2244, 0.1944, 0.2093, 0.1509, 0.0234, 0.2353, 0.1104],
+ device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0115, 0.0121, 0.0123, 0.0114, 0.0095, 0.0094, 0.0095],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 02:35:02,603 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5612, 1.4259, 1.3953, 1.5030, 0.9376, 3.2916, 1.2027, 1.6395],
+ device='cuda:5'), covar=tensor([0.3256, 0.2578, 0.2316, 0.2464, 0.2039, 0.0210, 0.2647, 0.1334],
+ device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0121, 0.0123, 0.0114, 0.0095, 0.0095, 0.0095],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 02:35:06,505 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.061e+02 1.629e+02 1.889e+02 2.189e+02 5.095e+02, threshold=3.778e+02, percent-clipped=2.0
+2023-03-27 02:35:10,494 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122063.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:35:22,846 INFO [finetune.py:976] (5/7) Epoch 22, batch 1800, loss[loss=0.182, simple_loss=0.2622, pruned_loss=0.05093, over 4904.00 frames. ], tot_loss[loss=0.1751, simple_loss=0.2462, pruned_loss=0.05197, over 952060.90 frames. ], batch size: 37, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:35:41,158 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=122111.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:35:46,332 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122117.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:35:56,274 INFO [finetune.py:976] (5/7) Epoch 22, batch 1850, loss[loss=0.1692, simple_loss=0.2506, pruned_loss=0.04391, over 4818.00 frames. ], tot_loss[loss=0.1768, simple_loss=0.2482, pruned_loss=0.05275, over 954113.02 frames. ], batch size: 39, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:36:12,755 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.044e+01 1.577e+02 1.909e+02 2.256e+02 5.766e+02, threshold=3.818e+02, percent-clipped=1.0
+2023-03-27 02:36:27,303 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122178.0, num_to_drop=1, layers_to_drop={3}
+2023-03-27 02:36:29,613 INFO [finetune.py:976] (5/7) Epoch 22, batch 1900, loss[loss=0.1841, simple_loss=0.2551, pruned_loss=0.05658, over 4805.00 frames. ], tot_loss[loss=0.1767, simple_loss=0.2487, pruned_loss=0.05233, over 953157.40 frames. ], batch size: 51, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:36:37,618 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122195.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:37:03,559 INFO [finetune.py:976] (5/7) Epoch 22, batch 1950, loss[loss=0.14, simple_loss=0.2154, pruned_loss=0.03229, over 4773.00 frames. ], tot_loss[loss=0.1756, simple_loss=0.2475, pruned_loss=0.05183, over 952150.64 frames. ], batch size: 54, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:37:10,376 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.68 vs. limit=5.0
+2023-03-27 02:37:14,612 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8115, 1.7370, 1.5333, 1.8363, 2.3596, 1.9241, 1.7708, 1.4912],
+ device='cuda:5'), covar=tensor([0.2267, 0.2052, 0.2126, 0.1724, 0.1721, 0.1299, 0.2279, 0.2090],
+ device='cuda:5'), in_proj_covar=tensor([0.0246, 0.0211, 0.0214, 0.0197, 0.0244, 0.0190, 0.0218, 0.0205],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:37:17,049 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8642, 1.7389, 1.7335, 1.8254, 1.4254, 4.6521, 1.7334, 2.3073],
+ device='cuda:5'), covar=tensor([0.3164, 0.2433, 0.2110, 0.2454, 0.1718, 0.0091, 0.2368, 0.1152],
+ device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0121, 0.0124, 0.0114, 0.0096, 0.0095, 0.0095],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 02:37:18,277 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122256.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:37:19,978 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.844e+01 1.567e+02 1.786e+02 2.225e+02 4.203e+02, threshold=3.573e+02, percent-clipped=2.0
+2023-03-27 02:37:36,887 INFO [finetune.py:976] (5/7) Epoch 22, batch 2000, loss[loss=0.1815, simple_loss=0.2443, pruned_loss=0.0593, over 4827.00 frames. ], tot_loss[loss=0.174, simple_loss=0.2457, pruned_loss=0.05119, over 951803.54 frames. ], batch size: 41, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:37:51,523 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122306.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:37:59,928 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122319.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:38:10,025 INFO [finetune.py:976] (5/7) Epoch 22, batch 2050, loss[loss=0.1537, simple_loss=0.2257, pruned_loss=0.04078, over 4820.00 frames. ], tot_loss[loss=0.1712, simple_loss=0.2419, pruned_loss=0.05021, over 950828.12 frames. ], batch size: 41, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:38:26,816 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.990e+01 1.410e+02 1.787e+02 2.088e+02 3.673e+02, threshold=3.574e+02, percent-clipped=1.0
+2023-03-27 02:38:36,532 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4762, 1.3737, 1.8232, 1.6446, 1.4583, 3.2105, 1.2630, 1.4674],
+ device='cuda:5'), covar=tensor([0.0907, 0.1760, 0.1151, 0.0967, 0.1673, 0.0279, 0.1600, 0.1764],
+ device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0081, 0.0073, 0.0076, 0.0092, 0.0081, 0.0085, 0.0079],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 02:38:43,002 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122380.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:38:44,605 INFO [finetune.py:976] (5/7) Epoch 22, batch 2100, loss[loss=0.1457, simple_loss=0.2264, pruned_loss=0.03251, over 4765.00 frames. ], tot_loss[loss=0.1721, simple_loss=0.2428, pruned_loss=0.05071, over 950749.05 frames. ], batch size: 28, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:39:09,857 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.9618, 0.8734, 0.7738, 0.9446, 1.1150, 1.0725, 0.9281, 0.8809],
+ device='cuda:5'), covar=tensor([0.0375, 0.0300, 0.0709, 0.0321, 0.0259, 0.0395, 0.0285, 0.0381],
+ device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0106, 0.0143, 0.0111, 0.0099, 0.0111, 0.0100, 0.0112],
+ device='cuda:5'), out_proj_covar=tensor([7.6290e-05, 8.1550e-05, 1.1198e-04, 8.4805e-05, 7.6779e-05, 8.2280e-05,
+ 7.4336e-05, 8.5514e-05], device='cuda:5')
+2023-03-27 02:39:28,192 INFO [finetune.py:976] (5/7) Epoch 22, batch 2150, loss[loss=0.1915, simple_loss=0.2599, pruned_loss=0.06156, over 4816.00 frames. ], tot_loss[loss=0.1764, simple_loss=0.2472, pruned_loss=0.05278, over 950769.19 frames. ], batch size: 40, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:39:37,549 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.11 vs. limit=2.0
+2023-03-27 02:40:03,768 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.048e+02 1.591e+02 1.910e+02 2.352e+02 5.051e+02, threshold=3.819e+02, percent-clipped=3.0
+2023-03-27 02:40:13,679 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122468.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:40:16,633 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122473.0, num_to_drop=1, layers_to_drop={0}
+2023-03-27 02:40:25,079 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0
+2023-03-27 02:40:25,587 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0
+2023-03-27 02:40:26,432 INFO [finetune.py:976] (5/7) Epoch 22, batch 2200, loss[loss=0.1589, simple_loss=0.2368, pruned_loss=0.04049, over 4897.00 frames. ], tot_loss[loss=0.1769, simple_loss=0.2482, pruned_loss=0.05278, over 953094.30 frames. ], batch size: 35, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:40:48,964 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.2742, 1.4123, 1.4887, 0.8618, 1.4918, 1.6973, 1.7521, 1.3225],
+ device='cuda:5'), covar=tensor([0.0855, 0.0659, 0.0526, 0.0502, 0.0520, 0.0644, 0.0296, 0.0779],
+ device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0151, 0.0128, 0.0124, 0.0133, 0.0131, 0.0143, 0.0149],
+ device='cuda:5'), out_proj_covar=tensor([9.0519e-05, 1.0910e-04, 9.1411e-05, 8.7602e-05, 9.3525e-05, 9.3809e-05,
+ 1.0248e-04, 1.0701e-04], device='cuda:5')
+2023-03-27 02:40:57,259 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122529.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:40:58,944 INFO [finetune.py:976] (5/7) Epoch 22, batch 2250, loss[loss=0.1664, simple_loss=0.2437, pruned_loss=0.04455, over 4760.00 frames. ], tot_loss[loss=0.1787, simple_loss=0.2503, pruned_loss=0.05353, over 955234.47 frames. ], batch size: 26, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:41:12,977 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122551.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:41:17,776 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.977e+01 1.468e+02 1.830e+02 2.095e+02 3.153e+02, threshold=3.659e+02, percent-clipped=0.0
+2023-03-27 02:41:21,552 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.7352, 1.4127, 1.3847, 0.7799, 1.5308, 1.5832, 1.6456, 1.3542],
+ device='cuda:5'), covar=tensor([0.0910, 0.0706, 0.0610, 0.0617, 0.0528, 0.0756, 0.0414, 0.0735],
+ device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0151, 0.0128, 0.0125, 0.0133, 0.0132, 0.0143, 0.0150],
+ device='cuda:5'), out_proj_covar=tensor([9.0705e-05, 1.0924e-04, 9.1706e-05, 8.7818e-05, 9.3726e-05, 9.4044e-05,
+ 1.0279e-04, 1.0737e-04], device='cuda:5')
+2023-03-27 02:41:31,701 INFO [finetune.py:976] (5/7) Epoch 22, batch 2300, loss[loss=0.1793, simple_loss=0.2458, pruned_loss=0.0564, over 4820.00 frames. ], tot_loss[loss=0.1781, simple_loss=0.2501, pruned_loss=0.05305, over 956206.26 frames. ], batch size: 30, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:41:49,495 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122606.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:42:05,214 INFO [finetune.py:976] (5/7) Epoch 22, batch 2350, loss[loss=0.1634, simple_loss=0.238, pruned_loss=0.04444, over 4812.00 frames. ], tot_loss[loss=0.1758, simple_loss=0.2475, pruned_loss=0.05201, over 954237.34 frames. ], batch size: 40, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:42:09,427 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.1922, 4.9530, 4.7432, 2.6911, 5.0095, 3.9918, 1.2342, 3.6075],
+ device='cuda:5'), covar=tensor([0.2082, 0.1658, 0.1288, 0.3126, 0.0637, 0.0752, 0.4486, 0.1283],
+ device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0178, 0.0159, 0.0130, 0.0161, 0.0123, 0.0148, 0.0124],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-27 02:42:21,478 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=122654.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:42:24,441 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.047e+02 1.417e+02 1.625e+02 2.018e+02 3.172e+02, threshold=3.250e+02, percent-clipped=0.0
+2023-03-27 02:42:34,177 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122675.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:42:38,347 INFO [finetune.py:976] (5/7) Epoch 22, batch 2400, loss[loss=0.1939, simple_loss=0.2567, pruned_loss=0.06557, over 4811.00 frames. ], tot_loss[loss=0.1748, simple_loss=0.246, pruned_loss=0.05181, over 957199.43 frames. ], batch size: 38, lr: 3.15e-03, grad_scale: 64.0
+2023-03-27 02:42:50,685 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1287, 1.9244, 2.1363, 1.5547, 2.1467, 2.2868, 2.2557, 1.5621],
+ device='cuda:5'), covar=tensor([0.0538, 0.0692, 0.0644, 0.0871, 0.0753, 0.0552, 0.0550, 0.1398],
+ device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0136, 0.0139, 0.0120, 0.0125, 0.0138, 0.0139, 0.0162],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:43:11,471 INFO [finetune.py:976] (5/7) Epoch 22, batch 2450, loss[loss=0.1635, simple_loss=0.2235, pruned_loss=0.0518, over 4267.00 frames. ], tot_loss[loss=0.1727, simple_loss=0.2433, pruned_loss=0.05104, over 956317.29 frames. ], batch size: 18, lr: 3.14e-03, grad_scale: 64.0
+2023-03-27 02:43:31,104 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.081e+02 1.588e+02 1.841e+02 2.130e+02 2.968e+02, threshold=3.682e+02, percent-clipped=0.0
+2023-03-27 02:43:37,339 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9666, 1.9470, 1.5082, 1.7454, 1.8304, 1.7385, 1.8666, 2.4948],
+ device='cuda:5'), covar=tensor([0.4157, 0.3936, 0.3339, 0.3721, 0.4022, 0.2654, 0.3625, 0.2038],
+ device='cuda:5'), in_proj_covar=tensor([0.0290, 0.0264, 0.0235, 0.0278, 0.0256, 0.0226, 0.0255, 0.0236],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:43:39,663 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122773.0, num_to_drop=1, layers_to_drop={2}
+2023-03-27 02:43:45,023 INFO [finetune.py:976] (5/7) Epoch 22, batch 2500, loss[loss=0.1591, simple_loss=0.2305, pruned_loss=0.0438, over 4777.00 frames. ], tot_loss[loss=0.1737, simple_loss=0.2443, pruned_loss=0.0516, over 953614.47 frames. ], batch size: 29, lr: 3.14e-03, grad_scale: 64.0
+2023-03-27 02:43:46,409 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.85 vs. limit=5.0
+2023-03-27 02:44:21,546 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=122821.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:44:23,363 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122824.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:44:28,113 INFO [finetune.py:976] (5/7) Epoch 22, batch 2550, loss[loss=0.1909, simple_loss=0.2695, pruned_loss=0.05618, over 4901.00 frames. ], tot_loss[loss=0.177, simple_loss=0.2484, pruned_loss=0.05277, over 954640.69 frames. ], batch size: 36, lr: 3.14e-03, grad_scale: 64.0
+2023-03-27 02:44:42,587 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122851.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:44:53,794 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.164e+02 1.566e+02 1.889e+02 2.331e+02 3.878e+02, threshold=3.777e+02, percent-clipped=1.0
+2023-03-27 02:45:11,736 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122872.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:45:22,085 INFO [finetune.py:976] (5/7) Epoch 22, batch 2600, loss[loss=0.2221, simple_loss=0.291, pruned_loss=0.07659, over 4824.00 frames. ], tot_loss[loss=0.1772, simple_loss=0.249, pruned_loss=0.05268, over 954944.39 frames. ], batch size: 39, lr: 3.14e-03, grad_scale: 64.0
+2023-03-27 02:45:40,402 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=122899.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:46:02,679 INFO [finetune.py:976] (5/7) Epoch 22, batch 2650, loss[loss=0.165, simple_loss=0.2219, pruned_loss=0.05403, over 4094.00 frames. ], tot_loss[loss=0.1769, simple_loss=0.2491, pruned_loss=0.0524, over 955204.79 frames. ], batch size: 17, lr: 3.14e-03, grad_scale: 64.0
+2023-03-27 02:46:03,434 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122933.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:46:19,591 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.898e+01 1.493e+02 1.785e+02 2.135e+02 3.458e+02, threshold=3.570e+02, percent-clipped=0.0
+2023-03-27 02:46:31,726 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122975.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:46:31,757 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7174, 2.9027, 2.7264, 1.9111, 2.8404, 3.0140, 3.0668, 2.5884],
+ device='cuda:5'), covar=tensor([0.0629, 0.0558, 0.0644, 0.0814, 0.0511, 0.0643, 0.0552, 0.0960],
+ device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0136, 0.0139, 0.0120, 0.0125, 0.0138, 0.0139, 0.0161],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:46:35,902 INFO [finetune.py:976] (5/7) Epoch 22, batch 2700, loss[loss=0.1913, simple_loss=0.2603, pruned_loss=0.06115, over 4802.00 frames. ], tot_loss[loss=0.1757, simple_loss=0.2482, pruned_loss=0.05165, over 955109.82 frames. ], batch size: 41, lr: 3.14e-03, grad_scale: 64.0
+2023-03-27 02:46:46,728 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0
+2023-03-27 02:47:04,318 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=123023.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:47:09,739 INFO [finetune.py:976] (5/7) Epoch 22, batch 2750, loss[loss=0.1595, simple_loss=0.2321, pruned_loss=0.04347, over 2995.00 frames. ], tot_loss[loss=0.1751, simple_loss=0.2469, pruned_loss=0.05163, over 951395.92 frames. ], batch size: 12, lr: 3.14e-03, grad_scale: 64.0
+2023-03-27 02:47:16,473 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.5271, 3.9125, 4.1038, 4.3797, 4.2575, 4.0055, 4.6152, 1.3162],
+ device='cuda:5'), covar=tensor([0.0784, 0.0887, 0.0917, 0.0999, 0.1296, 0.1686, 0.0734, 0.6200],
+ device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0244, 0.0280, 0.0290, 0.0335, 0.0284, 0.0305, 0.0299],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:47:24,934 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7909, 1.1914, 0.9447, 1.6806, 2.2293, 1.3202, 1.5149, 1.6240],
+ device='cuda:5'), covar=tensor([0.1447, 0.2141, 0.1928, 0.1148, 0.1802, 0.2002, 0.1485, 0.1957],
+ device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0094, 0.0110, 0.0091, 0.0119, 0.0094, 0.0098, 0.0088],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-27 02:47:26,634 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.062e+02 1.465e+02 1.704e+02 2.045e+02 3.535e+02, threshold=3.409e+02, percent-clipped=0.0
+2023-03-27 02:47:42,976 INFO [finetune.py:976] (5/7) Epoch 22, batch 2800, loss[loss=0.2155, simple_loss=0.2701, pruned_loss=0.08039, over 4088.00 frames. ], tot_loss[loss=0.1743, simple_loss=0.2451, pruned_loss=0.05178, over 953567.34 frames. ], batch size: 65, lr: 3.14e-03, grad_scale: 64.0
+2023-03-27 02:47:48,493 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4749, 1.5226, 1.7799, 1.6722, 1.7796, 2.9951, 1.5741, 1.6595],
+ device='cuda:5'), covar=tensor([0.0977, 0.1712, 0.0981, 0.0894, 0.1389, 0.0410, 0.1346, 0.1672],
+ device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0077, 0.0092, 0.0081, 0.0086, 0.0079],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 02:47:56,779 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0
+2023-03-27 02:48:11,383 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=123124.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:48:12,007 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2238, 2.2202, 1.8806, 2.2730, 2.1548, 2.0351, 2.0433, 3.0529],
+ device='cuda:5'), covar=tensor([0.3576, 0.4706, 0.3260, 0.4235, 0.4395, 0.2533, 0.4246, 0.1556],
+ device='cuda:5'), in_proj_covar=tensor([0.0287, 0.0262, 0.0233, 0.0276, 0.0254, 0.0225, 0.0253, 0.0234],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:48:16,587 INFO [finetune.py:976] (5/7) Epoch 22, batch 2850, loss[loss=0.2096, simple_loss=0.269, pruned_loss=0.07512, over 4228.00 frames. ], tot_loss[loss=0.1725, simple_loss=0.243, pruned_loss=0.051, over 953360.73 frames. ], batch size: 65, lr: 3.14e-03, grad_scale: 32.0
+2023-03-27 02:48:33,488 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.071e+02 1.606e+02 1.919e+02 2.375e+02 6.875e+02, threshold=3.839e+02, percent-clipped=7.0
+2023-03-27 02:48:42,230 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=123172.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:48:46,873 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7176, 1.6787, 1.6626, 1.8077, 1.3231, 4.4653, 1.5932, 1.9383],
+ device='cuda:5'), covar=tensor([0.3305, 0.2459, 0.2170, 0.2331, 0.1814, 0.0109, 0.2437, 0.1270],
+ device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0121, 0.0124, 0.0114, 0.0096, 0.0094, 0.0095],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 02:48:49,680 INFO [finetune.py:976] (5/7) Epoch 22, batch 2900, loss[loss=0.1951, simple_loss=0.2512, pruned_loss=0.06952, over 4801.00 frames. ], tot_loss[loss=0.1748, simple_loss=0.2455, pruned_loss=0.05207, over 954235.03 frames. ], batch size: 25, lr: 3.14e-03, grad_scale: 32.0
+2023-03-27 02:49:22,651 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=123228.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:49:25,026 INFO [finetune.py:976] (5/7) Epoch 22, batch 2950, loss[loss=0.1462, simple_loss=0.2284, pruned_loss=0.03198, over 4745.00 frames. ], tot_loss[loss=0.1769, simple_loss=0.2485, pruned_loss=0.05267, over 953341.43 frames. ], batch size: 27, lr: 3.14e-03, grad_scale: 32.0
+2023-03-27 02:49:42,387 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.515e+01 1.556e+02 1.822e+02 2.269e+02 3.192e+02, threshold=3.643e+02, percent-clipped=0.0
+2023-03-27 02:49:59,814 INFO [finetune.py:976] (5/7) Epoch 22, batch 3000, loss[loss=0.1808, simple_loss=0.2558, pruned_loss=0.05288, over 4750.00 frames. ], tot_loss[loss=0.1781, simple_loss=0.2496, pruned_loss=0.05324, over 952863.26 frames. ], batch size: 59, lr: 3.14e-03, grad_scale: 32.0
+2023-03-27 02:49:59,815 INFO [finetune.py:1001] (5/7) Computing validation loss
+2023-03-27 02:50:15,178 INFO [finetune.py:1010] (5/7) Epoch 22, validation: loss=0.1575, simple_loss=0.2256, pruned_loss=0.04471, over 2265189.00 frames.
+2023-03-27 02:50:15,178 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB
+2023-03-27 02:50:27,882 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3015, 2.2125, 1.7671, 2.1544, 2.2114, 1.9610, 2.5027, 2.3062],
+ device='cuda:5'), covar=tensor([0.1354, 0.1982, 0.3039, 0.2608, 0.2477, 0.1749, 0.2726, 0.1838],
+ device='cuda:5'), in_proj_covar=tensor([0.0188, 0.0189, 0.0236, 0.0255, 0.0249, 0.0205, 0.0214, 0.0203],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:50:47,934 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=123314.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:51:07,178 INFO [finetune.py:976] (5/7) Epoch 22, batch 3050, loss[loss=0.1728, simple_loss=0.2512, pruned_loss=0.04724, over 4912.00 frames. ], tot_loss[loss=0.1774, simple_loss=0.2496, pruned_loss=0.0526, over 954022.99 frames. ], batch size: 38, lr: 3.14e-03, grad_scale: 32.0
+2023-03-27 02:51:27,200 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.922e+01 1.647e+02 1.960e+02 2.377e+02 4.726e+02, threshold=3.920e+02, percent-clipped=6.0
+2023-03-27 02:51:36,574 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=123375.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:51:41,108 INFO [finetune.py:976] (5/7) Epoch 22, batch 3100, loss[loss=0.1934, simple_loss=0.2486, pruned_loss=0.06908, over 4925.00 frames. ], tot_loss[loss=0.1762, simple_loss=0.2479, pruned_loss=0.05228, over 953971.35 frames. ], batch size: 38, lr: 3.14e-03, grad_scale: 32.0
+2023-03-27 02:52:09,225 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5918, 1.4913, 1.4767, 1.5321, 1.3285, 3.2351, 1.5154, 1.8104],
+ device='cuda:5'), covar=tensor([0.3822, 0.2935, 0.2341, 0.2750, 0.1651, 0.0283, 0.2645, 0.1228],
+ device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0117, 0.0122, 0.0125, 0.0115, 0.0097, 0.0095, 0.0096],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 02:52:12,240 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=123428.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:52:14,580 INFO [finetune.py:976] (5/7) Epoch 22, batch 3150, loss[loss=0.1537, simple_loss=0.2407, pruned_loss=0.03341, over 4835.00 frames. ], tot_loss[loss=0.175, simple_loss=0.2456, pruned_loss=0.05216, over 954129.68 frames. ], batch size: 41, lr: 3.14e-03, grad_scale: 32.0
+2023-03-27 02:52:18,239 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.7248, 4.1050, 4.3138, 4.5199, 4.4795, 4.2530, 4.8382, 1.7094],
+ device='cuda:5'), covar=tensor([0.0661, 0.0736, 0.0812, 0.0863, 0.1101, 0.1376, 0.0526, 0.5645],
+ device='cuda:5'), in_proj_covar=tensor([0.0353, 0.0246, 0.0282, 0.0292, 0.0337, 0.0287, 0.0307, 0.0302],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:52:34,407 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.134e+02 1.485e+02 1.808e+02 2.093e+02 3.688e+02, threshold=3.617e+02, percent-clipped=0.0
+2023-03-27 02:52:47,878 INFO [finetune.py:976] (5/7) Epoch 22, batch 3200, loss[loss=0.14, simple_loss=0.2049, pruned_loss=0.03756, over 4755.00 frames. ], tot_loss[loss=0.1721, simple_loss=0.242, pruned_loss=0.0511, over 953669.05 frames. ], batch size: 23, lr: 3.14e-03, grad_scale: 32.0
+2023-03-27 02:52:48,024 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0988, 1.9629, 1.6687, 1.7559, 2.0850, 1.8445, 2.1867, 2.0932],
+ device='cuda:5'), covar=tensor([0.1335, 0.1935, 0.2924, 0.2510, 0.2437, 0.1596, 0.3360, 0.1742],
+ device='cuda:5'), in_proj_covar=tensor([0.0187, 0.0188, 0.0235, 0.0254, 0.0248, 0.0204, 0.0214, 0.0202],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:52:48,647 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.8164, 1.8430, 1.9226, 1.1867, 1.9800, 2.1398, 2.0694, 1.6829],
+ device='cuda:5'), covar=tensor([0.0795, 0.0629, 0.0560, 0.0549, 0.0551, 0.0637, 0.0374, 0.0642],
+ device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0149, 0.0127, 0.0123, 0.0132, 0.0130, 0.0141, 0.0148],
+ device='cuda:5'), out_proj_covar=tensor([8.9707e-05, 1.0787e-04, 9.1042e-05, 8.6330e-05, 9.2625e-05, 9.2797e-05,
+ 1.0106e-04, 1.0625e-04], device='cuda:5')
+2023-03-27 02:52:52,837 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=123489.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:53:18,979 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=123528.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:53:21,322 INFO [finetune.py:976] (5/7) Epoch 22, batch 3250, loss[loss=0.1799, simple_loss=0.256, pruned_loss=0.05193, over 4732.00 frames. ], tot_loss[loss=0.1719, simple_loss=0.2419, pruned_loss=0.05091, over 952404.83 frames. ], batch size: 59, lr: 3.14e-03, grad_scale: 32.0
+2023-03-27 02:53:41,212 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.285e+01 1.537e+02 1.740e+02 2.121e+02 3.629e+02, threshold=3.481e+02, percent-clipped=1.0
+2023-03-27 02:53:51,148 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=123576.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:53:54,734 INFO [finetune.py:976] (5/7) Epoch 22, batch 3300, loss[loss=0.1752, simple_loss=0.2576, pruned_loss=0.04634, over 4763.00 frames. ], tot_loss[loss=0.1729, simple_loss=0.244, pruned_loss=0.05085, over 954845.23 frames. ], batch size: 28, lr: 3.14e-03, grad_scale: 32.0
+2023-03-27 02:54:28,113 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0
+2023-03-27 02:54:28,299 INFO [finetune.py:976] (5/7) Epoch 22, batch 3350, loss[loss=0.2509, simple_loss=0.3148, pruned_loss=0.09347, over 4868.00 frames. ], tot_loss[loss=0.1752, simple_loss=0.2469, pruned_loss=0.05175, over 954373.46 frames. ], batch size: 34, lr: 3.14e-03, grad_scale: 32.0
+2023-03-27 02:54:35,080 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9308, 1.7940, 2.3413, 1.6852, 2.0069, 2.2321, 1.7412, 2.3325],
+ device='cuda:5'), covar=tensor([0.1128, 0.1659, 0.1303, 0.1523, 0.0792, 0.1028, 0.2241, 0.0602],
+ device='cuda:5'), in_proj_covar=tensor([0.0193, 0.0207, 0.0191, 0.0189, 0.0175, 0.0213, 0.0217, 0.0200],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:54:47,673 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.106e+02 1.542e+02 1.809e+02 2.055e+02 5.285e+02, threshold=3.617e+02, percent-clipped=2.0
+2023-03-27 02:54:54,268 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=123670.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:54:56,124 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1767, 2.0367, 1.5491, 0.6789, 1.6426, 1.7567, 1.5979, 1.8371],
+ device='cuda:5'), covar=tensor([0.0808, 0.0638, 0.1337, 0.1817, 0.1292, 0.2262, 0.2283, 0.0822],
+ device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0191, 0.0197, 0.0182, 0.0210, 0.0206, 0.0222, 0.0195],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:55:01,513 INFO [finetune.py:976] (5/7) Epoch 22, batch 3400, loss[loss=0.2129, simple_loss=0.2747, pruned_loss=0.07557, over 4886.00 frames. ], tot_loss[loss=0.1773, simple_loss=0.249, pruned_loss=0.05279, over 955270.64 frames. ], batch size: 35, lr: 3.14e-03, grad_scale: 32.0
+2023-03-27 02:55:05,293 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7617, 3.9579, 3.7269, 2.0064, 4.0582, 3.0447, 0.7325, 2.8976],
+ device='cuda:5'), covar=tensor([0.2211, 0.1841, 0.1503, 0.3006, 0.0926, 0.1026, 0.4577, 0.1245],
+ device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0176, 0.0157, 0.0129, 0.0160, 0.0122, 0.0147, 0.0124],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-27 02:55:54,860 INFO [finetune.py:976] (5/7) Epoch 22, batch 3450, loss[loss=0.1805, simple_loss=0.2571, pruned_loss=0.05196, over 4807.00 frames. ], tot_loss[loss=0.1774, simple_loss=0.249, pruned_loss=0.05292, over 954681.42 frames. ], batch size: 40, lr: 3.14e-03, grad_scale: 32.0
+2023-03-27 02:56:26,816 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.145e+02 1.545e+02 1.898e+02 2.347e+02 3.548e+02, threshold=3.797e+02, percent-clipped=0.0
+2023-03-27 02:56:45,230 INFO [finetune.py:976] (5/7) Epoch 22, batch 3500, loss[loss=0.1474, simple_loss=0.2268, pruned_loss=0.03399, over 4767.00 frames. ], tot_loss[loss=0.176, simple_loss=0.2472, pruned_loss=0.05246, over 953443.30 frames. ], batch size: 54, lr: 3.14e-03, grad_scale: 32.0
+2023-03-27 02:56:46,497 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=123784.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:57:18,480 INFO [finetune.py:976] (5/7) Epoch 22, batch 3550, loss[loss=0.1727, simple_loss=0.2387, pruned_loss=0.05335, over 4919.00 frames. ], tot_loss[loss=0.1748, simple_loss=0.2453, pruned_loss=0.05216, over 954869.81 frames. ], batch size: 43, lr: 3.14e-03, grad_scale: 32.0
+2023-03-27 02:57:18,581 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4935, 1.4089, 1.8403, 1.6780, 1.5709, 3.1871, 1.4008, 1.5932],
+ device='cuda:5'), covar=tensor([0.0888, 0.1599, 0.1203, 0.0903, 0.1425, 0.0243, 0.1285, 0.1554],
+ device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0074, 0.0077, 0.0091, 0.0081, 0.0086, 0.0079],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 02:57:36,086 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.884e+01 1.429e+02 1.766e+02 2.143e+02 3.754e+02, threshold=3.531e+02, percent-clipped=0.0
+2023-03-27 02:57:45,477 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=123872.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:57:52,356 INFO [finetune.py:976] (5/7) Epoch 22, batch 3600, loss[loss=0.1979, simple_loss=0.2507, pruned_loss=0.07261, over 4891.00 frames. ], tot_loss[loss=0.1728, simple_loss=0.2427, pruned_loss=0.05142, over 956279.83 frames. ], batch size: 32, lr: 3.14e-03, grad_scale: 32.0
+2023-03-27 02:58:19,357 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3434, 1.4012, 1.5143, 1.0871, 1.2520, 1.4663, 1.3652, 1.6497],
+ device='cuda:5'), covar=tensor([0.1264, 0.2194, 0.1374, 0.1500, 0.1018, 0.1281, 0.3017, 0.0955],
+ device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0206, 0.0191, 0.0189, 0.0174, 0.0213, 0.0216, 0.0200],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:58:25,720 INFO [finetune.py:976] (5/7) Epoch 22, batch 3650, loss[loss=0.1622, simple_loss=0.2223, pruned_loss=0.05108, over 4780.00 frames. ], tot_loss[loss=0.1744, simple_loss=0.2447, pruned_loss=0.05199, over 954805.56 frames. ], batch size: 26, lr: 3.14e-03, grad_scale: 32.0
+2023-03-27 02:58:26,484 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=123933.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:58:38,955 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0
+2023-03-27 02:58:43,175 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.106e+01 1.598e+02 1.925e+02 2.525e+02 5.508e+02, threshold=3.851e+02, percent-clipped=5.0
+2023-03-27 02:58:49,877 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=123970.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:58:59,578 INFO [finetune.py:976] (5/7) Epoch 22, batch 3700, loss[loss=0.1512, simple_loss=0.2372, pruned_loss=0.03259, over 4920.00 frames. ], tot_loss[loss=0.1762, simple_loss=0.2472, pruned_loss=0.05261, over 953843.69 frames. ], batch size: 38, lr: 3.14e-03, grad_scale: 32.0
+2023-03-27 02:59:23,317 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=124018.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:59:34,575 INFO [finetune.py:976] (5/7) Epoch 22, batch 3750, loss[loss=0.1736, simple_loss=0.2454, pruned_loss=0.05088, over 4926.00 frames. ], tot_loss[loss=0.1769, simple_loss=0.2482, pruned_loss=0.05275, over 954501.78 frames. ], batch size: 33, lr: 3.13e-03, grad_scale: 32.0
+2023-03-27 02:59:37,147 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124036.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 02:59:43,351 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3088, 2.2135, 2.2477, 1.6661, 2.1850, 2.4083, 2.3416, 1.9131],
+ device='cuda:5'), covar=tensor([0.0614, 0.0643, 0.0726, 0.0829, 0.0692, 0.0693, 0.0626, 0.1097],
+ device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0134, 0.0138, 0.0119, 0.0124, 0.0137, 0.0137, 0.0160],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 02:59:51,700 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.170e+02 1.541e+02 1.786e+02 2.095e+02 2.976e+02, threshold=3.572e+02, percent-clipped=0.0
+2023-03-27 02:59:53,042 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124062.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 03:00:02,479 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5154, 1.6487, 1.3461, 1.5537, 1.9458, 1.7478, 1.5223, 1.4650],
+ device='cuda:5'), covar=tensor([0.0379, 0.0277, 0.0583, 0.0289, 0.0189, 0.0523, 0.0334, 0.0373],
+ device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0106, 0.0142, 0.0110, 0.0098, 0.0110, 0.0100, 0.0111],
+ device='cuda:5'), out_proj_covar=tensor([7.6095e-05, 8.1364e-05, 1.1140e-04, 8.4734e-05, 7.6047e-05, 8.1469e-05,
+ 7.4396e-05, 8.4593e-05], device='cuda:5')
+2023-03-27 03:00:06,947 INFO [finetune.py:976] (5/7) Epoch 22, batch 3800, loss[loss=0.189, simple_loss=0.2595, pruned_loss=0.05919, over 4884.00 frames. ], tot_loss[loss=0.1778, simple_loss=0.2492, pruned_loss=0.05317, over 956443.96 frames. ], batch size: 35, lr: 3.13e-03, grad_scale: 32.0
+2023-03-27 03:00:08,718 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124084.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 03:00:17,182 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124097.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 03:00:39,629 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124123.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 03:00:51,384 INFO [finetune.py:976] (5/7) Epoch 22, batch 3850, loss[loss=0.1968, simple_loss=0.2664, pruned_loss=0.0636, over 4813.00 frames. ], tot_loss[loss=0.1761, simple_loss=0.2478, pruned_loss=0.05215, over 956868.39 frames. ], batch size: 39, lr: 3.13e-03, grad_scale: 32.0
+2023-03-27 03:00:51,454 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=124132.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 03:01:20,791 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.085e+02 1.486e+02 1.746e+02 2.060e+02 3.550e+02, threshold=3.491e+02, percent-clipped=0.0
+2023-03-27 03:01:37,029 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8492, 1.3164, 0.6760, 1.8269, 2.2666, 1.5036, 1.7821, 1.9334],
+ device='cuda:5'), covar=tensor([0.1361, 0.1963, 0.1986, 0.1026, 0.1731, 0.1755, 0.1238, 0.1722],
+ device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0095, 0.0111, 0.0092, 0.0120, 0.0094, 0.0099, 0.0089],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003],
+ device='cuda:5')
+2023-03-27 03:01:39,545 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-03-27 03:01:46,862 INFO [finetune.py:976] (5/7) Epoch 22, batch 3900, loss[loss=0.1748, simple_loss=0.2476, pruned_loss=0.05096, over 4854.00 frames. ], tot_loss[loss=0.1731, simple_loss=0.2443, pruned_loss=0.05091, over 955930.61 frames. ], batch size: 31, lr: 3.13e-03, grad_scale: 32.0
+2023-03-27 03:01:54,933 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4419, 2.2496, 1.9453, 2.3082, 2.3550, 2.0925, 2.6843, 2.4640],
+ device='cuda:5'), covar=tensor([0.1337, 0.2023, 0.2883, 0.2267, 0.2324, 0.1616, 0.2324, 0.1644],
+ device='cuda:5'), in_proj_covar=tensor([0.0187, 0.0188, 0.0234, 0.0252, 0.0246, 0.0203, 0.0213, 0.0201],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 03:02:17,921 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124228.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 03:02:20,286 INFO [finetune.py:976] (5/7) Epoch 22, batch 3950, loss[loss=0.1637, simple_loss=0.2347, pruned_loss=0.04637, over 4812.00 frames. ], tot_loss[loss=0.1703, simple_loss=0.2412, pruned_loss=0.04975, over 956617.63 frames. ], batch size: 51, lr: 3.13e-03, grad_scale: 32.0
+2023-03-27 03:02:39,959 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.148e+02 1.559e+02 1.897e+02 2.284e+02 3.853e+02, threshold=3.794e+02, percent-clipped=3.0
+2023-03-27 03:02:47,338 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124272.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 03:02:53,671 INFO [finetune.py:976] (5/7) Epoch 22, batch 4000, loss[loss=0.269, simple_loss=0.3131, pruned_loss=0.1124, over 4227.00 frames. ], tot_loss[loss=0.1714, simple_loss=0.2415, pruned_loss=0.05064, over 957684.48 frames. ], batch size: 65, lr: 3.13e-03, grad_scale: 32.0
+2023-03-27 03:03:12,059 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6814, 1.4986, 2.2742, 3.3412, 2.2848, 2.4851, 1.0981, 2.6664],
+ device='cuda:5'), covar=tensor([0.1620, 0.1289, 0.1130, 0.0462, 0.0753, 0.1500, 0.1646, 0.0515],
+ device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0133, 0.0164, 0.0100, 0.0136, 0.0125, 0.0100],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-27 03:03:26,973 INFO [finetune.py:976] (5/7) Epoch 22, batch 4050, loss[loss=0.137, simple_loss=0.2098, pruned_loss=0.0321, over 4754.00 frames. ], tot_loss[loss=0.1743, simple_loss=0.2442, pruned_loss=0.05221, over 957964.28 frames. ], batch size: 27, lr: 3.13e-03, grad_scale: 32.0
+2023-03-27 03:03:27,710 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124333.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 03:03:39,253 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124348.0, num_to_drop=1, layers_to_drop={0}
+2023-03-27 03:03:46,865 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.053e+02 1.585e+02 1.909e+02 2.213e+02 3.315e+02, threshold=3.818e+02, percent-clipped=0.0
+2023-03-27 03:04:00,186 INFO [finetune.py:976] (5/7) Epoch 22, batch 4100, loss[loss=0.1889, simple_loss=0.2663, pruned_loss=0.05574, over 4815.00 frames. ], tot_loss[loss=0.1769, simple_loss=0.2473, pruned_loss=0.05325, over 955051.43 frames.
], batch size: 45, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:04:07,292 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124392.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:04:14,210 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8619, 1.6070, 2.3465, 2.0099, 1.9973, 4.3994, 1.6490, 1.8629], + device='cuda:5'), covar=tensor([0.0875, 0.1811, 0.1090, 0.0952, 0.1572, 0.0186, 0.1468, 0.1815], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0077, 0.0092, 0.0081, 0.0086, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 03:04:19,532 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124409.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 03:04:24,896 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124418.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:04:29,726 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5000, 1.3484, 1.4869, 0.8795, 1.4671, 1.5107, 1.4915, 1.2637], + device='cuda:5'), covar=tensor([0.0589, 0.0760, 0.0669, 0.0932, 0.1026, 0.0688, 0.0612, 0.1247], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0135, 0.0139, 0.0120, 0.0125, 0.0137, 0.0138, 0.0160], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:04:33,283 INFO [finetune.py:976] (5/7) Epoch 22, batch 4150, loss[loss=0.1433, simple_loss=0.2184, pruned_loss=0.03407, over 4753.00 frames. ], tot_loss[loss=0.1766, simple_loss=0.2475, pruned_loss=0.05289, over 954911.54 frames. ], batch size: 54, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:04:33,392 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124432.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:04:38,492 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-03-27 03:04:53,595 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.819e+01 1.555e+02 1.746e+02 2.178e+02 5.076e+02, threshold=3.492e+02, percent-clipped=2.0 +2023-03-27 03:05:06,938 INFO [finetune.py:976] (5/7) Epoch 22, batch 4200, loss[loss=0.1571, simple_loss=0.2293, pruned_loss=0.04247, over 4904.00 frames. ], tot_loss[loss=0.177, simple_loss=0.2485, pruned_loss=0.05279, over 954258.21 frames. ], batch size: 37, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:05:14,711 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124493.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:05:18,419 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-03-27 03:05:40,311 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124528.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:05:42,635 INFO [finetune.py:976] (5/7) Epoch 22, batch 4250, loss[loss=0.1767, simple_loss=0.2426, pruned_loss=0.05538, over 4906.00 frames. ], tot_loss[loss=0.1755, simple_loss=0.2468, pruned_loss=0.05203, over 954720.51 frames. 
], batch size: 35, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:06:02,063 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.200e+01 1.490e+02 1.704e+02 2.072e+02 3.423e+02, threshold=3.408e+02, percent-clipped=0.0 +2023-03-27 03:06:12,354 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=124576.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:06:17,863 INFO [finetune.py:976] (5/7) Epoch 22, batch 4300, loss[loss=0.1374, simple_loss=0.2095, pruned_loss=0.03268, over 4822.00 frames. ], tot_loss[loss=0.174, simple_loss=0.2447, pruned_loss=0.05168, over 956869.66 frames. ], batch size: 40, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:06:41,637 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.70 vs. limit=5.0 +2023-03-27 03:07:10,854 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124628.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:07:17,349 INFO [finetune.py:976] (5/7) Epoch 22, batch 4350, loss[loss=0.1931, simple_loss=0.2607, pruned_loss=0.06277, over 4824.00 frames. ], tot_loss[loss=0.1726, simple_loss=0.2426, pruned_loss=0.05131, over 956656.97 frames. ], batch size: 51, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:07:21,116 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124638.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:07:27,372 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.4338, 1.5146, 1.6557, 0.8890, 1.6452, 1.8480, 1.8334, 1.5364], + device='cuda:5'), covar=tensor([0.0996, 0.0914, 0.0661, 0.0654, 0.0592, 0.0755, 0.0455, 0.0823], + device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0150, 0.0127, 0.0123, 0.0132, 0.0130, 0.0142, 0.0149], + device='cuda:5'), out_proj_covar=tensor([8.9912e-05, 1.0848e-04, 9.1212e-05, 8.6554e-05, 9.2403e-05, 9.3047e-05, + 1.0142e-04, 1.0682e-04], device='cuda:5') +2023-03-27 03:07:48,793 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.554e+01 1.414e+02 1.739e+02 2.148e+02 3.782e+02, threshold=3.479e+02, percent-clipped=2.0 +2023-03-27 03:07:51,292 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124663.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:08:06,294 INFO [finetune.py:976] (5/7) Epoch 22, batch 4400, loss[loss=0.1806, simple_loss=0.2557, pruned_loss=0.05277, over 4918.00 frames. ], tot_loss[loss=0.1745, simple_loss=0.2442, pruned_loss=0.05242, over 954135.49 frames. 
], batch size: 36, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:08:12,505 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124692.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:08:17,320 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124699.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:08:20,770 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124704.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 03:08:31,138 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124718.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:08:31,175 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3542, 2.2851, 2.0385, 1.2038, 2.1324, 1.8459, 1.7026, 2.1205], + device='cuda:5'), covar=tensor([0.1110, 0.0671, 0.1467, 0.1805, 0.1519, 0.2147, 0.2167, 0.0902], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0193, 0.0199, 0.0183, 0.0211, 0.0208, 0.0225, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:08:35,333 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124724.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:08:40,099 INFO [finetune.py:976] (5/7) Epoch 22, batch 4450, loss[loss=0.1432, simple_loss=0.2079, pruned_loss=0.03923, over 4682.00 frames. ], tot_loss[loss=0.1759, simple_loss=0.2463, pruned_loss=0.05277, over 953845.38 frames. ], batch size: 23, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:08:40,242 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1281, 2.1659, 1.6664, 2.2082, 2.1496, 1.8720, 2.6540, 2.1976], + device='cuda:5'), covar=tensor([0.1225, 0.1888, 0.2925, 0.2600, 0.2138, 0.1516, 0.2406, 0.1697], + device='cuda:5'), in_proj_covar=tensor([0.0188, 0.0189, 0.0235, 0.0254, 0.0248, 0.0204, 0.0216, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:08:40,797 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.5815, 3.9695, 4.2078, 4.3209, 4.3388, 4.1488, 4.6791, 1.8489], + device='cuda:5'), covar=tensor([0.0807, 0.0833, 0.0932, 0.1100, 0.1332, 0.1496, 0.0668, 0.5351], + device='cuda:5'), in_proj_covar=tensor([0.0351, 0.0244, 0.0280, 0.0291, 0.0334, 0.0284, 0.0305, 0.0300], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:08:45,010 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=124740.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:08:58,178 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.229e+02 1.653e+02 1.910e+02 2.206e+02 5.425e+02, threshold=3.820e+02, percent-clipped=3.0 +2023-03-27 03:09:02,351 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=124766.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:09:13,853 INFO [finetune.py:976] (5/7) Epoch 22, batch 4500, loss[loss=0.2145, simple_loss=0.2806, pruned_loss=0.07413, over 4818.00 frames. ], tot_loss[loss=0.1768, simple_loss=0.2476, pruned_loss=0.05304, over 952008.90 frames. 
], batch size: 40, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:09:17,611 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124788.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:09:32,103 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7536, 1.1449, 0.9757, 1.5369, 2.0540, 1.0731, 1.4834, 1.4764], + device='cuda:5'), covar=tensor([0.1414, 0.2203, 0.1785, 0.1180, 0.1877, 0.1913, 0.1466, 0.2101], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0094, 0.0110, 0.0092, 0.0120, 0.0094, 0.0099, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 03:09:38,625 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124820.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:09:47,291 INFO [finetune.py:976] (5/7) Epoch 22, batch 4550, loss[loss=0.1738, simple_loss=0.2503, pruned_loss=0.04872, over 4742.00 frames. ], tot_loss[loss=0.1775, simple_loss=0.2485, pruned_loss=0.05325, over 952989.13 frames. ], batch size: 54, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:09:49,431 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.49 vs. limit=5.0 +2023-03-27 03:10:04,841 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.024e+02 1.469e+02 1.742e+02 1.998e+02 4.285e+02, threshold=3.484e+02, percent-clipped=1.0 +2023-03-27 03:10:19,590 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124881.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:10:20,091 INFO [finetune.py:976] (5/7) Epoch 22, batch 4600, loss[loss=0.1476, simple_loss=0.2141, pruned_loss=0.04049, over 4190.00 frames. ], tot_loss[loss=0.1771, simple_loss=0.2483, pruned_loss=0.05297, over 953228.06 frames. ], batch size: 18, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:10:32,157 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6370, 1.5412, 2.2498, 3.5328, 2.3498, 2.5098, 0.9886, 2.9202], + device='cuda:5'), covar=tensor([0.1818, 0.1428, 0.1299, 0.0534, 0.0830, 0.1321, 0.2011, 0.0494], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0132, 0.0163, 0.0100, 0.0135, 0.0124, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 03:10:38,152 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8638, 2.1247, 1.6882, 1.8075, 2.4494, 2.4257, 2.0664, 2.0283], + device='cuda:5'), covar=tensor([0.0493, 0.0299, 0.0586, 0.0351, 0.0245, 0.0616, 0.0322, 0.0425], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0106, 0.0143, 0.0111, 0.0098, 0.0111, 0.0100, 0.0111], + device='cuda:5'), out_proj_covar=tensor([7.6147e-05, 8.1445e-05, 1.1171e-04, 8.5116e-05, 7.6377e-05, 8.2059e-05, + 7.4816e-05, 8.4764e-05], device='cuda:5') +2023-03-27 03:10:46,022 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-03-27 03:10:50,959 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124928.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:10:53,307 INFO [finetune.py:976] (5/7) Epoch 22, batch 4650, loss[loss=0.1408, simple_loss=0.2183, pruned_loss=0.0317, over 4896.00 frames. ], tot_loss[loss=0.1749, simple_loss=0.2454, pruned_loss=0.05215, over 953688.98 frames. 
], batch size: 43, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:11:10,703 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.046e+02 1.467e+02 1.675e+02 2.008e+02 3.769e+02, threshold=3.350e+02, percent-clipped=1.0 +2023-03-27 03:11:21,853 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=124976.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:11:26,386 INFO [finetune.py:976] (5/7) Epoch 22, batch 4700, loss[loss=0.2314, simple_loss=0.2811, pruned_loss=0.09084, over 4182.00 frames. ], tot_loss[loss=0.1736, simple_loss=0.2436, pruned_loss=0.0518, over 954557.45 frames. ], batch size: 18, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:11:36,919 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124994.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:11:43,188 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125004.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 03:11:52,755 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125019.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:11:52,781 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4549, 1.0578, 0.8619, 1.3709, 1.7990, 0.8048, 1.2489, 1.3618], + device='cuda:5'), covar=tensor([0.1253, 0.1907, 0.1679, 0.1068, 0.1848, 0.2209, 0.1318, 0.1742], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0096, 0.0111, 0.0093, 0.0121, 0.0095, 0.0100, 0.0090], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 03:12:07,771 INFO [finetune.py:976] (5/7) Epoch 22, batch 4750, loss[loss=0.1794, simple_loss=0.2407, pruned_loss=0.05905, over 4813.00 frames. ], tot_loss[loss=0.1726, simple_loss=0.2424, pruned_loss=0.0514, over 955180.43 frames. ], batch size: 45, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:12:20,122 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5713, 1.0893, 0.8897, 1.4161, 2.1403, 1.0480, 1.3850, 1.4921], + device='cuda:5'), covar=tensor([0.1569, 0.2218, 0.1888, 0.1249, 0.1831, 0.1888, 0.1462, 0.1934], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0111, 0.0093, 0.0121, 0.0095, 0.0100, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 03:12:30,813 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=125052.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 03:12:39,937 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.109e+02 1.467e+02 1.710e+02 2.084e+02 3.277e+02, threshold=3.420e+02, percent-clipped=0.0 +2023-03-27 03:12:41,900 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.9697, 0.8703, 0.9057, 0.9131, 1.1115, 1.0926, 0.9191, 0.9047], + device='cuda:5'), covar=tensor([0.0440, 0.0335, 0.0719, 0.0341, 0.0290, 0.0528, 0.0366, 0.0455], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0106, 0.0142, 0.0111, 0.0098, 0.0111, 0.0100, 0.0111], + device='cuda:5'), out_proj_covar=tensor([7.6211e-05, 8.1596e-05, 1.1176e-04, 8.5128e-05, 7.6323e-05, 8.1822e-05, + 7.4760e-05, 8.4971e-05], device='cuda:5') +2023-03-27 03:13:08,203 INFO [finetune.py:976] (5/7) Epoch 22, batch 4800, loss[loss=0.1575, simple_loss=0.2243, pruned_loss=0.04536, over 4762.00 frames. ], tot_loss[loss=0.1746, simple_loss=0.245, pruned_loss=0.05215, over 955772.84 frames. 
], batch size: 26, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:13:16,559 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125088.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:13:23,466 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.62 vs. limit=5.0 +2023-03-27 03:13:27,362 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4267, 1.3106, 1.3343, 1.3066, 0.8196, 2.2981, 0.7782, 1.1806], + device='cuda:5'), covar=tensor([0.3205, 0.2530, 0.2220, 0.2454, 0.1958, 0.0358, 0.2672, 0.1330], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0115, 0.0121, 0.0123, 0.0113, 0.0095, 0.0094, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 03:13:42,372 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125128.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:13:45,183 INFO [finetune.py:976] (5/7) Epoch 22, batch 4850, loss[loss=0.2315, simple_loss=0.3005, pruned_loss=0.08129, over 4849.00 frames. ], tot_loss[loss=0.1774, simple_loss=0.2485, pruned_loss=0.05321, over 955940.39 frames. ], batch size: 44, lr: 3.13e-03, grad_scale: 64.0 +2023-03-27 03:13:47,688 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=125136.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:14:04,216 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.151e+02 1.665e+02 1.915e+02 2.224e+02 3.844e+02, threshold=3.831e+02, percent-clipped=1.0 +2023-03-27 03:14:14,554 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125176.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:14:18,598 INFO [finetune.py:976] (5/7) Epoch 22, batch 4900, loss[loss=0.182, simple_loss=0.2563, pruned_loss=0.05388, over 4803.00 frames. ], tot_loss[loss=0.1779, simple_loss=0.2494, pruned_loss=0.05315, over 956845.49 frames. ], batch size: 51, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:14:23,469 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125189.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:14:52,066 INFO [finetune.py:976] (5/7) Epoch 22, batch 4950, loss[loss=0.175, simple_loss=0.2453, pruned_loss=0.0524, over 4826.00 frames. ], tot_loss[loss=0.1777, simple_loss=0.2497, pruned_loss=0.05291, over 955401.37 frames. ], batch size: 47, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:15:12,018 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.098e+02 1.500e+02 1.887e+02 2.169e+02 5.445e+02, threshold=3.774e+02, percent-clipped=5.0 +2023-03-27 03:15:25,142 INFO [finetune.py:976] (5/7) Epoch 22, batch 5000, loss[loss=0.1577, simple_loss=0.2215, pruned_loss=0.04693, over 4666.00 frames. ], tot_loss[loss=0.1765, simple_loss=0.2483, pruned_loss=0.05242, over 955763.58 frames. ], batch size: 23, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:15:33,519 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125294.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:15:50,233 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125319.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:15:58,105 INFO [finetune.py:976] (5/7) Epoch 22, batch 5050, loss[loss=0.1816, simple_loss=0.2361, pruned_loss=0.0636, over 4729.00 frames. ], tot_loss[loss=0.1746, simple_loss=0.2453, pruned_loss=0.05202, over 953230.92 frames. 
], batch size: 23, lr: 3.13e-03, grad_scale: 32.0 +2023-03-27 03:16:05,257 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=125342.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:16:15,332 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125356.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:16:18,275 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.708e+01 1.493e+02 1.877e+02 2.229e+02 3.838e+02, threshold=3.755e+02, percent-clipped=1.0 +2023-03-27 03:16:22,416 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=125367.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:16:27,963 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125376.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:16:31,543 INFO [finetune.py:976] (5/7) Epoch 22, batch 5100, loss[loss=0.1225, simple_loss=0.1928, pruned_loss=0.02607, over 4768.00 frames. ], tot_loss[loss=0.1711, simple_loss=0.2415, pruned_loss=0.05035, over 954624.04 frames. ], batch size: 26, lr: 3.12e-03, grad_scale: 32.0 +2023-03-27 03:16:47,133 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5011, 1.5271, 2.2789, 1.8636, 1.8519, 4.1490, 1.6140, 1.7096], + device='cuda:5'), covar=tensor([0.1059, 0.1990, 0.1112, 0.1090, 0.1751, 0.0211, 0.1591, 0.1929], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0073, 0.0076, 0.0091, 0.0081, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 03:16:55,563 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125417.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 03:16:58,448 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125421.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:17:05,016 INFO [finetune.py:976] (5/7) Epoch 22, batch 5150, loss[loss=0.1531, simple_loss=0.2344, pruned_loss=0.0359, over 4870.00 frames. ], tot_loss[loss=0.1731, simple_loss=0.2428, pruned_loss=0.05164, over 949557.41 frames. ], batch size: 34, lr: 3.12e-03, grad_scale: 32.0 +2023-03-27 03:17:13,055 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125437.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:17:23,283 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1842, 1.9570, 2.2021, 1.7075, 1.9157, 2.2356, 2.2640, 1.7585], + device='cuda:5'), covar=tensor([0.0434, 0.0591, 0.0544, 0.0697, 0.1066, 0.0486, 0.0403, 0.0951], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0134, 0.0137, 0.0119, 0.0124, 0.0136, 0.0137, 0.0160], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:17:26,823 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7199, 1.5461, 2.0611, 2.9918, 2.0504, 2.3399, 1.0004, 2.5254], + device='cuda:5'), covar=tensor([0.1645, 0.1392, 0.1166, 0.0571, 0.0823, 0.1172, 0.1822, 0.0505], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0133, 0.0164, 0.0101, 0.0136, 0.0124, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 03:17:30,579 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. 
limit=2.0 +2023-03-27 03:17:33,158 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.242e+02 1.608e+02 1.768e+02 2.290e+02 4.207e+02, threshold=3.536e+02, percent-clipped=1.0 +2023-03-27 03:17:36,284 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5859, 3.5712, 3.3420, 1.6691, 3.6802, 2.8281, 1.0143, 2.4272], + device='cuda:5'), covar=tensor([0.2607, 0.2321, 0.1669, 0.3546, 0.1194, 0.1006, 0.4216, 0.1691], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0179, 0.0162, 0.0130, 0.0162, 0.0124, 0.0150, 0.0125], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 03:17:45,316 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125476.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:17:46,519 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5670, 1.0363, 0.8071, 1.4006, 2.0420, 0.7681, 1.3361, 1.3369], + device='cuda:5'), covar=tensor([0.1396, 0.2174, 0.1660, 0.1144, 0.1688, 0.2103, 0.1510, 0.1981], + device='cuda:5'), in_proj_covar=tensor([0.0091, 0.0096, 0.0111, 0.0093, 0.0121, 0.0095, 0.0100, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 03:17:48,791 INFO [finetune.py:976] (5/7) Epoch 22, batch 5200, loss[loss=0.1948, simple_loss=0.2697, pruned_loss=0.05995, over 4811.00 frames. ], tot_loss[loss=0.1754, simple_loss=0.2462, pruned_loss=0.05233, over 950729.57 frames. ], batch size: 38, lr: 3.12e-03, grad_scale: 32.0 +2023-03-27 03:17:53,676 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125482.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:17:54,882 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125484.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:18:39,411 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=125524.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:18:44,721 INFO [finetune.py:976] (5/7) Epoch 22, batch 5250, loss[loss=0.1947, simple_loss=0.2635, pruned_loss=0.06292, over 4679.00 frames. ], tot_loss[loss=0.1756, simple_loss=0.2472, pruned_loss=0.052, over 951295.07 frames. ], batch size: 59, lr: 3.12e-03, grad_scale: 32.0 +2023-03-27 03:18:51,004 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-03-27 03:19:03,923 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.075e+02 1.572e+02 1.864e+02 2.118e+02 3.675e+02, threshold=3.728e+02, percent-clipped=1.0 +2023-03-27 03:19:18,599 INFO [finetune.py:976] (5/7) Epoch 22, batch 5300, loss[loss=0.1953, simple_loss=0.2418, pruned_loss=0.07447, over 4275.00 frames. ], tot_loss[loss=0.1772, simple_loss=0.2488, pruned_loss=0.05282, over 951690.88 frames. 
], batch size: 18, lr: 3.12e-03, grad_scale: 32.0 +2023-03-27 03:19:22,410 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0174, 2.0154, 1.6979, 2.0390, 1.8410, 1.8314, 1.8879, 2.5430], + device='cuda:5'), covar=tensor([0.3425, 0.3618, 0.3228, 0.3376, 0.4012, 0.2259, 0.3647, 0.1646], + device='cuda:5'), in_proj_covar=tensor([0.0288, 0.0261, 0.0233, 0.0276, 0.0254, 0.0224, 0.0252, 0.0234], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:19:32,532 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125603.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:19:52,427 INFO [finetune.py:976] (5/7) Epoch 22, batch 5350, loss[loss=0.1727, simple_loss=0.2478, pruned_loss=0.04883, over 4844.00 frames. ], tot_loss[loss=0.1765, simple_loss=0.2483, pruned_loss=0.05235, over 952854.06 frames. ], batch size: 44, lr: 3.12e-03, grad_scale: 32.0 +2023-03-27 03:20:10,863 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.759e+01 1.403e+02 1.743e+02 2.180e+02 4.274e+02, threshold=3.486e+02, percent-clipped=1.0 +2023-03-27 03:20:13,359 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125664.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:20:25,089 INFO [finetune.py:976] (5/7) Epoch 22, batch 5400, loss[loss=0.187, simple_loss=0.2486, pruned_loss=0.06275, over 4875.00 frames. ], tot_loss[loss=0.1754, simple_loss=0.2466, pruned_loss=0.05209, over 951809.24 frames. ], batch size: 31, lr: 3.12e-03, grad_scale: 32.0 +2023-03-27 03:20:44,654 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125712.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 03:20:53,957 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125725.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:20:58,062 INFO [finetune.py:976] (5/7) Epoch 22, batch 5450, loss[loss=0.1291, simple_loss=0.1941, pruned_loss=0.03206, over 4233.00 frames. ], tot_loss[loss=0.1738, simple_loss=0.2445, pruned_loss=0.05158, over 953242.60 frames. ], batch size: 18, lr: 3.12e-03, grad_scale: 32.0 +2023-03-27 03:20:58,131 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125732.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:21:17,000 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.003e+02 1.470e+02 1.717e+02 2.000e+02 3.602e+02, threshold=3.434e+02, percent-clipped=1.0 +2023-03-27 03:21:27,709 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125777.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:21:31,107 INFO [finetune.py:976] (5/7) Epoch 22, batch 5500, loss[loss=0.228, simple_loss=0.2824, pruned_loss=0.08683, over 4868.00 frames. ], tot_loss[loss=0.1707, simple_loss=0.2411, pruned_loss=0.05013, over 953496.85 frames. 
], batch size: 31, lr: 3.12e-03, grad_scale: 32.0 +2023-03-27 03:21:32,415 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125784.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:21:33,664 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125786.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:21:56,444 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6390, 2.4541, 2.0664, 1.0855, 2.2288, 2.0153, 1.8556, 2.1707], + device='cuda:5'), covar=tensor([0.0933, 0.0771, 0.1675, 0.2081, 0.1422, 0.2289, 0.2344, 0.1047], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0193, 0.0200, 0.0183, 0.0211, 0.0208, 0.0225, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:22:04,441 INFO [finetune.py:976] (5/7) Epoch 22, batch 5550, loss[loss=0.2527, simple_loss=0.3026, pruned_loss=0.1014, over 4795.00 frames. ], tot_loss[loss=0.1733, simple_loss=0.2438, pruned_loss=0.05146, over 953004.09 frames. ], batch size: 51, lr: 3.12e-03, grad_scale: 32.0 +2023-03-27 03:22:04,491 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=125832.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:22:32,016 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.619e+02 1.949e+02 2.379e+02 4.295e+02, threshold=3.899e+02, percent-clipped=6.0 +2023-03-27 03:22:48,804 INFO [finetune.py:976] (5/7) Epoch 22, batch 5600, loss[loss=0.1358, simple_loss=0.2184, pruned_loss=0.02658, over 4746.00 frames. ], tot_loss[loss=0.1763, simple_loss=0.2478, pruned_loss=0.05237, over 954972.97 frames. ], batch size: 27, lr: 3.12e-03, grad_scale: 32.0 +2023-03-27 03:23:20,375 INFO [finetune.py:976] (5/7) Epoch 22, batch 5650, loss[loss=0.1451, simple_loss=0.2211, pruned_loss=0.03455, over 4736.00 frames. ], tot_loss[loss=0.1784, simple_loss=0.2508, pruned_loss=0.053, over 956356.41 frames. ], batch size: 23, lr: 3.12e-03, grad_scale: 32.0 +2023-03-27 03:23:47,405 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125959.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:23:48,546 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.919e+01 1.591e+02 1.865e+02 2.221e+02 3.612e+02, threshold=3.730e+02, percent-clipped=0.0 +2023-03-27 03:23:54,454 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-03-27 03:23:54,604 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7083, 1.7816, 2.2688, 1.9780, 1.9286, 3.7865, 1.7218, 1.8108], + device='cuda:5'), covar=tensor([0.0909, 0.1625, 0.0952, 0.0882, 0.1460, 0.0283, 0.1366, 0.1717], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0077, 0.0092, 0.0081, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 03:24:11,014 INFO [finetune.py:976] (5/7) Epoch 22, batch 5700, loss[loss=0.1693, simple_loss=0.2279, pruned_loss=0.05534, over 4338.00 frames. ], tot_loss[loss=0.1749, simple_loss=0.246, pruned_loss=0.05188, over 937463.75 frames. 
], batch size: 19, lr: 3.12e-03, grad_scale: 32.0 +2023-03-27 03:24:18,176 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8912, 1.9452, 2.4572, 2.1895, 2.0661, 3.8390, 1.9715, 1.9771], + device='cuda:5'), covar=tensor([0.0855, 0.1520, 0.0914, 0.0798, 0.1354, 0.0210, 0.1249, 0.1545], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0077, 0.0092, 0.0081, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 03:24:20,523 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125998.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:24:37,873 INFO [finetune.py:976] (5/7) Epoch 23, batch 0, loss[loss=0.1967, simple_loss=0.2686, pruned_loss=0.06243, over 4897.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2686, pruned_loss=0.06243, over 4897.00 frames. ], batch size: 37, lr: 3.12e-03, grad_scale: 32.0 +2023-03-27 03:24:37,873 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-27 03:24:44,229 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6991, 1.6293, 2.0898, 2.9087, 1.9562, 2.3132, 1.0885, 2.4699], + device='cuda:5'), covar=tensor([0.1491, 0.1187, 0.1002, 0.0541, 0.0866, 0.1114, 0.1532, 0.0482], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0133, 0.0163, 0.0100, 0.0135, 0.0123, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 03:24:45,225 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0444, 1.8010, 2.0493, 1.2961, 1.9803, 2.0193, 2.0581, 1.6821], + device='cuda:5'), covar=tensor([0.0518, 0.0738, 0.0602, 0.0865, 0.0779, 0.0656, 0.0585, 0.1166], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0134, 0.0138, 0.0119, 0.0123, 0.0137, 0.0137, 0.0160], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:24:53,038 INFO [finetune.py:1010] (5/7) Epoch 23, validation: loss=0.1587, simple_loss=0.2268, pruned_loss=0.04533, over 2265189.00 frames. +2023-03-27 03:24:53,039 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-27 03:24:59,383 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126012.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 03:25:12,072 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126032.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:25:30,344 INFO [finetune.py:976] (5/7) Epoch 23, batch 50, loss[loss=0.1613, simple_loss=0.233, pruned_loss=0.04485, over 4907.00 frames. ], tot_loss[loss=0.1769, simple_loss=0.2478, pruned_loss=0.05299, over 216736.96 frames. 
], batch size: 46, lr: 3.12e-03, grad_scale: 32.0 +2023-03-27 03:25:30,933 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126059.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:25:31,917 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=126060.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:25:32,455 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.266e+01 1.545e+02 1.923e+02 2.325e+02 3.929e+02, threshold=3.846e+02, percent-clipped=1.0 +2023-03-27 03:25:42,853 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126077.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:25:44,666 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=126080.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:25:45,301 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126081.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:26:03,745 INFO [finetune.py:976] (5/7) Epoch 23, batch 100, loss[loss=0.1848, simple_loss=0.2597, pruned_loss=0.05496, over 4826.00 frames. ], tot_loss[loss=0.1714, simple_loss=0.2423, pruned_loss=0.05026, over 382073.66 frames. ], batch size: 39, lr: 3.12e-03, grad_scale: 32.0 +2023-03-27 03:26:11,480 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6103, 1.1173, 0.8124, 1.5309, 2.0654, 1.1658, 1.3848, 1.5124], + device='cuda:5'), covar=tensor([0.1442, 0.2160, 0.1892, 0.1173, 0.1982, 0.1965, 0.1527, 0.1906], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0095, 0.0110, 0.0092, 0.0120, 0.0094, 0.0099, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 03:26:14,893 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=126125.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:26:19,893 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.1898, 2.7815, 2.6016, 1.3907, 2.6700, 2.3136, 2.2036, 2.4155], + device='cuda:5'), covar=tensor([0.0979, 0.0853, 0.1821, 0.2103, 0.1908, 0.2066, 0.1991, 0.1128], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0194, 0.0201, 0.0184, 0.0211, 0.0210, 0.0226, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:26:37,116 INFO [finetune.py:976] (5/7) Epoch 23, batch 150, loss[loss=0.1705, simple_loss=0.2406, pruned_loss=0.05019, over 4763.00 frames. ], tot_loss[loss=0.1699, simple_loss=0.2389, pruned_loss=0.05052, over 509304.75 frames. ], batch size: 26, lr: 3.12e-03, grad_scale: 32.0 +2023-03-27 03:26:38,294 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.556e+02 1.791e+02 2.255e+02 5.687e+02, threshold=3.583e+02, percent-clipped=3.0 +2023-03-27 03:27:10,673 INFO [finetune.py:976] (5/7) Epoch 23, batch 200, loss[loss=0.1708, simple_loss=0.2444, pruned_loss=0.04859, over 4831.00 frames. ], tot_loss[loss=0.1702, simple_loss=0.2389, pruned_loss=0.05075, over 609083.41 frames. ], batch size: 39, lr: 3.12e-03, grad_scale: 32.0 +2023-03-27 03:27:45,411 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-27 03:28:05,168 INFO [finetune.py:976] (5/7) Epoch 23, batch 250, loss[loss=0.2781, simple_loss=0.3228, pruned_loss=0.1168, over 4256.00 frames. ], tot_loss[loss=0.1738, simple_loss=0.2433, pruned_loss=0.05212, over 686078.30 frames. 
], batch size: 65, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:28:05,281 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126259.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:28:06,384 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.143e+02 1.625e+02 1.849e+02 2.180e+02 4.181e+02, threshold=3.697e+02, percent-clipped=1.0 +2023-03-27 03:28:37,018 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=126307.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:28:40,580 INFO [finetune.py:976] (5/7) Epoch 23, batch 300, loss[loss=0.1954, simple_loss=0.2774, pruned_loss=0.05671, over 4895.00 frames. ], tot_loss[loss=0.1742, simple_loss=0.2454, pruned_loss=0.05145, over 744653.09 frames. ], batch size: 35, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:29:00,161 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.03 vs. limit=5.0 +2023-03-27 03:29:20,739 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126354.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:29:24,666 INFO [finetune.py:976] (5/7) Epoch 23, batch 350, loss[loss=0.1806, simple_loss=0.2573, pruned_loss=0.05195, over 4888.00 frames. ], tot_loss[loss=0.1758, simple_loss=0.2477, pruned_loss=0.05196, over 792288.15 frames. ], batch size: 32, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:29:25,830 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.521e+01 1.514e+02 1.805e+02 2.248e+02 3.946e+02, threshold=3.610e+02, percent-clipped=1.0 +2023-03-27 03:29:39,003 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126380.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:29:39,585 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126381.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:29:45,237 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.31 vs. limit=5.0 +2023-03-27 03:29:56,089 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9330, 1.3366, 2.0058, 1.9091, 1.7048, 1.6789, 1.8405, 1.8909], + device='cuda:5'), covar=tensor([0.3992, 0.4113, 0.3124, 0.3697, 0.4720, 0.4039, 0.4623, 0.3014], + device='cuda:5'), in_proj_covar=tensor([0.0259, 0.0244, 0.0264, 0.0287, 0.0284, 0.0262, 0.0293, 0.0246], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:29:59,416 INFO [finetune.py:976] (5/7) Epoch 23, batch 400, loss[loss=0.1491, simple_loss=0.2318, pruned_loss=0.03322, over 4779.00 frames. ], tot_loss[loss=0.1763, simple_loss=0.2488, pruned_loss=0.05184, over 829303.03 frames. ], batch size: 29, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:30:21,836 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=126429.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:30:25,102 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. 
limit=2.0 +2023-03-27 03:30:29,127 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7571, 2.0375, 1.6694, 1.7858, 2.3297, 2.2722, 2.0155, 1.9512], + device='cuda:5'), covar=tensor([0.0505, 0.0314, 0.0599, 0.0307, 0.0305, 0.0537, 0.0342, 0.0377], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0106, 0.0142, 0.0111, 0.0098, 0.0111, 0.0101, 0.0111], + device='cuda:5'), out_proj_covar=tensor([7.6345e-05, 8.1182e-05, 1.1140e-04, 8.5128e-05, 7.6349e-05, 8.1906e-05, + 7.5054e-05, 8.4533e-05], device='cuda:5') +2023-03-27 03:30:29,722 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126441.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:30:35,181 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126450.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:30:40,549 INFO [finetune.py:976] (5/7) Epoch 23, batch 450, loss[loss=0.1938, simple_loss=0.266, pruned_loss=0.06085, over 4899.00 frames. ], tot_loss[loss=0.1751, simple_loss=0.2473, pruned_loss=0.05139, over 856929.47 frames. ], batch size: 32, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:30:42,257 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.347e+01 1.414e+02 1.639e+02 2.017e+02 3.767e+02, threshold=3.277e+02, percent-clipped=3.0 +2023-03-27 03:30:42,666 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-03-27 03:30:45,271 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2208, 2.0592, 1.9119, 2.3512, 2.7679, 2.2259, 2.0636, 1.6878], + device='cuda:5'), covar=tensor([0.2190, 0.1922, 0.1882, 0.1531, 0.1736, 0.1145, 0.2154, 0.1859], + device='cuda:5'), in_proj_covar=tensor([0.0246, 0.0211, 0.0213, 0.0197, 0.0244, 0.0189, 0.0217, 0.0205], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:31:13,830 INFO [finetune.py:976] (5/7) Epoch 23, batch 500, loss[loss=0.1462, simple_loss=0.223, pruned_loss=0.03475, over 4894.00 frames. ], tot_loss[loss=0.1739, simple_loss=0.246, pruned_loss=0.05086, over 880903.30 frames. ], batch size: 43, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:31:15,650 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126511.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:31:47,405 INFO [finetune.py:976] (5/7) Epoch 23, batch 550, loss[loss=0.1767, simple_loss=0.251, pruned_loss=0.05118, over 4833.00 frames. ], tot_loss[loss=0.1728, simple_loss=0.244, pruned_loss=0.05079, over 895490.32 frames. ], batch size: 40, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:31:48,616 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.316e+01 1.588e+02 1.845e+02 2.407e+02 4.330e+02, threshold=3.691e+02, percent-clipped=4.0 +2023-03-27 03:32:13,005 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-03-27 03:32:21,216 INFO [finetune.py:976] (5/7) Epoch 23, batch 600, loss[loss=0.2166, simple_loss=0.2878, pruned_loss=0.07277, over 4853.00 frames. ], tot_loss[loss=0.1742, simple_loss=0.2451, pruned_loss=0.0517, over 909308.20 frames. ], batch size: 44, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:32:31,640 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-03-27 03:32:44,163 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.54 vs. 
limit=5.0 +2023-03-27 03:33:02,509 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126654.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:33:03,817 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7827, 1.6437, 1.4196, 1.3723, 1.8015, 1.5456, 1.8091, 1.7623], + device='cuda:5'), covar=tensor([0.1470, 0.2032, 0.3213, 0.2698, 0.2673, 0.1766, 0.3225, 0.1827], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0190, 0.0237, 0.0256, 0.0251, 0.0207, 0.0217, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:33:05,530 INFO [finetune.py:976] (5/7) Epoch 23, batch 650, loss[loss=0.1806, simple_loss=0.2564, pruned_loss=0.05246, over 4828.00 frames. ], tot_loss[loss=0.1757, simple_loss=0.247, pruned_loss=0.05221, over 919193.66 frames. ], batch size: 47, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:33:06,760 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.083e+02 1.579e+02 1.902e+02 2.270e+02 1.001e+03, threshold=3.804e+02, percent-clipped=1.0 +2023-03-27 03:33:11,462 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-03-27 03:33:13,495 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-03-27 03:33:42,520 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=126702.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:33:47,143 INFO [finetune.py:976] (5/7) Epoch 23, batch 700, loss[loss=0.1711, simple_loss=0.2508, pruned_loss=0.04576, over 4762.00 frames. ], tot_loss[loss=0.1758, simple_loss=0.2478, pruned_loss=0.05192, over 928226.83 frames. ], batch size: 27, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:34:11,553 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126736.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:34:29,141 INFO [finetune.py:976] (5/7) Epoch 23, batch 750, loss[loss=0.1569, simple_loss=0.2401, pruned_loss=0.03688, over 4811.00 frames. ], tot_loss[loss=0.1753, simple_loss=0.2481, pruned_loss=0.05125, over 935409.29 frames. ], batch size: 41, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:34:30,819 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.063e+01 1.442e+02 1.710e+02 2.057e+02 3.398e+02, threshold=3.419e+02, percent-clipped=0.0 +2023-03-27 03:34:43,722 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. 
limit=2.0 +2023-03-27 03:34:44,564 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3150, 1.9807, 2.5175, 4.1502, 2.7816, 2.7526, 1.2318, 3.3720], + device='cuda:5'), covar=tensor([0.1619, 0.1371, 0.1439, 0.0573, 0.0729, 0.1475, 0.1791, 0.0455], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0133, 0.0163, 0.0100, 0.0136, 0.0124, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 03:34:56,263 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8351, 1.6673, 2.0951, 3.4101, 2.2468, 2.5170, 1.4457, 2.7589], + device='cuda:5'), covar=tensor([0.1562, 0.1315, 0.1351, 0.0550, 0.0789, 0.1204, 0.1435, 0.0515], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0133, 0.0163, 0.0100, 0.0136, 0.0124, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 03:35:00,639 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126806.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:35:02,396 INFO [finetune.py:976] (5/7) Epoch 23, batch 800, loss[loss=0.2395, simple_loss=0.2986, pruned_loss=0.09021, over 4824.00 frames. ], tot_loss[loss=0.1752, simple_loss=0.2478, pruned_loss=0.05127, over 938154.92 frames. ], batch size: 30, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:35:07,272 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126816.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:35:44,514 INFO [finetune.py:976] (5/7) Epoch 23, batch 850, loss[loss=0.1513, simple_loss=0.227, pruned_loss=0.03782, over 4900.00 frames. ], tot_loss[loss=0.1739, simple_loss=0.246, pruned_loss=0.05088, over 940450.91 frames. ], batch size: 43, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:35:45,684 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.329e+01 1.478e+02 1.768e+02 2.024e+02 3.574e+02, threshold=3.536e+02, percent-clipped=1.0 +2023-03-27 03:35:56,143 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126877.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:36:18,421 INFO [finetune.py:976] (5/7) Epoch 23, batch 900, loss[loss=0.2102, simple_loss=0.2737, pruned_loss=0.07331, over 4828.00 frames. ], tot_loss[loss=0.1724, simple_loss=0.244, pruned_loss=0.05045, over 945564.49 frames. ], batch size: 39, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:36:19,717 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.9142, 4.2462, 4.4673, 4.6990, 4.6985, 4.4205, 5.0196, 1.4654], + device='cuda:5'), covar=tensor([0.0853, 0.0815, 0.0788, 0.1108, 0.1205, 0.1449, 0.0577, 0.5999], + device='cuda:5'), in_proj_covar=tensor([0.0347, 0.0245, 0.0279, 0.0290, 0.0334, 0.0283, 0.0304, 0.0299], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:36:52,034 INFO [finetune.py:976] (5/7) Epoch 23, batch 950, loss[loss=0.1277, simple_loss=0.2057, pruned_loss=0.02484, over 4792.00 frames. ], tot_loss[loss=0.172, simple_loss=0.2427, pruned_loss=0.05058, over 946148.62 frames. 
], batch size: 29, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:36:53,226 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.057e+02 1.486e+02 1.775e+02 2.132e+02 3.351e+02, threshold=3.551e+02, percent-clipped=0.0 +2023-03-27 03:37:06,077 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5891, 3.2606, 3.1365, 1.4976, 3.4299, 2.6930, 1.1108, 2.3949], + device='cuda:5'), covar=tensor([0.2208, 0.2317, 0.1682, 0.3501, 0.1108, 0.0911, 0.3790, 0.1494], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0177, 0.0159, 0.0128, 0.0160, 0.0123, 0.0148, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 03:37:26,098 INFO [finetune.py:976] (5/7) Epoch 23, batch 1000, loss[loss=0.1919, simple_loss=0.2458, pruned_loss=0.06895, over 4884.00 frames. ], tot_loss[loss=0.174, simple_loss=0.2451, pruned_loss=0.0515, over 948192.49 frames. ], batch size: 32, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:37:28,006 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8958, 2.0862, 1.7014, 1.8181, 2.4149, 2.4792, 2.0595, 1.9973], + device='cuda:5'), covar=tensor([0.0446, 0.0315, 0.0604, 0.0370, 0.0265, 0.0622, 0.0348, 0.0387], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0106, 0.0142, 0.0111, 0.0098, 0.0111, 0.0100, 0.0111], + device='cuda:5'), out_proj_covar=tensor([7.6302e-05, 8.1042e-05, 1.1126e-04, 8.4920e-05, 7.6002e-05, 8.1681e-05, + 7.4361e-05, 8.4470e-05], device='cuda:5') +2023-03-27 03:37:43,484 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=127036.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:38:01,444 INFO [finetune.py:976] (5/7) Epoch 23, batch 1050, loss[loss=0.1759, simple_loss=0.2489, pruned_loss=0.05144, over 4895.00 frames. ], tot_loss[loss=0.1763, simple_loss=0.248, pruned_loss=0.05227, over 948616.05 frames. ], batch size: 35, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:38:02,653 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.005e+02 1.597e+02 1.830e+02 2.248e+02 5.450e+02, threshold=3.660e+02, percent-clipped=4.0 +2023-03-27 03:38:10,483 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.64 vs. limit=2.0 +2023-03-27 03:38:27,256 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=127084.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:38:40,824 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7851, 1.6855, 1.8205, 1.1571, 1.8262, 1.9255, 1.8028, 1.5294], + device='cuda:5'), covar=tensor([0.0614, 0.0743, 0.0691, 0.0875, 0.0751, 0.0673, 0.0653, 0.1128], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0135, 0.0139, 0.0120, 0.0125, 0.0137, 0.0138, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:38:42,630 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=127106.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:38:44,897 INFO [finetune.py:976] (5/7) Epoch 23, batch 1100, loss[loss=0.1682, simple_loss=0.2337, pruned_loss=0.05134, over 4025.00 frames. ], tot_loss[loss=0.1761, simple_loss=0.2481, pruned_loss=0.05207, over 948699.44 frames. 
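The tot_loss entries carry fractional frame counts (e.g. "over 948616.05 frames"), which points to an exponentially decayed running sum rather than a plain sliding window. A sketch under that assumption; tying the decay to the run's reset_interval of 200 is a guess:

```python
class RunningLoss:
    """Exponentially decayed (loss_sum, frame_count) pair; the reported
    tot_loss would be loss_sum / frame_count, and the decayed frame count
    naturally comes out fractional, as in the log."""
    def __init__(self, reset_interval=200):
        self.decay = 1.0 - 1.0 / reset_interval
        self.loss_sum = 0.0
        self.frames = 0.0

    def update(self, batch_loss_sum, batch_frames):
        self.loss_sum = self.loss_sum * self.decay + batch_loss_sum
        self.frames = self.frames * self.decay + batch_frames

    @property
    def value(self):
        return self.loss_sum / max(self.frames, 1.0)
```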
], batch size: 65, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:38:46,830 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6862, 1.5984, 1.4264, 1.7842, 1.9938, 1.8050, 1.3492, 1.4035], + device='cuda:5'), covar=tensor([0.2060, 0.1860, 0.1847, 0.1515, 0.1593, 0.1168, 0.2427, 0.1841], + device='cuda:5'), in_proj_covar=tensor([0.0246, 0.0211, 0.0214, 0.0198, 0.0245, 0.0190, 0.0218, 0.0205], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:38:57,189 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7504, 1.6395, 2.0070, 1.9231, 1.8479, 3.5464, 1.5475, 1.7586], + device='cuda:5'), covar=tensor([0.0800, 0.1654, 0.0979, 0.0862, 0.1412, 0.0237, 0.1348, 0.1653], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0077, 0.0092, 0.0081, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 03:39:02,085 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4755, 1.4726, 1.9024, 1.7342, 1.6251, 3.4063, 1.3872, 1.5854], + device='cuda:5'), covar=tensor([0.0962, 0.1788, 0.1106, 0.1019, 0.1646, 0.0235, 0.1496, 0.1792], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0077, 0.0092, 0.0081, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 03:39:14,544 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=127154.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:39:21,917 INFO [finetune.py:976] (5/7) Epoch 23, batch 1150, loss[loss=0.169, simple_loss=0.2447, pruned_loss=0.04661, over 4693.00 frames. ], tot_loss[loss=0.1771, simple_loss=0.2491, pruned_loss=0.05255, over 950990.50 frames. ], batch size: 59, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:39:22,266 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-03-27 03:39:23,620 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.031e+02 1.547e+02 1.781e+02 2.032e+02 3.739e+02, threshold=3.562e+02, percent-clipped=1.0 +2023-03-27 03:39:35,834 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127172.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:40:04,147 INFO [finetune.py:976] (5/7) Epoch 23, batch 1200, loss[loss=0.166, simple_loss=0.2307, pruned_loss=0.05071, over 4884.00 frames. ], tot_loss[loss=0.1758, simple_loss=0.2474, pruned_loss=0.05206, over 952231.26 frames. 
], batch size: 32, lr: 3.11e-03, grad_scale: 64.0 +2023-03-27 03:40:10,142 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1740, 1.9583, 2.1443, 0.9222, 2.4330, 2.5986, 2.2678, 1.8845], + device='cuda:5'), covar=tensor([0.0901, 0.0713, 0.0502, 0.0711, 0.0544, 0.0615, 0.0451, 0.0662], + device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0148, 0.0127, 0.0122, 0.0130, 0.0129, 0.0141, 0.0148], + device='cuda:5'), out_proj_covar=tensor([8.9516e-05, 1.0709e-04, 9.0898e-05, 8.5929e-05, 9.1560e-05, 9.1630e-05, + 1.0076e-04, 1.0597e-04], device='cuda:5') +2023-03-27 03:40:33,025 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127249.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:40:42,287 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6824, 1.1522, 0.8763, 1.5080, 2.1725, 1.2222, 1.3383, 1.4852], + device='cuda:5'), covar=tensor([0.1499, 0.2232, 0.2044, 0.1272, 0.1717, 0.2029, 0.1598, 0.2117], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0094, 0.0110, 0.0092, 0.0119, 0.0093, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 03:40:44,044 INFO [finetune.py:976] (5/7) Epoch 23, batch 1250, loss[loss=0.1459, simple_loss=0.2215, pruned_loss=0.03518, over 4757.00 frames. ], tot_loss[loss=0.1732, simple_loss=0.2448, pruned_loss=0.05077, over 953625.28 frames. ], batch size: 26, lr: 3.11e-03, grad_scale: 64.0 +2023-03-27 03:40:45,215 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.051e+02 1.489e+02 1.742e+02 2.248e+02 3.707e+02, threshold=3.484e+02, percent-clipped=1.0 +2023-03-27 03:41:21,264 INFO [finetune.py:976] (5/7) Epoch 23, batch 1300, loss[loss=0.1673, simple_loss=0.241, pruned_loss=0.04677, over 4829.00 frames. ], tot_loss[loss=0.171, simple_loss=0.2423, pruned_loss=0.04989, over 953316.57 frames. ], batch size: 40, lr: 3.11e-03, grad_scale: 64.0 +2023-03-27 03:41:22,024 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127310.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:41:23,224 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7913, 1.8289, 1.7312, 1.8795, 1.4200, 4.3082, 1.6013, 1.9404], + device='cuda:5'), covar=tensor([0.3055, 0.2315, 0.1896, 0.2034, 0.1558, 0.0143, 0.2401, 0.1189], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0116, 0.0120, 0.0123, 0.0113, 0.0095, 0.0094, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 03:41:54,395 INFO [finetune.py:976] (5/7) Epoch 23, batch 1350, loss[loss=0.1378, simple_loss=0.2192, pruned_loss=0.02826, over 4824.00 frames. ], tot_loss[loss=0.1702, simple_loss=0.2418, pruned_loss=0.04929, over 953638.62 frames. 
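The zipformer.py:1188 records describe per-stack stochastic layer dropping: each stack has a warmup window (warmup_begin/warmup_end, in batches) and, per batch, a set of layers to bypass. At batch_count near 127k, far past every warmup_end, num_to_drop is almost always 0, with an occasional single layer still dropped (e.g. layers_to_drop={1} at batch_count=127427.0 just below). A hedged sketch of such a schedule; the probabilities and the linear decay are illustrative assumptions, not the model's actual constants:

```python
import random

def choose_layers_to_drop(batch_count, warmup_begin, warmup_end,
                          num_layers, initial_drop_prob=0.5,
                          final_drop_prob=0.05, seed=None):
    # Per-batch stochastic depth: the drop probability decays linearly over
    # the warmup window, then stays at a small floor, so late in training
    # most batches drop nothing and a few drop a single layer.
    rng = random.Random(seed)
    frac = (batch_count - warmup_begin) / (warmup_end - warmup_begin)
    frac = min(1.0, max(0.0, frac))
    p = initial_drop_prob + frac * (final_drop_prob - initial_drop_prob)
    return {i for i in range(num_layers) if rng.random() < p}
```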
], batch size: 30, lr: 3.11e-03, grad_scale: 64.0 +2023-03-27 03:41:55,606 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.051e+02 1.500e+02 1.797e+02 2.250e+02 4.549e+02, threshold=3.594e+02, percent-clipped=1.0 +2023-03-27 03:41:56,852 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0742, 2.0961, 2.0883, 2.1339, 1.8384, 3.7962, 1.9106, 2.4355], + device='cuda:5'), covar=tensor([0.2958, 0.2281, 0.1787, 0.2019, 0.1422, 0.0247, 0.2227, 0.1042], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0117, 0.0121, 0.0124, 0.0113, 0.0096, 0.0094, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 03:42:27,772 INFO [finetune.py:976] (5/7) Epoch 23, batch 1400, loss[loss=0.1829, simple_loss=0.2348, pruned_loss=0.06547, over 4788.00 frames. ], tot_loss[loss=0.1744, simple_loss=0.2461, pruned_loss=0.05134, over 953925.69 frames. ], batch size: 26, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:42:28,508 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.1938, 1.3687, 1.4784, 0.7883, 1.3569, 1.6442, 1.6545, 1.3531], + device='cuda:5'), covar=tensor([0.0869, 0.0601, 0.0502, 0.0443, 0.0433, 0.0496, 0.0295, 0.0671], + device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0149, 0.0128, 0.0122, 0.0131, 0.0130, 0.0142, 0.0149], + device='cuda:5'), out_proj_covar=tensor([9.0077e-05, 1.0739e-04, 9.1468e-05, 8.6158e-05, 9.1887e-05, 9.2330e-05, + 1.0113e-04, 1.0634e-04], device='cuda:5') +2023-03-27 03:42:40,735 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127427.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 03:43:01,003 INFO [finetune.py:976] (5/7) Epoch 23, batch 1450, loss[loss=0.1835, simple_loss=0.2572, pruned_loss=0.05487, over 4901.00 frames. ], tot_loss[loss=0.1759, simple_loss=0.2479, pruned_loss=0.0519, over 953909.05 frames. ], batch size: 36, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:43:03,314 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.817e+01 1.596e+02 1.913e+02 2.194e+02 3.811e+02, threshold=3.827e+02, percent-clipped=1.0 +2023-03-27 03:43:09,877 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=127472.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:43:25,064 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127488.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 03:43:26,262 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8847, 4.2173, 3.8348, 1.9395, 4.2130, 3.2720, 0.8570, 3.0224], + device='cuda:5'), covar=tensor([0.2101, 0.1850, 0.1515, 0.3312, 0.0901, 0.0949, 0.4638, 0.1377], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0178, 0.0160, 0.0129, 0.0161, 0.0124, 0.0150, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 03:43:46,710 INFO [finetune.py:976] (5/7) Epoch 23, batch 1500, loss[loss=0.1544, simple_loss=0.2363, pruned_loss=0.03627, over 4803.00 frames. ], tot_loss[loss=0.1766, simple_loss=0.2492, pruned_loss=0.05206, over 955729.47 frames. 
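grad_scale in the surrounding records steps between powers of two (32.0 at batch 1150, 64.0 at batch 1200, back to 32.0 by batch 1400), the signature of dynamic loss scaling for the fp16 training enabled in this run: the scale doubles after a stretch of overflow-free steps and halves when inf/nan gradients appear. A toy version of that policy; growth_interval=2000 mirrors torch.cuda.amp.GradScaler's default, but treat the constants as illustrative:

```python
class ToyLossScaler:
    """Double the loss scale after `growth_interval` consecutive finite
    steps; halve it (and skip the optimizer step) on overflow."""
    def __init__(self, init_scale=32.0, growth_interval=2000):
        self.scale = init_scale
        self.growth_interval = growth_interval
        self._good_steps = 0

    def update(self, found_inf: bool):
        if found_inf:
            self.scale /= 2.0
            self._good_steps = 0
        else:
            self._good_steps += 1
            if self._good_steps >= self.growth_interval:
                self.scale *= 2.0
                self._good_steps = 0
```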
], batch size: 25, lr: 3.11e-03, grad_scale: 32.0 +2023-03-27 03:43:54,392 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=127520.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:44:00,469 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127529.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:44:20,425 INFO [finetune.py:976] (5/7) Epoch 23, batch 1550, loss[loss=0.156, simple_loss=0.2266, pruned_loss=0.04269, over 4865.00 frames. ], tot_loss[loss=0.1763, simple_loss=0.2488, pruned_loss=0.05196, over 955964.07 frames. ], batch size: 31, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:44:22,241 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.110e+02 1.462e+02 1.755e+02 2.123e+02 3.197e+02, threshold=3.511e+02, percent-clipped=0.0 +2023-03-27 03:44:48,015 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127590.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:45:04,792 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127605.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:45:07,683 INFO [finetune.py:976] (5/7) Epoch 23, batch 1600, loss[loss=0.1613, simple_loss=0.2228, pruned_loss=0.04986, over 4851.00 frames. ], tot_loss[loss=0.1744, simple_loss=0.2461, pruned_loss=0.05137, over 956008.68 frames. ], batch size: 49, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:45:34,766 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0 +2023-03-27 03:45:40,961 INFO [finetune.py:976] (5/7) Epoch 23, batch 1650, loss[loss=0.1458, simple_loss=0.2211, pruned_loss=0.03529, over 4771.00 frames. ], tot_loss[loss=0.1732, simple_loss=0.2438, pruned_loss=0.05129, over 953925.83 frames. ], batch size: 28, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:45:43,336 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.066e+02 1.567e+02 1.812e+02 2.212e+02 4.212e+02, threshold=3.624e+02, percent-clipped=4.0 +2023-03-27 03:45:43,480 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127662.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:46:26,412 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.1802, 2.8815, 3.0188, 2.9387, 2.7973, 2.7600, 3.2378, 0.9119], + device='cuda:5'), covar=tensor([0.1915, 0.2279, 0.2103, 0.2648, 0.2915, 0.3036, 0.2097, 0.8513], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0247, 0.0281, 0.0293, 0.0336, 0.0287, 0.0306, 0.0303], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:46:26,935 INFO [finetune.py:976] (5/7) Epoch 23, batch 1700, loss[loss=0.1768, simple_loss=0.2423, pruned_loss=0.05565, over 4830.00 frames. ], tot_loss[loss=0.171, simple_loss=0.2413, pruned_loss=0.05038, over 955198.64 frames. ], batch size: 30, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:46:36,927 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127723.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:47:00,091 INFO [finetune.py:976] (5/7) Epoch 23, batch 1750, loss[loss=0.1792, simple_loss=0.2587, pruned_loss=0.04983, over 4812.00 frames. ], tot_loss[loss=0.1734, simple_loss=0.244, pruned_loss=0.05146, over 954501.58 frames. 
], batch size: 38, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:47:01,900 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.127e+02 1.511e+02 1.783e+02 2.157e+02 3.427e+02, threshold=3.565e+02, percent-clipped=0.0 +2023-03-27 03:47:06,848 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5362, 1.4040, 1.2477, 1.5753, 1.5662, 1.5745, 0.9560, 1.2813], + device='cuda:5'), covar=tensor([0.2279, 0.2134, 0.2062, 0.1726, 0.1663, 0.1293, 0.2618, 0.1956], + device='cuda:5'), in_proj_covar=tensor([0.0244, 0.0210, 0.0213, 0.0196, 0.0244, 0.0189, 0.0217, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:47:12,622 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2225, 3.6356, 3.8639, 4.0417, 3.9891, 3.7804, 4.2946, 1.3521], + device='cuda:5'), covar=tensor([0.0801, 0.0886, 0.0812, 0.0998, 0.1310, 0.1640, 0.0760, 0.6016], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0247, 0.0281, 0.0294, 0.0337, 0.0288, 0.0306, 0.0303], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:47:13,908 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127779.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:47:16,792 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127783.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 03:47:22,121 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0433, 1.7975, 1.5337, 1.4410, 2.5339, 2.7016, 1.9930, 1.9074], + device='cuda:5'), covar=tensor([0.0353, 0.0467, 0.0825, 0.0469, 0.0291, 0.0443, 0.0495, 0.0466], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0105, 0.0143, 0.0111, 0.0098, 0.0111, 0.0100, 0.0111], + device='cuda:5'), out_proj_covar=tensor([7.6405e-05, 8.0848e-05, 1.1179e-04, 8.4827e-05, 7.6365e-05, 8.1909e-05, + 7.4560e-05, 8.4357e-05], device='cuda:5') +2023-03-27 03:47:33,985 INFO [finetune.py:976] (5/7) Epoch 23, batch 1800, loss[loss=0.1726, simple_loss=0.2562, pruned_loss=0.04449, over 4827.00 frames. ], tot_loss[loss=0.1752, simple_loss=0.2462, pruned_loss=0.05205, over 953913.33 frames. ], batch size: 33, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:47:54,756 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127840.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:48:01,838 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127850.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:48:07,746 INFO [finetune.py:976] (5/7) Epoch 23, batch 1850, loss[loss=0.2203, simple_loss=0.2925, pruned_loss=0.07402, over 4835.00 frames. ], tot_loss[loss=0.1766, simple_loss=0.2478, pruned_loss=0.05271, over 953024.48 frames. 
], batch size: 47, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:48:09,575 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.256e+01 1.480e+02 1.674e+02 2.095e+02 4.093e+02, threshold=3.347e+02, percent-clipped=1.0 +2023-03-27 03:48:12,662 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.4038, 3.8136, 4.0633, 4.2299, 4.1189, 3.8410, 4.4857, 1.2781], + device='cuda:5'), covar=tensor([0.0772, 0.0883, 0.0813, 0.1007, 0.1213, 0.1743, 0.0633, 0.6287], + device='cuda:5'), in_proj_covar=tensor([0.0352, 0.0248, 0.0283, 0.0296, 0.0339, 0.0290, 0.0307, 0.0305], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:48:16,138 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127872.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:48:19,846 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-03-27 03:48:24,938 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127885.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:48:40,240 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=127905.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:48:42,628 INFO [finetune.py:976] (5/7) Epoch 23, batch 1900, loss[loss=0.14, simple_loss=0.2082, pruned_loss=0.03593, over 4739.00 frames. ], tot_loss[loss=0.1769, simple_loss=0.2486, pruned_loss=0.05265, over 953484.21 frames. ], batch size: 23, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:48:43,945 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127911.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:48:58,802 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127933.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:49:11,958 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=127953.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:49:16,016 INFO [finetune.py:976] (5/7) Epoch 23, batch 1950, loss[loss=0.1538, simple_loss=0.2209, pruned_loss=0.04333, over 4893.00 frames. ], tot_loss[loss=0.1757, simple_loss=0.2469, pruned_loss=0.0522, over 953481.58 frames. ], batch size: 32, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:49:17,849 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.653e+01 1.391e+02 1.764e+02 2.084e+02 3.552e+02, threshold=3.528e+02, percent-clipped=1.0 +2023-03-27 03:49:53,068 INFO [finetune.py:976] (5/7) Epoch 23, batch 2000, loss[loss=0.1829, simple_loss=0.2552, pruned_loss=0.05535, over 4824.00 frames. ], tot_loss[loss=0.1734, simple_loss=0.2447, pruned_loss=0.05104, over 955277.65 frames. ], batch size: 33, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:50:00,296 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.46 vs. limit=5.0 +2023-03-27 03:50:07,575 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128018.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:50:22,513 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. 
limit=2.0 +2023-03-27 03:50:30,134 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.6033, 1.5787, 1.5050, 0.8392, 1.7115, 1.8302, 1.8808, 1.4407], + device='cuda:5'), covar=tensor([0.0903, 0.0648, 0.0551, 0.0552, 0.0496, 0.0571, 0.0308, 0.0711], + device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0150, 0.0128, 0.0124, 0.0131, 0.0130, 0.0142, 0.0150], + device='cuda:5'), out_proj_covar=tensor([9.0507e-05, 1.0833e-04, 9.1427e-05, 8.7008e-05, 9.2164e-05, 9.2862e-05, + 1.0160e-04, 1.0723e-04], device='cuda:5') +2023-03-27 03:50:36,148 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-03-27 03:50:38,970 INFO [finetune.py:976] (5/7) Epoch 23, batch 2050, loss[loss=0.1617, simple_loss=0.2246, pruned_loss=0.04942, over 4872.00 frames. ], tot_loss[loss=0.1699, simple_loss=0.2407, pruned_loss=0.04958, over 956355.92 frames. ], batch size: 31, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:50:39,082 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4774, 1.4706, 1.5759, 1.5395, 1.7000, 3.0029, 1.4392, 1.5837], + device='cuda:5'), covar=tensor([0.0934, 0.1789, 0.1062, 0.1002, 0.1461, 0.0291, 0.1405, 0.1735], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0074, 0.0076, 0.0091, 0.0081, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 03:50:41,269 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.050e+02 1.483e+02 1.749e+02 2.101e+02 3.191e+02, threshold=3.498e+02, percent-clipped=0.0 +2023-03-27 03:50:48,157 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2116, 2.1669, 1.8319, 2.1193, 1.9902, 2.0164, 2.0306, 2.8506], + device='cuda:5'), covar=tensor([0.3920, 0.4317, 0.3305, 0.4063, 0.4479, 0.2525, 0.4024, 0.1577], + device='cuda:5'), in_proj_covar=tensor([0.0288, 0.0262, 0.0234, 0.0275, 0.0256, 0.0227, 0.0254, 0.0235], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:50:54,711 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128083.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 03:51:13,975 INFO [finetune.py:976] (5/7) Epoch 23, batch 2100, loss[loss=0.144, simple_loss=0.2068, pruned_loss=0.04065, over 4760.00 frames. ], tot_loss[loss=0.1715, simple_loss=0.2419, pruned_loss=0.05054, over 956487.53 frames. ], batch size: 27, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:51:29,290 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6340, 1.5100, 1.9522, 1.7698, 1.6469, 3.6286, 1.3939, 1.5463], + device='cuda:5'), covar=tensor([0.0967, 0.1852, 0.1242, 0.1027, 0.1648, 0.0234, 0.1597, 0.1917], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0074, 0.0076, 0.0092, 0.0081, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 03:51:40,728 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=128131.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 03:51:43,620 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128135.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:51:49,950 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. 
limit=2.0 +2023-03-27 03:52:00,527 INFO [finetune.py:976] (5/7) Epoch 23, batch 2150, loss[loss=0.1798, simple_loss=0.2569, pruned_loss=0.05136, over 4718.00 frames. ], tot_loss[loss=0.1732, simple_loss=0.2443, pruned_loss=0.05106, over 956053.46 frames. ], batch size: 59, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:52:02,382 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.009e+02 1.558e+02 1.811e+02 2.178e+02 3.611e+02, threshold=3.622e+02, percent-clipped=2.0 +2023-03-27 03:52:17,002 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128185.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:52:29,431 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6024, 1.4406, 2.0933, 3.3596, 2.1700, 2.3683, 1.0821, 2.7603], + device='cuda:5'), covar=tensor([0.1847, 0.1570, 0.1379, 0.0570, 0.0831, 0.1682, 0.1859, 0.0486], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0116, 0.0133, 0.0163, 0.0100, 0.0136, 0.0124, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 03:52:29,453 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5749, 1.5048, 1.5067, 1.5293, 1.1424, 3.5275, 1.4065, 1.7459], + device='cuda:5'), covar=tensor([0.3409, 0.2569, 0.2118, 0.2438, 0.1841, 0.0204, 0.2727, 0.1317], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0120, 0.0123, 0.0113, 0.0096, 0.0094, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 03:52:31,854 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128206.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:52:33,604 INFO [finetune.py:976] (5/7) Epoch 23, batch 2200, loss[loss=0.161, simple_loss=0.2346, pruned_loss=0.04367, over 4822.00 frames. ], tot_loss[loss=0.1754, simple_loss=0.2469, pruned_loss=0.05194, over 955371.90 frames. ], batch size: 40, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:52:38,966 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.58 vs. limit=5.0 +2023-03-27 03:52:46,639 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128228.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:52:49,639 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=128233.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:53:03,237 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4760, 1.6183, 1.2475, 1.5703, 1.9088, 1.7589, 1.4872, 1.3451], + device='cuda:5'), covar=tensor([0.0402, 0.0337, 0.0711, 0.0338, 0.0231, 0.0492, 0.0377, 0.0467], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0107, 0.0145, 0.0112, 0.0100, 0.0111, 0.0101, 0.0112], + device='cuda:5'), out_proj_covar=tensor([7.7370e-05, 8.1739e-05, 1.1327e-04, 8.5686e-05, 7.7459e-05, 8.2081e-05, + 7.5497e-05, 8.5548e-05], device='cuda:5') +2023-03-27 03:53:07,268 INFO [finetune.py:976] (5/7) Epoch 23, batch 2250, loss[loss=0.2194, simple_loss=0.2839, pruned_loss=0.07748, over 4889.00 frames. ], tot_loss[loss=0.1759, simple_loss=0.2478, pruned_loss=0.05204, over 954443.08 frames. 
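The scaling.py:679 records compare a per-module whitening metric against a limit (num_groups=1, num_channels=384, metric=3.58 vs. limit=5.0 just above). One metric with exactly this behavior is the eigenvalue ratio E[lambda^2] / E[lambda]^2 of each group's feature covariance: it is 1.0 when the covariance is isotropic (fully whitened) and grows as variance concentrates in a few directions. A sketch under that assumption, using trace identities so no eigendecomposition is needed:

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    """x: (num_frames, num_channels). Returns E[eig^2] / E[eig]^2 of the
    per-group covariance, averaged over groups (assumed semantics)."""
    num_frames, num_channels = x.shape
    x = x.reshape(num_frames, num_groups, num_channels // num_groups)
    x = x - x.mean(dim=0, keepdim=True)
    # per-group covariance: (num_groups, c, c), c = channels per group
    cov = torch.einsum("ngi,ngj->gij", x, x) / num_frames
    c = cov.shape[-1]
    mean_eig_sq = (cov * cov).sum(dim=(1, 2)) / c           # trace(C @ C) / c
    mean_eig = cov.diagonal(dim1=1, dim2=2).sum(dim=1) / c  # trace(C) / c
    return (mean_eig_sq / mean_eig.clamp(min=1e-20) ** 2).mean()
```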
], batch size: 35, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:53:09,087 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.465e+01 1.491e+02 1.772e+02 2.217e+02 3.841e+02, threshold=3.544e+02, percent-clipped=1.0 +2023-03-27 03:53:13,165 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0156, 1.1877, 1.3718, 1.2945, 1.3217, 2.4618, 1.0326, 1.2428], + device='cuda:5'), covar=tensor([0.1401, 0.2703, 0.1317, 0.1233, 0.2210, 0.0485, 0.2305, 0.2737], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0073, 0.0076, 0.0091, 0.0081, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 03:53:22,740 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1823, 2.0059, 1.7086, 1.8035, 2.1109, 1.8484, 2.1718, 2.1097], + device='cuda:5'), covar=tensor([0.1325, 0.1898, 0.2856, 0.2389, 0.2400, 0.1599, 0.2595, 0.1715], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0189, 0.0236, 0.0256, 0.0251, 0.0206, 0.0216, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:53:40,845 INFO [finetune.py:976] (5/7) Epoch 23, batch 2300, loss[loss=0.1462, simple_loss=0.2149, pruned_loss=0.03879, over 4786.00 frames. ], tot_loss[loss=0.1761, simple_loss=0.2481, pruned_loss=0.052, over 951287.63 frames. ], batch size: 51, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:53:47,303 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128318.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:53:48,502 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128320.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:54:13,558 INFO [finetune.py:976] (5/7) Epoch 23, batch 2350, loss[loss=0.1509, simple_loss=0.2232, pruned_loss=0.03925, over 4764.00 frames. ], tot_loss[loss=0.1747, simple_loss=0.2465, pruned_loss=0.05143, over 952809.51 frames. ], batch size: 27, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:54:15,917 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.030e+02 1.510e+02 1.720e+02 2.103e+02 3.385e+02, threshold=3.440e+02, percent-clipped=0.0 +2023-03-27 03:54:18,455 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=128366.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:54:29,001 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128381.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:54:38,818 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0 +2023-03-27 03:54:46,897 INFO [finetune.py:976] (5/7) Epoch 23, batch 2400, loss[loss=0.1577, simple_loss=0.2266, pruned_loss=0.04438, over 4914.00 frames. ], tot_loss[loss=0.1718, simple_loss=0.2431, pruned_loss=0.05029, over 954130.89 frames. 
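The attn_weights_entropy tensors at zipformer.py:2441 print one diagnostic value per column, consistent with the eight attention heads: peaked, nearly one-hot attention gives values near 0, while a head attending uniformly over L keys approaches log(L). A plausible reconstruction of the diagnostic; the shape convention is an assumption, not the original API:

```python
import torch

def attn_weights_entropy(attn_weights: torch.Tensor) -> torch.Tensor:
    """attn_weights: (num_heads, num_queries, num_keys), rows sum to 1.
    Returns the mean attention entropy per head."""
    eps = 1.0e-20
    entropy = -(attn_weights * (attn_weights + eps).log()).sum(dim=-1)
    return entropy.mean(dim=-1)

# e.g.: w = torch.softmax(torch.randn(8, 50, 50), dim=-1)
#       attn_weights_entropy(w)  # -> 8 values, one per head
```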
], batch size: 43, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:54:47,609 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([5.3180, 4.5622, 4.8064, 5.1394, 5.0601, 4.7784, 5.4548, 1.6748], + device='cuda:5'), covar=tensor([0.0650, 0.0845, 0.0763, 0.0891, 0.1058, 0.1632, 0.0473, 0.5954], + device='cuda:5'), in_proj_covar=tensor([0.0346, 0.0245, 0.0280, 0.0291, 0.0336, 0.0286, 0.0303, 0.0300], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:54:49,469 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128413.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:54:56,535 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6694, 1.6201, 2.0320, 1.8905, 1.7393, 3.6568, 1.5827, 1.7091], + device='cuda:5'), covar=tensor([0.1031, 0.1832, 0.1081, 0.0975, 0.1676, 0.0214, 0.1455, 0.1853], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0074, 0.0076, 0.0092, 0.0081, 0.0086, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 03:55:03,268 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6888, 2.5142, 2.0580, 1.0003, 2.2584, 2.0353, 1.8592, 2.2441], + device='cuda:5'), covar=tensor([0.0802, 0.0748, 0.1576, 0.2074, 0.1392, 0.2255, 0.2251, 0.0958], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0191, 0.0199, 0.0182, 0.0209, 0.0208, 0.0224, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 03:55:06,820 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128435.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:55:34,635 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-03-27 03:55:35,545 INFO [finetune.py:976] (5/7) Epoch 23, batch 2450, loss[loss=0.1394, simple_loss=0.2082, pruned_loss=0.03528, over 4762.00 frames. ], tot_loss[loss=0.17, simple_loss=0.2405, pruned_loss=0.04978, over 955120.14 frames. ], batch size: 26, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:55:41,393 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.054e+01 1.492e+02 1.779e+02 2.209e+02 4.084e+02, threshold=3.557e+02, percent-clipped=2.0 +2023-03-27 03:55:49,811 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128474.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:55:55,747 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=128483.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:56:10,252 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128506.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:56:12,479 INFO [finetune.py:976] (5/7) Epoch 23, batch 2500, loss[loss=0.1957, simple_loss=0.2762, pruned_loss=0.05764, over 4920.00 frames. ], tot_loss[loss=0.1717, simple_loss=0.2423, pruned_loss=0.05056, over 954027.36 frames. 
], batch size: 42, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:56:25,960 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128528.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:56:44,716 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3312, 1.4898, 1.6347, 1.5914, 1.7121, 3.0735, 1.4090, 1.5907], + device='cuda:5'), covar=tensor([0.1048, 0.1805, 0.1014, 0.0942, 0.1581, 0.0308, 0.1510, 0.1859], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0074, 0.0076, 0.0092, 0.0081, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 03:56:48,372 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=128554.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:56:55,780 INFO [finetune.py:976] (5/7) Epoch 23, batch 2550, loss[loss=0.1866, simple_loss=0.2668, pruned_loss=0.05319, over 4820.00 frames. ], tot_loss[loss=0.1738, simple_loss=0.2453, pruned_loss=0.05116, over 955132.73 frames. ], batch size: 40, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:56:58,590 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.110e+02 1.563e+02 1.837e+02 2.202e+02 4.665e+02, threshold=3.674e+02, percent-clipped=3.0 +2023-03-27 03:57:11,629 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=128576.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:57:31,758 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8974, 2.1817, 1.6961, 1.8423, 2.5808, 2.5204, 2.0747, 1.9796], + device='cuda:5'), covar=tensor([0.0376, 0.0299, 0.0553, 0.0342, 0.0199, 0.0496, 0.0320, 0.0435], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0106, 0.0143, 0.0111, 0.0099, 0.0110, 0.0101, 0.0112], + device='cuda:5'), out_proj_covar=tensor([7.6806e-05, 8.1269e-05, 1.1239e-04, 8.4938e-05, 7.7171e-05, 8.1533e-05, + 7.5095e-05, 8.4938e-05], device='cuda:5') +2023-03-27 03:57:33,477 INFO [finetune.py:976] (5/7) Epoch 23, batch 2600, loss[loss=0.1735, simple_loss=0.2506, pruned_loss=0.04817, over 4897.00 frames. ], tot_loss[loss=0.1736, simple_loss=0.2456, pruned_loss=0.05082, over 956423.82 frames. ], batch size: 32, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:57:34,873 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-03-27 03:58:07,053 INFO [finetune.py:976] (5/7) Epoch 23, batch 2650, loss[loss=0.204, simple_loss=0.2628, pruned_loss=0.07263, over 4869.00 frames. ], tot_loss[loss=0.1741, simple_loss=0.2464, pruned_loss=0.05094, over 957083.67 frames. ], batch size: 31, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:58:08,892 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.089e+02 1.579e+02 1.820e+02 2.183e+02 3.562e+02, threshold=3.640e+02, percent-clipped=0.0 +2023-03-27 03:58:18,915 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128676.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:58:40,905 INFO [finetune.py:976] (5/7) Epoch 23, batch 2700, loss[loss=0.1487, simple_loss=0.2127, pruned_loss=0.04229, over 4710.00 frames. ], tot_loss[loss=0.1723, simple_loss=0.245, pruned_loss=0.04978, over 957997.35 frames. ], batch size: 23, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:59:02,892 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.05 vs. 
limit=5.0 +2023-03-27 03:59:14,674 INFO [finetune.py:976] (5/7) Epoch 23, batch 2750, loss[loss=0.1819, simple_loss=0.2397, pruned_loss=0.06202, over 4219.00 frames. ], tot_loss[loss=0.1706, simple_loss=0.2426, pruned_loss=0.04931, over 958483.03 frames. ], batch size: 65, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 03:59:16,468 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.314e+01 1.516e+02 1.813e+02 2.146e+02 3.615e+02, threshold=3.627e+02, percent-clipped=0.0 +2023-03-27 03:59:21,319 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128769.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:59:30,014 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128781.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 03:59:48,514 INFO [finetune.py:976] (5/7) Epoch 23, batch 2800, loss[loss=0.1509, simple_loss=0.2218, pruned_loss=0.04001, over 4909.00 frames. ], tot_loss[loss=0.1681, simple_loss=0.2394, pruned_loss=0.04837, over 956729.15 frames. ], batch size: 32, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 04:00:10,900 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128842.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:00:22,070 INFO [finetune.py:976] (5/7) Epoch 23, batch 2850, loss[loss=0.1575, simple_loss=0.2309, pruned_loss=0.04204, over 4755.00 frames. ], tot_loss[loss=0.1685, simple_loss=0.2393, pruned_loss=0.0489, over 956857.77 frames. ], batch size: 26, lr: 3.10e-03, grad_scale: 32.0 +2023-03-27 04:00:23,884 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.021e+01 1.432e+02 1.754e+02 2.169e+02 4.729e+02, threshold=3.508e+02, percent-clipped=1.0 +2023-03-27 04:00:32,232 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7715, 1.6325, 2.0324, 3.3811, 2.2445, 2.5507, 1.0967, 2.7192], + device='cuda:5'), covar=tensor([0.1749, 0.1356, 0.1395, 0.0568, 0.0817, 0.1242, 0.1850, 0.0509], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0116, 0.0134, 0.0165, 0.0100, 0.0137, 0.0124, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 04:00:55,187 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128890.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 04:01:08,142 INFO [finetune.py:976] (5/7) Epoch 23, batch 2900, loss[loss=0.1617, simple_loss=0.2479, pruned_loss=0.03772, over 4805.00 frames. ], tot_loss[loss=0.1707, simple_loss=0.2418, pruned_loss=0.04985, over 956360.57 frames. ], batch size: 51, lr: 3.09e-03, grad_scale: 32.0 +2023-03-27 04:01:36,729 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128951.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 04:01:41,988 INFO [finetune.py:976] (5/7) Epoch 23, batch 2950, loss[loss=0.1601, simple_loss=0.2456, pruned_loss=0.03733, over 4931.00 frames. ], tot_loss[loss=0.1737, simple_loss=0.2456, pruned_loss=0.05086, over 956140.42 frames. ], batch size: 33, lr: 3.09e-03, grad_scale: 32.0 +2023-03-27 04:01:42,423 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. 
limit=2.0 +2023-03-27 04:01:43,785 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.806e+01 1.599e+02 2.012e+02 2.314e+02 4.261e+02, threshold=4.024e+02, percent-clipped=1.0 +2023-03-27 04:01:52,365 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128976.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:02:00,330 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4408, 1.5241, 2.2319, 1.8559, 1.8158, 3.9685, 1.4733, 1.7073], + device='cuda:5'), covar=tensor([0.0962, 0.1794, 0.1295, 0.0964, 0.1580, 0.0236, 0.1509, 0.1758], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0074, 0.0076, 0.0092, 0.0081, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 04:02:31,486 INFO [finetune.py:976] (5/7) Epoch 23, batch 3000, loss[loss=0.2068, simple_loss=0.2781, pruned_loss=0.06775, over 4918.00 frames. ], tot_loss[loss=0.1757, simple_loss=0.2476, pruned_loss=0.05183, over 956734.20 frames. ], batch size: 42, lr: 3.09e-03, grad_scale: 32.0 +2023-03-27 04:02:31,486 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-27 04:02:34,409 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6312, 3.5760, 3.4309, 1.5286, 3.7023, 2.9183, 0.8238, 2.4437], + device='cuda:5'), covar=tensor([0.1866, 0.1633, 0.1319, 0.3172, 0.0962, 0.0910, 0.3464, 0.1553], + device='cuda:5'), in_proj_covar=tensor([0.0154, 0.0178, 0.0160, 0.0129, 0.0162, 0.0124, 0.0149, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 04:02:42,304 INFO [finetune.py:1010] (5/7) Epoch 23, validation: loss=0.1567, simple_loss=0.225, pruned_loss=0.04424, over 2265189.00 frames. +2023-03-27 04:02:42,304 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-27 04:02:51,934 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=129024.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:03:09,357 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8853, 1.7381, 2.2983, 3.1847, 2.3062, 2.5292, 1.4755, 2.6028], + device='cuda:5'), covar=tensor([0.1404, 0.1165, 0.1040, 0.0555, 0.0634, 0.1884, 0.1390, 0.0517], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0117, 0.0135, 0.0166, 0.0101, 0.0138, 0.0125, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 04:03:14,565 INFO [finetune.py:976] (5/7) Epoch 23, batch 3050, loss[loss=0.1271, simple_loss=0.1982, pruned_loss=0.02803, over 4024.00 frames. ], tot_loss[loss=0.1766, simple_loss=0.2483, pruned_loss=0.05246, over 952577.75 frames. 
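Every record reports loss alongside simple_loss and pruned_loss; in pruned-transducer recipes the total is a weighted sum of the simple (linear-joiner) loss and the pruned RNN-T loss, with weights scheduled over the first warm_step batches. A sketch of one such schedule: only simple_loss_scale=0.5 and warm_step=2000 come from this run's config, while the ramp shape and the 0.1 floor are assumptions. After warmup it reduces to 0.5 * simple_loss + pruned_loss, which reproduces the validation record above (0.5 * 0.225 + 0.04424 = 0.1567):

```python
def combine_transducer_losses(simple_loss, pruned_loss, batch_idx_train,
                              warm_step=2000, simple_loss_scale=0.5):
    # Weight the simple loss down from 1.0 to simple_loss_scale while the
    # pruned loss ramps up to full weight (assumed schedule).
    t = min(1.0, batch_idx_train / warm_step)
    s = 1.0 - t * (1.0 - simple_loss_scale)  # 1.0 -> 0.5
    p = 0.1 + 0.9 * t                        # 0.1 -> 1.0
    return s * simple_loss + p * pruned_loss
```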
], batch size: 17, lr: 3.09e-03, grad_scale: 32.0 +2023-03-27 04:03:15,141 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6913, 1.3987, 0.8679, 1.5544, 2.0716, 1.2705, 1.6859, 1.5888], + device='cuda:5'), covar=tensor([0.1443, 0.1859, 0.1920, 0.1143, 0.1878, 0.1973, 0.1268, 0.1882], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0094, 0.0111, 0.0092, 0.0119, 0.0094, 0.0099, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 04:03:16,833 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.176e+02 1.606e+02 1.899e+02 2.236e+02 5.313e+02, threshold=3.798e+02, percent-clipped=3.0 +2023-03-27 04:03:22,098 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=129069.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:03:45,604 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5230, 2.4058, 2.3170, 1.8528, 2.3254, 2.5685, 2.5575, 2.1392], + device='cuda:5'), covar=tensor([0.0595, 0.0672, 0.0854, 0.0862, 0.1020, 0.0747, 0.0672, 0.1074], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0136, 0.0139, 0.0120, 0.0125, 0.0138, 0.0138, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:03:47,841 INFO [finetune.py:976] (5/7) Epoch 23, batch 3100, loss[loss=0.1735, simple_loss=0.2531, pruned_loss=0.04695, over 4842.00 frames. ], tot_loss[loss=0.1742, simple_loss=0.2457, pruned_loss=0.05131, over 953081.74 frames. ], batch size: 47, lr: 3.09e-03, grad_scale: 32.0 +2023-03-27 04:03:48,757 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.34 vs. limit=5.0 +2023-03-27 04:03:53,280 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=129117.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:04:06,408 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=129137.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:04:18,317 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4141, 2.2523, 1.9664, 2.3438, 2.0999, 2.1355, 2.1330, 2.8845], + device='cuda:5'), covar=tensor([0.3653, 0.3970, 0.3262, 0.3554, 0.4012, 0.2490, 0.3848, 0.1587], + device='cuda:5'), in_proj_covar=tensor([0.0286, 0.0260, 0.0233, 0.0274, 0.0254, 0.0225, 0.0252, 0.0234], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:04:20,606 INFO [finetune.py:976] (5/7) Epoch 23, batch 3150, loss[loss=0.1998, simple_loss=0.2708, pruned_loss=0.06434, over 4824.00 frames. ], tot_loss[loss=0.1721, simple_loss=0.2427, pruned_loss=0.05074, over 953450.79 frames. ], batch size: 41, lr: 3.09e-03, grad_scale: 32.0 +2023-03-27 04:04:22,457 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.200e+02 1.503e+02 1.751e+02 2.296e+02 3.694e+02, threshold=3.502e+02, percent-clipped=0.0 +2023-03-27 04:04:45,023 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.12 vs. limit=2.0 +2023-03-27 04:05:01,881 INFO [finetune.py:976] (5/7) Epoch 23, batch 3200, loss[loss=0.199, simple_loss=0.2582, pruned_loss=0.06991, over 4827.00 frames. ], tot_loss[loss=0.1697, simple_loss=0.2399, pruned_loss=0.04974, over 954558.75 frames. 
], batch size: 33, lr: 3.09e-03, grad_scale: 32.0 +2023-03-27 04:05:26,818 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=129246.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 04:05:27,437 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5753, 2.2881, 2.4152, 1.7154, 2.3378, 2.5036, 2.5764, 2.0394], + device='cuda:5'), covar=tensor([0.0504, 0.0617, 0.0672, 0.0781, 0.0783, 0.0627, 0.0552, 0.1047], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0135, 0.0138, 0.0119, 0.0124, 0.0137, 0.0137, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:05:37,436 INFO [finetune.py:976] (5/7) Epoch 23, batch 3250, loss[loss=0.2054, simple_loss=0.2759, pruned_loss=0.06743, over 4821.00 frames. ], tot_loss[loss=0.1698, simple_loss=0.2398, pruned_loss=0.04983, over 953041.09 frames. ], batch size: 40, lr: 3.09e-03, grad_scale: 32.0 +2023-03-27 04:05:39,768 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.902e+01 1.487e+02 1.735e+02 2.015e+02 4.622e+02, threshold=3.470e+02, percent-clipped=1.0 +2023-03-27 04:06:22,328 INFO [finetune.py:976] (5/7) Epoch 23, batch 3300, loss[loss=0.1619, simple_loss=0.2456, pruned_loss=0.03913, over 4860.00 frames. ], tot_loss[loss=0.1721, simple_loss=0.2431, pruned_loss=0.0506, over 954424.37 frames. ], batch size: 31, lr: 3.09e-03, grad_scale: 32.0 +2023-03-27 04:06:41,598 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-03-27 04:06:56,063 INFO [finetune.py:976] (5/7) Epoch 23, batch 3350, loss[loss=0.2357, simple_loss=0.2937, pruned_loss=0.08882, over 4886.00 frames. ], tot_loss[loss=0.1751, simple_loss=0.2462, pruned_loss=0.05204, over 951178.28 frames. ], batch size: 32, lr: 3.09e-03, grad_scale: 32.0 +2023-03-27 04:06:57,832 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.264e+01 1.616e+02 1.807e+02 2.144e+02 4.365e+02, threshold=3.613e+02, percent-clipped=1.0 +2023-03-27 04:07:47,797 INFO [finetune.py:976] (5/7) Epoch 23, batch 3400, loss[loss=0.1905, simple_loss=0.2459, pruned_loss=0.06752, over 3996.00 frames. ], tot_loss[loss=0.1765, simple_loss=0.248, pruned_loss=0.05254, over 950825.60 frames. 
], batch size: 17, lr: 3.09e-03, grad_scale: 64.0 +2023-03-27 04:07:50,384 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129413.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 04:08:02,165 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3164, 1.7440, 0.9121, 2.1154, 2.5838, 1.7641, 2.0527, 1.8258], + device='cuda:5'), covar=tensor([0.1420, 0.1847, 0.1837, 0.1055, 0.1649, 0.1826, 0.1312, 0.2021], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0094, 0.0110, 0.0092, 0.0119, 0.0094, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 04:08:02,186 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7697, 2.7467, 2.4450, 1.7718, 2.7227, 2.8218, 2.8489, 2.4261], + device='cuda:5'), covar=tensor([0.0577, 0.0611, 0.0779, 0.0854, 0.0676, 0.0714, 0.0572, 0.0986], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0136, 0.0139, 0.0119, 0.0125, 0.0138, 0.0138, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:08:07,364 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=129437.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:08:21,220 INFO [finetune.py:976] (5/7) Epoch 23, batch 3450, loss[loss=0.1869, simple_loss=0.253, pruned_loss=0.06039, over 4924.00 frames. ], tot_loss[loss=0.1749, simple_loss=0.2469, pruned_loss=0.05146, over 951164.29 frames. ], batch size: 41, lr: 3.09e-03, grad_scale: 64.0 +2023-03-27 04:08:23,467 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.062e+02 1.549e+02 1.981e+02 2.372e+02 4.494e+02, threshold=3.962e+02, percent-clipped=6.0 +2023-03-27 04:08:27,824 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.4559, 3.1155, 2.8640, 1.5072, 3.0040, 2.4283, 2.3927, 2.7369], + device='cuda:5'), covar=tensor([0.0747, 0.0775, 0.1407, 0.2010, 0.1405, 0.1908, 0.1843, 0.1042], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0191, 0.0199, 0.0181, 0.0210, 0.0208, 0.0223, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:08:31,791 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129474.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 04:08:39,422 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=129485.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:08:54,911 INFO [finetune.py:976] (5/7) Epoch 23, batch 3500, loss[loss=0.1965, simple_loss=0.2668, pruned_loss=0.06317, over 4835.00 frames. ], tot_loss[loss=0.174, simple_loss=0.2454, pruned_loss=0.05129, over 953003.03 frames. ], batch size: 44, lr: 3.09e-03, grad_scale: 64.0 +2023-03-27 04:09:20,956 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=129546.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 04:09:28,786 INFO [finetune.py:976] (5/7) Epoch 23, batch 3550, loss[loss=0.1685, simple_loss=0.2386, pruned_loss=0.04921, over 4798.00 frames. ], tot_loss[loss=0.1712, simple_loss=0.2419, pruned_loss=0.05026, over 954111.86 frames. 
], batch size: 51, lr: 3.09e-03, grad_scale: 64.0 +2023-03-27 04:09:30,572 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.427e+01 1.468e+02 1.701e+02 2.000e+02 4.470e+02, threshold=3.402e+02, percent-clipped=1.0 +2023-03-27 04:09:52,297 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-03-27 04:09:52,444 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=129594.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 04:09:58,717 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1092, 1.7063, 0.7110, 2.1131, 2.4634, 1.7186, 1.9920, 2.1211], + device='cuda:5'), covar=tensor([0.1376, 0.1844, 0.2061, 0.0996, 0.1693, 0.1750, 0.1286, 0.1742], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0094, 0.0111, 0.0092, 0.0119, 0.0094, 0.0098, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 04:10:11,047 INFO [finetune.py:976] (5/7) Epoch 23, batch 3600, loss[loss=0.13, simple_loss=0.2061, pruned_loss=0.02695, over 4745.00 frames. ], tot_loss[loss=0.1694, simple_loss=0.2395, pruned_loss=0.04962, over 954106.44 frames. ], batch size: 27, lr: 3.09e-03, grad_scale: 64.0 +2023-03-27 04:10:12,979 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129612.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:10:34,294 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6827, 1.5468, 1.0920, 0.2590, 1.2904, 1.4851, 1.4400, 1.4721], + device='cuda:5'), covar=tensor([0.0990, 0.0795, 0.1377, 0.2055, 0.1481, 0.2349, 0.2379, 0.0876], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0192, 0.0200, 0.0182, 0.0211, 0.0209, 0.0224, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:10:40,596 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4366, 2.3641, 2.0118, 0.9880, 2.2453, 1.8915, 1.7981, 2.1799], + device='cuda:5'), covar=tensor([0.0945, 0.0744, 0.1723, 0.2032, 0.1359, 0.2150, 0.2115, 0.0999], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0192, 0.0200, 0.0182, 0.0211, 0.0209, 0.0224, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:10:44,782 INFO [finetune.py:976] (5/7) Epoch 23, batch 3650, loss[loss=0.1668, simple_loss=0.2454, pruned_loss=0.04411, over 4809.00 frames. ], tot_loss[loss=0.1715, simple_loss=0.2419, pruned_loss=0.05061, over 951174.64 frames. 
], batch size: 41, lr: 3.09e-03, grad_scale: 64.0 +2023-03-27 04:10:46,576 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.022e+02 1.535e+02 1.802e+02 2.202e+02 3.404e+02, threshold=3.605e+02, percent-clipped=1.0 +2023-03-27 04:10:53,470 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129673.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:11:08,828 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8257, 1.6106, 1.4405, 1.3420, 1.5839, 1.5348, 1.5948, 2.1738], + device='cuda:5'), covar=tensor([0.3346, 0.3732, 0.2818, 0.3326, 0.3444, 0.2058, 0.3111, 0.1573], + device='cuda:5'), in_proj_covar=tensor([0.0288, 0.0262, 0.0234, 0.0275, 0.0255, 0.0226, 0.0252, 0.0236], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:11:23,355 INFO [finetune.py:976] (5/7) Epoch 23, batch 3700, loss[loss=0.2066, simple_loss=0.2862, pruned_loss=0.06352, over 4906.00 frames. ], tot_loss[loss=0.1752, simple_loss=0.2462, pruned_loss=0.05211, over 951009.26 frames. ], batch size: 37, lr: 3.09e-03, grad_scale: 64.0 +2023-03-27 04:11:27,932 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5844, 1.5516, 2.0673, 3.3962, 2.2561, 2.3932, 1.2691, 2.7405], + device='cuda:5'), covar=tensor([0.1694, 0.1401, 0.1291, 0.0544, 0.0767, 0.1432, 0.1575, 0.0495], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0116, 0.0134, 0.0163, 0.0100, 0.0136, 0.0125, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 04:11:55,697 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5365, 1.3976, 1.2364, 1.5441, 1.6382, 1.5646, 1.0083, 1.2718], + device='cuda:5'), covar=tensor([0.2145, 0.1984, 0.2002, 0.1561, 0.1473, 0.1232, 0.2483, 0.1922], + device='cuda:5'), in_proj_covar=tensor([0.0246, 0.0210, 0.0213, 0.0197, 0.0244, 0.0190, 0.0217, 0.0206], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:12:00,257 INFO [finetune.py:976] (5/7) Epoch 23, batch 3750, loss[loss=0.1824, simple_loss=0.2588, pruned_loss=0.05297, over 4826.00 frames. ], tot_loss[loss=0.1758, simple_loss=0.2472, pruned_loss=0.05219, over 950986.46 frames. 
], batch size: 47, lr: 3.09e-03, grad_scale: 64.0 +2023-03-27 04:12:02,069 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.237e+01 1.564e+02 1.874e+02 2.284e+02 3.839e+02, threshold=3.748e+02, percent-clipped=2.0 +2023-03-27 04:12:06,378 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=129769.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 04:12:06,429 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4258, 1.4009, 1.1988, 1.4365, 1.7323, 1.6016, 1.3765, 1.2126], + device='cuda:5'), covar=tensor([0.0343, 0.0327, 0.0639, 0.0315, 0.0221, 0.0484, 0.0336, 0.0466], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0107, 0.0145, 0.0111, 0.0099, 0.0111, 0.0102, 0.0112], + device='cuda:5'), out_proj_covar=tensor([7.7477e-05, 8.1799e-05, 1.1324e-04, 8.5153e-05, 7.7341e-05, 8.2336e-05, + 7.5803e-05, 8.5047e-05], device='cuda:5') +2023-03-27 04:12:20,807 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129791.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:12:35,164 INFO [finetune.py:976] (5/7) Epoch 23, batch 3800, loss[loss=0.1755, simple_loss=0.2444, pruned_loss=0.05335, over 4254.00 frames. ], tot_loss[loss=0.1755, simple_loss=0.2474, pruned_loss=0.05181, over 949816.01 frames. ], batch size: 65, lr: 3.09e-03, grad_scale: 64.0 +2023-03-27 04:13:16,845 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8897, 2.8439, 2.6428, 1.8871, 2.7082, 2.8751, 3.0130, 2.3578], + device='cuda:5'), covar=tensor([0.0532, 0.0628, 0.0787, 0.0926, 0.0615, 0.0731, 0.0580, 0.1108], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0135, 0.0138, 0.0120, 0.0125, 0.0138, 0.0138, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:13:16,864 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129852.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:13:21,833 INFO [finetune.py:976] (5/7) Epoch 23, batch 3850, loss[loss=0.2515, simple_loss=0.2947, pruned_loss=0.1041, over 4748.00 frames. ], tot_loss[loss=0.1754, simple_loss=0.2471, pruned_loss=0.05181, over 950510.69 frames. ], batch size: 54, lr: 3.09e-03, grad_scale: 64.0 +2023-03-27 04:13:24,150 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.288e+01 1.597e+02 1.881e+02 2.160e+02 3.613e+02, threshold=3.763e+02, percent-clipped=0.0 +2023-03-27 04:13:42,309 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6409, 1.5166, 1.3616, 1.5217, 1.8550, 1.8092, 1.5148, 1.3330], + device='cuda:5'), covar=tensor([0.0348, 0.0289, 0.0677, 0.0293, 0.0235, 0.0409, 0.0394, 0.0420], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0107, 0.0145, 0.0112, 0.0100, 0.0112, 0.0102, 0.0112], + device='cuda:5'), out_proj_covar=tensor([7.7892e-05, 8.2087e-05, 1.1361e-04, 8.5655e-05, 7.7571e-05, 8.2870e-05, + 7.6164e-05, 8.5486e-05], device='cuda:5') +2023-03-27 04:13:43,619 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-27 04:13:55,059 INFO [finetune.py:976] (5/7) Epoch 23, batch 3900, loss[loss=0.1702, simple_loss=0.2335, pruned_loss=0.05341, over 4821.00 frames. ], tot_loss[loss=0.1725, simple_loss=0.2438, pruned_loss=0.05065, over 951066.97 frames. 
], batch size: 30, lr: 3.09e-03, grad_scale: 64.0 +2023-03-27 04:14:27,719 INFO [finetune.py:976] (5/7) Epoch 23, batch 3950, loss[loss=0.1544, simple_loss=0.2255, pruned_loss=0.04164, over 4906.00 frames. ], tot_loss[loss=0.1718, simple_loss=0.2424, pruned_loss=0.05056, over 955244.84 frames. ], batch size: 35, lr: 3.09e-03, grad_scale: 64.0 +2023-03-27 04:14:29,947 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.082e+02 1.481e+02 1.820e+02 2.101e+02 4.779e+02, threshold=3.640e+02, percent-clipped=1.0 +2023-03-27 04:14:34,548 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=129968.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:14:56,368 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130000.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:14:58,099 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130002.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:15:02,812 INFO [finetune.py:976] (5/7) Epoch 23, batch 4000, loss[loss=0.1432, simple_loss=0.2217, pruned_loss=0.0324, over 4794.00 frames. ], tot_loss[loss=0.1703, simple_loss=0.2408, pruned_loss=0.04987, over 956282.21 frames. ], batch size: 29, lr: 3.09e-03, grad_scale: 64.0 +2023-03-27 04:15:09,429 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.64 vs. limit=5.0 +2023-03-27 04:15:45,416 INFO [finetune.py:976] (5/7) Epoch 23, batch 4050, loss[loss=0.1715, simple_loss=0.2361, pruned_loss=0.05348, over 4689.00 frames. ], tot_loss[loss=0.1729, simple_loss=0.244, pruned_loss=0.05088, over 956220.43 frames. ], batch size: 23, lr: 3.09e-03, grad_scale: 64.0 +2023-03-27 04:15:47,199 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130061.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:15:47,691 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.063e+02 1.665e+02 1.960e+02 2.479e+02 5.275e+02, threshold=3.921e+02, percent-clipped=4.0 +2023-03-27 04:15:48,456 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130063.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 04:15:51,861 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6012, 1.4440, 2.0437, 3.1666, 2.0222, 2.3460, 1.1670, 2.6043], + device='cuda:5'), covar=tensor([0.1659, 0.1512, 0.1215, 0.0612, 0.0823, 0.1283, 0.1685, 0.0481], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0132, 0.0163, 0.0099, 0.0136, 0.0123, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 04:15:53,610 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130069.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 04:16:12,152 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0204, 1.7009, 2.0621, 2.1178, 1.8276, 1.8297, 2.1309, 1.9525], + device='cuda:5'), covar=tensor([0.4289, 0.4256, 0.3370, 0.3856, 0.4634, 0.3843, 0.4553, 0.3081], + device='cuda:5'), in_proj_covar=tensor([0.0257, 0.0242, 0.0263, 0.0285, 0.0284, 0.0261, 0.0292, 0.0246], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:16:12,669 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6208, 3.3031, 3.1734, 1.7524, 3.4943, 2.6741, 1.2975, 2.4217], + device='cuda:5'), covar=tensor([0.3332, 0.2113, 0.1732, 0.3273, 0.1156, 0.1026, 0.3878, 
0.1472], + device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0178, 0.0161, 0.0129, 0.0161, 0.0124, 0.0148, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 04:16:19,200 INFO [finetune.py:976] (5/7) Epoch 23, batch 4100, loss[loss=0.1535, simple_loss=0.2254, pruned_loss=0.04079, over 4779.00 frames. ], tot_loss[loss=0.1746, simple_loss=0.2463, pruned_loss=0.05149, over 955630.14 frames. ], batch size: 25, lr: 3.09e-03, grad_scale: 64.0 +2023-03-27 04:16:26,589 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=130117.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 04:16:54,986 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130147.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:17:02,641 INFO [finetune.py:976] (5/7) Epoch 23, batch 4150, loss[loss=0.1303, simple_loss=0.2123, pruned_loss=0.02414, over 4773.00 frames. ], tot_loss[loss=0.1759, simple_loss=0.2475, pruned_loss=0.05218, over 954622.19 frames. ], batch size: 28, lr: 3.09e-03, grad_scale: 64.0 +2023-03-27 04:17:04,906 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.036e+02 1.505e+02 1.863e+02 2.291e+02 4.324e+02, threshold=3.726e+02, percent-clipped=3.0 +2023-03-27 04:17:06,272 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0093, 1.7005, 2.3001, 1.5598, 2.1617, 2.2770, 1.5761, 2.4228], + device='cuda:5'), covar=tensor([0.1286, 0.2175, 0.1416, 0.1963, 0.0898, 0.1400, 0.3005, 0.0867], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0206, 0.0191, 0.0190, 0.0173, 0.0215, 0.0216, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:17:32,061 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6070, 1.4775, 1.5291, 1.5180, 1.1244, 3.3663, 1.2791, 1.6742], + device='cuda:5'), covar=tensor([0.3247, 0.2611, 0.2134, 0.2483, 0.1817, 0.0209, 0.2724, 0.1286], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0120, 0.0123, 0.0113, 0.0096, 0.0094, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 04:17:34,971 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0320, 2.8090, 2.5747, 3.2746, 2.9322, 2.8053, 3.4615, 3.0208], + device='cuda:5'), covar=tensor([0.1224, 0.2191, 0.2785, 0.2327, 0.2325, 0.1522, 0.2329, 0.1539], + device='cuda:5'), in_proj_covar=tensor([0.0189, 0.0190, 0.0236, 0.0255, 0.0250, 0.0206, 0.0215, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:17:36,690 INFO [finetune.py:976] (5/7) Epoch 23, batch 4200, loss[loss=0.1659, simple_loss=0.2389, pruned_loss=0.04645, over 4833.00 frames. ], tot_loss[loss=0.1759, simple_loss=0.2482, pruned_loss=0.05179, over 955062.88 frames. 
], batch size: 30, lr: 3.09e-03, grad_scale: 64.0 +2023-03-27 04:18:15,287 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3185, 2.2738, 1.7658, 2.2649, 2.2536, 1.9844, 2.5821, 2.2920], + device='cuda:5'), covar=tensor([0.1322, 0.1858, 0.2981, 0.2251, 0.2314, 0.1596, 0.2607, 0.1689], + device='cuda:5'), in_proj_covar=tensor([0.0188, 0.0188, 0.0235, 0.0253, 0.0248, 0.0205, 0.0213, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:18:24,043 INFO [finetune.py:976] (5/7) Epoch 23, batch 4250, loss[loss=0.1794, simple_loss=0.2443, pruned_loss=0.05718, over 4811.00 frames. ], tot_loss[loss=0.1754, simple_loss=0.247, pruned_loss=0.05188, over 956693.45 frames. ], batch size: 39, lr: 3.08e-03, grad_scale: 64.0 +2023-03-27 04:18:25,854 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.027e+02 1.516e+02 1.759e+02 2.094e+02 3.793e+02, threshold=3.518e+02, percent-clipped=1.0 +2023-03-27 04:18:27,898 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-03-27 04:18:30,083 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130268.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:18:40,549 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.3851, 3.8445, 4.0006, 4.1835, 4.1151, 3.9077, 4.4601, 1.4797], + device='cuda:5'), covar=tensor([0.0708, 0.0823, 0.0846, 0.0931, 0.1175, 0.1573, 0.0763, 0.5449], + device='cuda:5'), in_proj_covar=tensor([0.0346, 0.0245, 0.0278, 0.0292, 0.0337, 0.0286, 0.0304, 0.0300], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:18:44,131 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2354, 2.2086, 1.9179, 2.3446, 2.7774, 2.2602, 2.1033, 1.7772], + device='cuda:5'), covar=tensor([0.2023, 0.1781, 0.1763, 0.1448, 0.1581, 0.1078, 0.1969, 0.1804], + device='cuda:5'), in_proj_covar=tensor([0.0245, 0.0210, 0.0213, 0.0196, 0.0245, 0.0190, 0.0216, 0.0205], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:18:55,821 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1551, 1.7772, 2.2053, 2.1395, 1.8624, 1.8367, 2.0917, 2.0404], + device='cuda:5'), covar=tensor([0.4297, 0.4134, 0.3215, 0.4132, 0.5117, 0.4133, 0.4598, 0.3071], + device='cuda:5'), in_proj_covar=tensor([0.0256, 0.0241, 0.0261, 0.0284, 0.0284, 0.0260, 0.0291, 0.0245], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:18:57,490 INFO [finetune.py:976] (5/7) Epoch 23, batch 4300, loss[loss=0.156, simple_loss=0.2168, pruned_loss=0.04757, over 4713.00 frames. ], tot_loss[loss=0.1738, simple_loss=0.2449, pruned_loss=0.05133, over 955883.91 frames. 
], batch size: 59, lr: 3.08e-03, grad_scale: 64.0 +2023-03-27 04:19:02,813 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=130316.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:19:29,234 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130356.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:19:30,473 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130358.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 04:19:31,028 INFO [finetune.py:976] (5/7) Epoch 23, batch 4350, loss[loss=0.1408, simple_loss=0.2075, pruned_loss=0.03708, over 4272.00 frames. ], tot_loss[loss=0.1718, simple_loss=0.2422, pruned_loss=0.05067, over 952546.66 frames. ], batch size: 18, lr: 3.08e-03, grad_scale: 32.0 +2023-03-27 04:19:33,424 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.012e+02 1.417e+02 1.746e+02 2.112e+02 4.412e+02, threshold=3.492e+02, percent-clipped=1.0 +2023-03-27 04:19:48,213 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130384.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:20:04,342 INFO [finetune.py:976] (5/7) Epoch 23, batch 4400, loss[loss=0.1505, simple_loss=0.2197, pruned_loss=0.04067, over 4856.00 frames. ], tot_loss[loss=0.1735, simple_loss=0.2435, pruned_loss=0.05175, over 952222.87 frames. ], batch size: 31, lr: 3.08e-03, grad_scale: 32.0 +2023-03-27 04:20:19,483 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130432.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:20:24,182 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130438.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:20:30,574 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130445.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:20:31,761 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130447.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:20:46,862 INFO [finetune.py:976] (5/7) Epoch 23, batch 4450, loss[loss=0.1727, simple_loss=0.2494, pruned_loss=0.04799, over 4840.00 frames. ], tot_loss[loss=0.1746, simple_loss=0.2457, pruned_loss=0.05175, over 952744.96 frames. ], batch size: 49, lr: 3.08e-03, grad_scale: 32.0 +2023-03-27 04:20:49,239 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.124e+02 1.491e+02 1.813e+02 2.246e+02 3.707e+02, threshold=3.626e+02, percent-clipped=3.0 +2023-03-27 04:21:10,564 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130493.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:21:11,731 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=130495.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:21:14,694 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130499.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:21:20,673 INFO [finetune.py:976] (5/7) Epoch 23, batch 4500, loss[loss=0.1375, simple_loss=0.2116, pruned_loss=0.03166, over 4744.00 frames. ], tot_loss[loss=0.1752, simple_loss=0.2465, pruned_loss=0.0519, over 951176.75 frames. 
], batch size: 23, lr: 3.08e-03, grad_scale: 32.0 +2023-03-27 04:21:53,320 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130548.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:22:04,071 INFO [finetune.py:976] (5/7) Epoch 23, batch 4550, loss[loss=0.1639, simple_loss=0.2332, pruned_loss=0.04731, over 4843.00 frames. ], tot_loss[loss=0.1765, simple_loss=0.248, pruned_loss=0.05254, over 952575.94 frames. ], batch size: 44, lr: 3.08e-03, grad_scale: 32.0 +2023-03-27 04:22:06,504 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.768e+01 1.546e+02 1.777e+02 2.233e+02 3.779e+02, threshold=3.553e+02, percent-clipped=2.0 +2023-03-27 04:22:07,808 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130565.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:22:25,163 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1419, 1.9854, 1.5734, 1.8888, 2.0503, 1.8092, 2.2569, 2.1119], + device='cuda:5'), covar=tensor([0.1386, 0.1969, 0.3111, 0.2400, 0.2550, 0.1717, 0.2992, 0.1691], + device='cuda:5'), in_proj_covar=tensor([0.0187, 0.0188, 0.0234, 0.0252, 0.0247, 0.0204, 0.0212, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:22:37,458 INFO [finetune.py:976] (5/7) Epoch 23, batch 4600, loss[loss=0.1808, simple_loss=0.2508, pruned_loss=0.05536, over 4741.00 frames. ], tot_loss[loss=0.1762, simple_loss=0.2477, pruned_loss=0.05241, over 952307.26 frames. ], batch size: 26, lr: 3.08e-03, grad_scale: 32.0 +2023-03-27 04:22:37,571 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130609.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 04:22:47,765 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130626.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:23:10,854 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130656.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:23:17,084 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130658.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 04:23:17,586 INFO [finetune.py:976] (5/7) Epoch 23, batch 4650, loss[loss=0.2036, simple_loss=0.2579, pruned_loss=0.07469, over 4709.00 frames. ], tot_loss[loss=0.1745, simple_loss=0.2449, pruned_loss=0.05201, over 952343.62 frames. 
], batch size: 23, lr: 3.08e-03, grad_scale: 32.0 +2023-03-27 04:23:19,982 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.906e+01 1.515e+02 1.768e+02 2.232e+02 6.495e+02, threshold=3.536e+02, percent-clipped=3.0 +2023-03-27 04:23:22,008 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0516, 1.8685, 1.6882, 1.6723, 1.7910, 1.8050, 1.8377, 2.5309], + device='cuda:5'), covar=tensor([0.3509, 0.3679, 0.2973, 0.3418, 0.3801, 0.2205, 0.3459, 0.1596], + device='cuda:5'), in_proj_covar=tensor([0.0291, 0.0263, 0.0235, 0.0276, 0.0257, 0.0227, 0.0255, 0.0237], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:23:54,690 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=130704.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:23:55,883 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=130706.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:23:58,155 INFO [finetune.py:976] (5/7) Epoch 23, batch 4700, loss[loss=0.1251, simple_loss=0.201, pruned_loss=0.02462, over 4793.00 frames. ], tot_loss[loss=0.1731, simple_loss=0.2428, pruned_loss=0.05165, over 954012.99 frames. ], batch size: 29, lr: 3.08e-03, grad_scale: 32.0 +2023-03-27 04:23:59,231 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-03-27 04:24:18,052 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130740.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:24:31,365 INFO [finetune.py:976] (5/7) Epoch 23, batch 4750, loss[loss=0.1806, simple_loss=0.2464, pruned_loss=0.0574, over 4800.00 frames. ], tot_loss[loss=0.1721, simple_loss=0.2418, pruned_loss=0.05126, over 954588.21 frames. ], batch size: 25, lr: 3.08e-03, grad_scale: 32.0 +2023-03-27 04:24:34,231 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.253e+02 1.528e+02 1.803e+02 2.150e+02 3.686e+02, threshold=3.606e+02, percent-clipped=2.0 +2023-03-27 04:24:49,883 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130788.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:24:54,008 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130794.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:25:04,662 INFO [finetune.py:976] (5/7) Epoch 23, batch 4800, loss[loss=0.17, simple_loss=0.2452, pruned_loss=0.04741, over 4901.00 frames. ], tot_loss[loss=0.1741, simple_loss=0.2444, pruned_loss=0.05189, over 955975.37 frames. ], batch size: 32, lr: 3.08e-03, grad_scale: 32.0 +2023-03-27 04:25:29,574 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-03-27 04:25:31,602 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7517, 1.3383, 1.0379, 1.5981, 1.9426, 1.3281, 1.4435, 1.6237], + device='cuda:5'), covar=tensor([0.1271, 0.1783, 0.1698, 0.1008, 0.1905, 0.2037, 0.1296, 0.1629], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0111, 0.0093, 0.0120, 0.0094, 0.0100, 0.0090], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 04:25:37,286 INFO [finetune.py:976] (5/7) Epoch 23, batch 4850, loss[loss=0.1576, simple_loss=0.24, pruned_loss=0.03763, over 4808.00 frames. ], tot_loss[loss=0.1755, simple_loss=0.2466, pruned_loss=0.0522, over 954687.70 frames. 
], batch size: 30, lr: 3.08e-03, grad_scale: 32.0 +2023-03-27 04:25:40,092 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.117e+02 1.613e+02 1.947e+02 2.336e+02 6.046e+02, threshold=3.894e+02, percent-clipped=4.0 +2023-03-27 04:26:15,658 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130904.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 04:26:16,948 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7397, 0.9705, 1.7836, 1.7227, 1.5558, 1.4960, 1.6251, 1.6729], + device='cuda:5'), covar=tensor([0.3333, 0.3534, 0.2767, 0.2980, 0.3901, 0.3210, 0.3468, 0.2638], + device='cuda:5'), in_proj_covar=tensor([0.0258, 0.0242, 0.0263, 0.0286, 0.0285, 0.0262, 0.0293, 0.0247], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:26:19,114 INFO [finetune.py:976] (5/7) Epoch 23, batch 4900, loss[loss=0.1754, simple_loss=0.2595, pruned_loss=0.0456, over 4911.00 frames. ], tot_loss[loss=0.176, simple_loss=0.248, pruned_loss=0.05197, over 953942.63 frames. ], batch size: 38, lr: 3.08e-03, grad_scale: 32.0 +2023-03-27 04:26:28,433 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130921.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:26:52,303 INFO [finetune.py:976] (5/7) Epoch 23, batch 4950, loss[loss=0.1532, simple_loss=0.2347, pruned_loss=0.0358, over 4920.00 frames. ], tot_loss[loss=0.1768, simple_loss=0.2492, pruned_loss=0.05223, over 955465.18 frames. ], batch size: 41, lr: 3.08e-03, grad_scale: 32.0 +2023-03-27 04:26:57,593 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.290e+01 1.586e+02 1.789e+02 2.374e+02 3.586e+02, threshold=3.578e+02, percent-clipped=0.0 +2023-03-27 04:27:27,164 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.66 vs. limit=5.0 +2023-03-27 04:27:36,349 INFO [finetune.py:976] (5/7) Epoch 23, batch 5000, loss[loss=0.1552, simple_loss=0.2292, pruned_loss=0.0406, over 4809.00 frames. ], tot_loss[loss=0.175, simple_loss=0.2471, pruned_loss=0.05139, over 956622.92 frames. ], batch size: 40, lr: 3.08e-03, grad_scale: 32.0 +2023-03-27 04:27:42,763 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2297, 2.1065, 2.1341, 1.5871, 2.0959, 2.1681, 2.2235, 1.7264], + device='cuda:5'), covar=tensor([0.0497, 0.0628, 0.0689, 0.0814, 0.0738, 0.0745, 0.0544, 0.1145], + device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0135, 0.0138, 0.0118, 0.0125, 0.0137, 0.0137, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:27:45,074 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5500, 1.3826, 1.2880, 1.5848, 1.5748, 1.5719, 0.8698, 1.2986], + device='cuda:5'), covar=tensor([0.2289, 0.2179, 0.2222, 0.1808, 0.1654, 0.1362, 0.2864, 0.2124], + device='cuda:5'), in_proj_covar=tensor([0.0243, 0.0209, 0.0212, 0.0195, 0.0242, 0.0189, 0.0215, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:27:57,572 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131040.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:28:09,927 INFO [finetune.py:976] (5/7) Epoch 23, batch 5050, loss[loss=0.1499, simple_loss=0.225, pruned_loss=0.03742, over 4815.00 frames. 
], tot_loss[loss=0.1725, simple_loss=0.2442, pruned_loss=0.05038, over 956497.65 frames. ], batch size: 51, lr: 3.08e-03, grad_scale: 32.0 +2023-03-27 04:28:12,371 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.026e+02 1.381e+02 1.770e+02 2.059e+02 4.416e+02, threshold=3.539e+02, percent-clipped=4.0 +2023-03-27 04:28:41,480 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=131088.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:28:41,508 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131088.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:28:45,162 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131094.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:28:50,888 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8079, 3.9019, 3.7767, 1.9989, 4.0358, 3.1013, 0.8761, 2.7434], + device='cuda:5'), covar=tensor([0.2290, 0.2109, 0.1554, 0.3496, 0.0944, 0.0949, 0.4693, 0.1676], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0176, 0.0159, 0.0128, 0.0159, 0.0121, 0.0146, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 04:28:57,921 INFO [finetune.py:976] (5/7) Epoch 23, batch 5100, loss[loss=0.1735, simple_loss=0.2389, pruned_loss=0.05399, over 4763.00 frames. ], tot_loss[loss=0.1699, simple_loss=0.2412, pruned_loss=0.04933, over 956987.35 frames. ], batch size: 28, lr: 3.08e-03, grad_scale: 32.0 +2023-03-27 04:29:17,262 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=131136.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:29:20,315 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1690, 3.6182, 3.7751, 3.9423, 3.9113, 3.7142, 4.2422, 1.5014], + device='cuda:5'), covar=tensor([0.0856, 0.0947, 0.1017, 0.1161, 0.1370, 0.1662, 0.0745, 0.5889], + device='cuda:5'), in_proj_covar=tensor([0.0348, 0.0246, 0.0280, 0.0294, 0.0339, 0.0286, 0.0305, 0.0301], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:29:20,884 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=131142.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:29:31,083 INFO [finetune.py:976] (5/7) Epoch 23, batch 5150, loss[loss=0.1886, simple_loss=0.2696, pruned_loss=0.05378, over 4826.00 frames. ], tot_loss[loss=0.1724, simple_loss=0.2429, pruned_loss=0.051, over 954136.38 frames. ], batch size: 47, lr: 3.08e-03, grad_scale: 32.0 +2023-03-27 04:29:34,469 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.214e+01 1.572e+02 1.903e+02 2.241e+02 4.010e+02, threshold=3.805e+02, percent-clipped=1.0 +2023-03-27 04:29:41,816 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131174.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:30:01,340 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131204.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 04:30:04,269 INFO [finetune.py:976] (5/7) Epoch 23, batch 5200, loss[loss=0.1707, simple_loss=0.2538, pruned_loss=0.04381, over 4928.00 frames. ], tot_loss[loss=0.1733, simple_loss=0.2449, pruned_loss=0.05082, over 955143.31 frames. ], batch size: 38, lr: 3.08e-03, grad_scale: 32.0 +2023-03-27 04:30:08,450 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. 
limit=2.0 +2023-03-27 04:30:12,612 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131221.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:30:23,001 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131235.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 04:30:27,307 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-03-27 04:30:33,247 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=131252.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:30:37,395 INFO [finetune.py:976] (5/7) Epoch 23, batch 5250, loss[loss=0.166, simple_loss=0.2459, pruned_loss=0.0431, over 4791.00 frames. ], tot_loss[loss=0.1756, simple_loss=0.2475, pruned_loss=0.05185, over 953645.79 frames. ], batch size: 29, lr: 3.08e-03, grad_scale: 16.0 +2023-03-27 04:30:40,887 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.130e+02 1.531e+02 1.792e+02 2.239e+02 3.281e+02, threshold=3.585e+02, percent-clipped=0.0 +2023-03-27 04:30:44,380 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=131269.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:31:21,562 INFO [finetune.py:976] (5/7) Epoch 23, batch 5300, loss[loss=0.1937, simple_loss=0.2678, pruned_loss=0.05982, over 4851.00 frames. ], tot_loss[loss=0.1756, simple_loss=0.2482, pruned_loss=0.05146, over 953825.86 frames. ], batch size: 44, lr: 3.08e-03, grad_scale: 16.0 +2023-03-27 04:31:38,955 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2818, 2.1570, 1.6430, 2.1631, 2.0999, 1.8918, 2.4630, 2.1984], + device='cuda:5'), covar=tensor([0.1320, 0.1828, 0.2767, 0.2419, 0.2349, 0.1515, 0.3251, 0.1624], + device='cuda:5'), in_proj_covar=tensor([0.0188, 0.0188, 0.0234, 0.0252, 0.0247, 0.0205, 0.0213, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:31:54,360 INFO [finetune.py:976] (5/7) Epoch 23, batch 5350, loss[loss=0.1677, simple_loss=0.248, pruned_loss=0.04374, over 4747.00 frames. ], tot_loss[loss=0.1755, simple_loss=0.2482, pruned_loss=0.05146, over 952270.51 frames. 
], batch size: 54, lr: 3.08e-03, grad_scale: 16.0 +2023-03-27 04:31:57,384 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.087e+02 1.529e+02 1.830e+02 2.196e+02 3.219e+02, threshold=3.659e+02, percent-clipped=0.0 +2023-03-27 04:32:00,583 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.6598, 1.5992, 1.6345, 0.9749, 1.8093, 2.0150, 1.9268, 1.4929], + device='cuda:5'), covar=tensor([0.1079, 0.0764, 0.0620, 0.0620, 0.0432, 0.0634, 0.0396, 0.0838], + device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0149, 0.0126, 0.0123, 0.0131, 0.0130, 0.0141, 0.0149], + device='cuda:5'), out_proj_covar=tensor([8.9378e-05, 1.0733e-04, 9.0309e-05, 8.6777e-05, 9.2107e-05, 9.2387e-05, + 1.0061e-04, 1.0644e-04], device='cuda:5') +2023-03-27 04:32:33,381 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7496, 0.9185, 1.7724, 1.7610, 1.6012, 1.5428, 1.6421, 1.7203], + device='cuda:5'), covar=tensor([0.3590, 0.3879, 0.3110, 0.3224, 0.4300, 0.3605, 0.3971, 0.2879], + device='cuda:5'), in_proj_covar=tensor([0.0259, 0.0244, 0.0265, 0.0288, 0.0287, 0.0263, 0.0295, 0.0249], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:32:38,104 INFO [finetune.py:976] (5/7) Epoch 23, batch 5400, loss[loss=0.1994, simple_loss=0.2562, pruned_loss=0.07129, over 4888.00 frames. ], tot_loss[loss=0.1747, simple_loss=0.2462, pruned_loss=0.05161, over 953648.90 frames. ], batch size: 35, lr: 3.08e-03, grad_scale: 16.0 +2023-03-27 04:32:38,194 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131409.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:32:38,238 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5440, 2.4004, 1.7869, 2.4529, 2.2311, 1.9720, 2.8297, 2.4994], + device='cuda:5'), covar=tensor([0.1316, 0.2124, 0.3287, 0.2873, 0.2846, 0.1701, 0.3829, 0.1711], + device='cuda:5'), in_proj_covar=tensor([0.0187, 0.0187, 0.0234, 0.0252, 0.0247, 0.0204, 0.0213, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:32:42,447 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131416.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:32:43,643 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3724, 1.3613, 1.5238, 1.4377, 1.4972, 2.9774, 1.3194, 1.4653], + device='cuda:5'), covar=tensor([0.0968, 0.1799, 0.1112, 0.1017, 0.1648, 0.0294, 0.1530, 0.1770], + device='cuda:5'), in_proj_covar=tensor([0.0073, 0.0081, 0.0073, 0.0076, 0.0091, 0.0080, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 04:32:59,793 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-03-27 04:33:11,756 INFO [finetune.py:976] (5/7) Epoch 23, batch 5450, loss[loss=0.1439, simple_loss=0.2179, pruned_loss=0.03496, over 3968.00 frames. ], tot_loss[loss=0.1725, simple_loss=0.2433, pruned_loss=0.0508, over 952354.18 frames. 
], batch size: 17, lr: 3.08e-03, grad_scale: 16.0 +2023-03-27 04:33:14,786 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.094e+02 1.514e+02 1.875e+02 2.409e+02 5.439e+02, threshold=3.749e+02, percent-clipped=4.0 +2023-03-27 04:33:18,571 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131470.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:33:23,323 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131477.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:33:33,348 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131492.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:33:48,969 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1052, 1.1940, 0.6837, 1.9186, 2.4798, 1.8936, 1.6949, 1.7869], + device='cuda:5'), covar=tensor([0.1538, 0.2444, 0.2289, 0.1308, 0.1829, 0.1930, 0.1635, 0.2220], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0110, 0.0092, 0.0119, 0.0093, 0.0099, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 04:33:51,832 INFO [finetune.py:976] (5/7) Epoch 23, batch 5500, loss[loss=0.1543, simple_loss=0.2294, pruned_loss=0.03957, over 4322.00 frames. ], tot_loss[loss=0.1691, simple_loss=0.2392, pruned_loss=0.04944, over 950480.04 frames. ], batch size: 19, lr: 3.08e-03, grad_scale: 16.0 +2023-03-27 04:33:59,695 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6205, 2.2964, 1.8831, 0.9220, 2.0853, 2.1738, 2.0134, 2.2031], + device='cuda:5'), covar=tensor([0.0755, 0.0703, 0.1491, 0.1803, 0.1005, 0.1769, 0.1769, 0.0741], + device='cuda:5'), in_proj_covar=tensor([0.0172, 0.0193, 0.0202, 0.0183, 0.0212, 0.0211, 0.0225, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:34:12,691 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131530.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 04:34:29,181 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131553.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:34:33,175 INFO [finetune.py:976] (5/7) Epoch 23, batch 5550, loss[loss=0.1374, simple_loss=0.2198, pruned_loss=0.02749, over 4912.00 frames. ], tot_loss[loss=0.1715, simple_loss=0.2419, pruned_loss=0.05053, over 950068.31 frames. ], batch size: 28, lr: 3.08e-03, grad_scale: 16.0 +2023-03-27 04:34:36,710 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.823e+01 1.422e+02 1.728e+02 2.186e+02 5.215e+02, threshold=3.457e+02, percent-clipped=2.0 +2023-03-27 04:34:42,843 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3178, 1.2907, 1.6278, 2.4681, 1.6049, 2.1505, 0.8446, 2.1024], + device='cuda:5'), covar=tensor([0.1824, 0.1369, 0.1118, 0.0724, 0.0938, 0.1239, 0.1518, 0.0620], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0116, 0.0133, 0.0164, 0.0100, 0.0136, 0.0124, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 04:35:04,717 INFO [finetune.py:976] (5/7) Epoch 23, batch 5600, loss[loss=0.1733, simple_loss=0.2514, pruned_loss=0.04753, over 4121.00 frames. ], tot_loss[loss=0.1752, simple_loss=0.2469, pruned_loss=0.0518, over 953187.49 frames. 
], batch size: 65, lr: 3.07e-03, grad_scale: 16.0 +2023-03-27 04:35:06,575 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1427, 2.0434, 1.7300, 2.0975, 1.9372, 1.9633, 1.9313, 2.7099], + device='cuda:5'), covar=tensor([0.3673, 0.4348, 0.3247, 0.4068, 0.4093, 0.2671, 0.4011, 0.1680], + device='cuda:5'), in_proj_covar=tensor([0.0288, 0.0262, 0.0233, 0.0274, 0.0254, 0.0225, 0.0252, 0.0234], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:35:34,624 INFO [finetune.py:976] (5/7) Epoch 23, batch 5650, loss[loss=0.1757, simple_loss=0.2645, pruned_loss=0.04344, over 4864.00 frames. ], tot_loss[loss=0.1779, simple_loss=0.2501, pruned_loss=0.05286, over 953881.64 frames. ], batch size: 47, lr: 3.07e-03, grad_scale: 16.0 +2023-03-27 04:35:37,861 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.955e+01 1.494e+02 1.801e+02 2.339e+02 4.576e+02, threshold=3.601e+02, percent-clipped=4.0 +2023-03-27 04:36:04,504 INFO [finetune.py:976] (5/7) Epoch 23, batch 5700, loss[loss=0.1594, simple_loss=0.2205, pruned_loss=0.0492, over 4105.00 frames. ], tot_loss[loss=0.1744, simple_loss=0.2457, pruned_loss=0.05153, over 938217.24 frames. ], batch size: 18, lr: 3.07e-03, grad_scale: 16.0 +2023-03-27 04:36:40,049 INFO [finetune.py:976] (5/7) Epoch 24, batch 0, loss[loss=0.2093, simple_loss=0.2706, pruned_loss=0.07406, over 4804.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2706, pruned_loss=0.07406, over 4804.00 frames. ], batch size: 55, lr: 3.07e-03, grad_scale: 16.0 +2023-03-27 04:36:40,049 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-27 04:36:43,511 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1087, 1.8999, 1.7568, 1.7608, 1.8495, 1.8904, 1.8364, 2.5490], + device='cuda:5'), covar=tensor([0.3922, 0.4690, 0.3244, 0.3927, 0.4207, 0.2536, 0.4022, 0.1861], + device='cuda:5'), in_proj_covar=tensor([0.0290, 0.0263, 0.0234, 0.0275, 0.0256, 0.0226, 0.0253, 0.0235], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:36:49,758 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2123, 2.0805, 2.0091, 1.9003, 1.9856, 2.0759, 2.0279, 2.6644], + device='cuda:5'), covar=tensor([0.3522, 0.3949, 0.2978, 0.3714, 0.3875, 0.2211, 0.3662, 0.1834], + device='cuda:5'), in_proj_covar=tensor([0.0290, 0.0263, 0.0234, 0.0275, 0.0256, 0.0226, 0.0253, 0.0235], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:36:50,762 INFO [finetune.py:1010] (5/7) Epoch 24, validation: loss=0.1594, simple_loss=0.227, pruned_loss=0.04592, over 2265189.00 frames. 
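The loss[...]/tot_loss[...] pairs in the records above report a per-batch loss next to a running aggregate weighted by frame count (the "over N frames" figure). A minimal sketch of that frame-weighted bookkeeping, assuming a simple decayed sum; the class and names here are illustrative, not the actual finetune.py code:

    # Illustrative sketch only (assumes a decayed frame-weighted average);
    # not the actual icefall finetune.py implementation.
    class RunningLoss:
        def __init__(self, decay: float = 0.995) -> None:
            self.decay = decay      # assumed smoothing factor
            self.loss_sum = 0.0     # decayed sum of loss * frames
            self.frames = 0.0       # decayed sum of frames

        def update(self, batch_loss: float, batch_frames: float) -> float:
            # Decay the old statistics, then fold in the new batch, so the
            # reported tot_loss tracks recent batches, weighted by frames.
            self.loss_sum = self.loss_sum * self.decay + batch_loss * batch_frames
            self.frames = self.frames * self.decay + batch_frames
            return self.loss_sum / self.frames

    tracker = RunningLoss()
    tot = tracker.update(0.2093, 4804.0)  # first batch: returns 0.2093

Under this reading, on the first batch of an epoch the two figures coincide, which matches the "Epoch 24, batch 0" record above, where loss and tot_loss are identical.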
+2023-03-27 04:36:50,762 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-27 04:36:52,447 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1701, 1.5026, 0.8679, 1.9516, 2.4630, 1.8209, 1.8502, 2.0048], + device='cuda:5'), covar=tensor([0.1514, 0.2099, 0.2013, 0.1248, 0.1846, 0.1960, 0.1422, 0.1920], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0094, 0.0110, 0.0092, 0.0119, 0.0094, 0.0099, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 04:37:07,464 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 7.206e+01 1.398e+02 1.674e+02 2.004e+02 3.219e+02, threshold=3.348e+02, percent-clipped=0.0 +2023-03-27 04:37:08,163 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131765.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:37:12,926 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131772.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:37:25,289 INFO [finetune.py:976] (5/7) Epoch 24, batch 50, loss[loss=0.152, simple_loss=0.2399, pruned_loss=0.03206, over 4808.00 frames. ], tot_loss[loss=0.1796, simple_loss=0.2521, pruned_loss=0.05352, over 217404.63 frames. ], batch size: 39, lr: 3.07e-03, grad_scale: 16.0 +2023-03-27 04:38:02,615 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131830.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:38:07,277 INFO [finetune.py:976] (5/7) Epoch 24, batch 100, loss[loss=0.1338, simple_loss=0.2035, pruned_loss=0.03206, over 4829.00 frames. ], tot_loss[loss=0.1716, simple_loss=0.2428, pruned_loss=0.0502, over 382507.83 frames. ], batch size: 33, lr: 3.07e-03, grad_scale: 16.0 +2023-03-27 04:38:07,691 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.52 vs. limit=5.0 +2023-03-27 04:38:15,492 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131848.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:38:20,990 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131857.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:38:25,098 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.662e+01 1.465e+02 1.761e+02 2.142e+02 3.724e+02, threshold=3.523e+02, percent-clipped=1.0 +2023-03-27 04:38:34,599 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=131878.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:38:40,502 INFO [finetune.py:976] (5/7) Epoch 24, batch 150, loss[loss=0.1396, simple_loss=0.2093, pruned_loss=0.03502, over 4836.00 frames. ], tot_loss[loss=0.1673, simple_loss=0.2366, pruned_loss=0.04901, over 509758.14 frames. ], batch size: 30, lr: 3.07e-03, grad_scale: 16.0 +2023-03-27 04:39:10,821 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131918.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:39:29,542 INFO [finetune.py:976] (5/7) Epoch 24, batch 200, loss[loss=0.1792, simple_loss=0.2405, pruned_loss=0.05895, over 4834.00 frames. ], tot_loss[loss=0.1683, simple_loss=0.2367, pruned_loss=0.04991, over 606630.92 frames. 
], batch size: 33, lr: 3.07e-03, grad_scale: 16.0 +2023-03-27 04:39:43,438 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2573, 2.2160, 1.8452, 2.2658, 2.0766, 2.1010, 2.0554, 3.0025], + device='cuda:5'), covar=tensor([0.4024, 0.4684, 0.3544, 0.4439, 0.4898, 0.2460, 0.4312, 0.1686], + device='cuda:5'), in_proj_covar=tensor([0.0290, 0.0263, 0.0234, 0.0275, 0.0256, 0.0227, 0.0254, 0.0236], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:39:51,183 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.027e+02 1.517e+02 1.799e+02 2.123e+02 6.232e+02, threshold=3.598e+02, percent-clipped=3.0 +2023-03-27 04:40:06,650 INFO [finetune.py:976] (5/7) Epoch 24, batch 250, loss[loss=0.2094, simple_loss=0.2886, pruned_loss=0.06516, over 4856.00 frames. ], tot_loss[loss=0.1696, simple_loss=0.2393, pruned_loss=0.0499, over 685112.22 frames. ], batch size: 44, lr: 3.07e-03, grad_scale: 16.0 +2023-03-27 04:40:36,981 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132031.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:40:41,434 INFO [finetune.py:976] (5/7) Epoch 24, batch 300, loss[loss=0.1344, simple_loss=0.2111, pruned_loss=0.02887, over 4756.00 frames. ], tot_loss[loss=0.1751, simple_loss=0.2462, pruned_loss=0.05202, over 745776.85 frames. ], batch size: 26, lr: 3.07e-03, grad_scale: 16.0 +2023-03-27 04:40:53,255 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132054.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:40:56,276 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1752, 2.1346, 1.7951, 2.1027, 1.9304, 1.9423, 1.9903, 2.6963], + device='cuda:5'), covar=tensor([0.3604, 0.4042, 0.3055, 0.3837, 0.3999, 0.2431, 0.3866, 0.1771], + device='cuda:5'), in_proj_covar=tensor([0.0289, 0.0263, 0.0234, 0.0275, 0.0256, 0.0226, 0.0253, 0.0236], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 04:40:59,162 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.071e+02 1.635e+02 1.887e+02 2.261e+02 6.512e+02, threshold=3.774e+02, percent-clipped=2.0 +2023-03-27 04:40:59,881 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=132065.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:41:04,186 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=132072.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:41:14,120 INFO [finetune.py:976] (5/7) Epoch 24, batch 350, loss[loss=0.1708, simple_loss=0.2528, pruned_loss=0.0444, over 4854.00 frames. ], tot_loss[loss=0.1764, simple_loss=0.2483, pruned_loss=0.05225, over 792359.22 frames. 
], batch size: 31, lr: 3.07e-03, grad_scale: 16.0 +2023-03-27 04:41:17,773 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132092.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:41:33,454 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=132113.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:41:39,162 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132115.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 04:41:40,376 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.2151, 1.3436, 1.4181, 0.6792, 1.3285, 1.5780, 1.6454, 1.3190], + device='cuda:5'), covar=tensor([0.0886, 0.0614, 0.0500, 0.0484, 0.0502, 0.0579, 0.0333, 0.0647], + device='cuda:5'), in_proj_covar=tensor([0.0121, 0.0147, 0.0124, 0.0120, 0.0129, 0.0128, 0.0139, 0.0146], + device='cuda:5'), out_proj_covar=tensor([8.7834e-05, 1.0559e-04, 8.8833e-05, 8.4564e-05, 9.0815e-05, 9.0859e-05, + 9.8914e-05, 1.0472e-04], device='cuda:5') +2023-03-27 04:41:42,169 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=132120.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:41:56,058 INFO [finetune.py:976] (5/7) Epoch 24, batch 400, loss[loss=0.1685, simple_loss=0.2508, pruned_loss=0.04307, over 4897.00 frames. ], tot_loss[loss=0.1776, simple_loss=0.2499, pruned_loss=0.05261, over 829134.20 frames. ], batch size: 35, lr: 3.07e-03, grad_scale: 16.0 +2023-03-27 04:42:03,865 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=132148.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:42:11,266 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5604, 1.4181, 1.9596, 3.1618, 2.1343, 2.2369, 1.1513, 2.6329], + device='cuda:5'), covar=tensor([0.1626, 0.1372, 0.1209, 0.0594, 0.0765, 0.1610, 0.1622, 0.0475], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0132, 0.0162, 0.0100, 0.0136, 0.0124, 0.0099], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 04:42:15,410 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.028e+02 1.594e+02 1.883e+02 2.349e+02 4.739e+02, threshold=3.766e+02, percent-clipped=1.0 +2023-03-27 04:42:29,844 INFO [finetune.py:976] (5/7) Epoch 24, batch 450, loss[loss=0.1675, simple_loss=0.2432, pruned_loss=0.0459, over 4707.00 frames. ], tot_loss[loss=0.1749, simple_loss=0.2469, pruned_loss=0.05144, over 856478.53 frames. 
], batch size: 59, lr: 3.07e-03, grad_scale: 16.0 +2023-03-27 04:42:36,330 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=132196.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:42:38,168 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0977, 0.9989, 0.8763, 1.1345, 1.2372, 1.1387, 1.0075, 0.9129], + device='cuda:5'), covar=tensor([0.0422, 0.0350, 0.0778, 0.0346, 0.0316, 0.0429, 0.0367, 0.0459], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0105, 0.0142, 0.0110, 0.0098, 0.0110, 0.0100, 0.0110], + device='cuda:5'), out_proj_covar=tensor([7.6328e-05, 8.0439e-05, 1.1112e-04, 8.3876e-05, 7.6321e-05, 8.1177e-05, + 7.4037e-05, 8.3915e-05], device='cuda:5') +2023-03-27 04:42:55,014 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132213.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 04:43:13,242 INFO [finetune.py:976] (5/7) Epoch 24, batch 500, loss[loss=0.1715, simple_loss=0.2395, pruned_loss=0.0518, over 4696.00 frames. ], tot_loss[loss=0.1737, simple_loss=0.2448, pruned_loss=0.05132, over 879738.94 frames. ], batch size: 23, lr: 3.07e-03, grad_scale: 16.0 +2023-03-27 04:43:24,723 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-27 04:43:32,460 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 7.902e+01 1.519e+02 1.809e+02 2.135e+02 3.897e+02, threshold=3.617e+02, percent-clipped=1.0 +2023-03-27 04:43:46,924 INFO [finetune.py:976] (5/7) Epoch 24, batch 550, loss[loss=0.13, simple_loss=0.2007, pruned_loss=0.02964, over 4820.00 frames. ], tot_loss[loss=0.171, simple_loss=0.2413, pruned_loss=0.05039, over 896490.88 frames. ], batch size: 25, lr: 3.07e-03, grad_scale: 16.0 +2023-03-27 04:44:28,361 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0 +2023-03-27 04:44:30,205 INFO [finetune.py:976] (5/7) Epoch 24, batch 600, loss[loss=0.1849, simple_loss=0.255, pruned_loss=0.05737, over 4793.00 frames. ], tot_loss[loss=0.1719, simple_loss=0.2425, pruned_loss=0.0507, over 909981.51 frames. ], batch size: 29, lr: 3.07e-03, grad_scale: 16.0 +2023-03-27 04:44:31,559 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6640, 1.1933, 0.8416, 1.4771, 2.0661, 1.0834, 1.5321, 1.5442], + device='cuda:5'), covar=tensor([0.1443, 0.2083, 0.1897, 0.1258, 0.1982, 0.2002, 0.1396, 0.1959], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0093, 0.0109, 0.0092, 0.0118, 0.0093, 0.0098, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 04:44:58,205 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.737e+01 1.576e+02 1.859e+02 2.330e+02 3.343e+02, threshold=3.718e+02, percent-clipped=0.0 +2023-03-27 04:45:12,587 INFO [finetune.py:976] (5/7) Epoch 24, batch 650, loss[loss=0.1626, simple_loss=0.2431, pruned_loss=0.04102, over 4927.00 frames. ], tot_loss[loss=0.1743, simple_loss=0.2456, pruned_loss=0.05155, over 917940.64 frames. 
], batch size: 33, lr: 3.07e-03, grad_scale: 16.0
+2023-03-27 04:45:12,659 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132387.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:45:28,137 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132410.0, num_to_drop=1, layers_to_drop={3}
+2023-03-27 04:45:46,140 INFO [finetune.py:976] (5/7) Epoch 24, batch 700, loss[loss=0.1787, simple_loss=0.2527, pruned_loss=0.05239, over 4845.00 frames. ], tot_loss[loss=0.1745, simple_loss=0.246, pruned_loss=0.05146, over 927516.30 frames. ], batch size: 31, lr: 3.07e-03, grad_scale: 16.0
+2023-03-27 04:46:03,852 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.085e+02 1.592e+02 1.911e+02 2.262e+02 4.191e+02, threshold=3.822e+02, percent-clipped=2.0
+2023-03-27 04:46:19,325 INFO [finetune.py:976] (5/7) Epoch 24, batch 750, loss[loss=0.1623, simple_loss=0.2378, pruned_loss=0.0434, over 4741.00 frames. ], tot_loss[loss=0.175, simple_loss=0.2471, pruned_loss=0.05149, over 933092.47 frames. ], batch size: 26, lr: 3.06e-03, grad_scale: 16.0
+2023-03-27 04:46:36,510 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=132513.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:46:58,683 INFO [finetune.py:976] (5/7) Epoch 24, batch 800, loss[loss=0.1904, simple_loss=0.2515, pruned_loss=0.06462, over 4822.00 frames. ], tot_loss[loss=0.175, simple_loss=0.2471, pruned_loss=0.05139, over 937298.71 frames. ], batch size: 33, lr: 3.06e-03, grad_scale: 16.0
+2023-03-27 04:47:17,623 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=132561.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:47:19,960 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.775e+01 1.480e+02 1.729e+02 2.075e+02 4.531e+02, threshold=3.459e+02, percent-clipped=1.0
+2023-03-27 04:47:35,957 INFO [finetune.py:976] (5/7) Epoch 24, batch 850, loss[loss=0.1864, simple_loss=0.2598, pruned_loss=0.0565, over 4907.00 frames. ], tot_loss[loss=0.1742, simple_loss=0.2456, pruned_loss=0.05141, over 941302.19 frames. ], batch size: 37, lr: 3.06e-03, grad_scale: 16.0
+2023-03-27 04:48:18,614 INFO [finetune.py:976] (5/7) Epoch 24, batch 900, loss[loss=0.148, simple_loss=0.2105, pruned_loss=0.04275, over 4826.00 frames. ], tot_loss[loss=0.1712, simple_loss=0.2423, pruned_loss=0.05001, over 946236.51 frames. ], batch size: 30, lr: 3.06e-03, grad_scale: 16.0
+2023-03-27 04:48:33,143 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132660.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:48:35,407 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.013e+02 1.417e+02 1.718e+02 2.002e+02 3.598e+02, threshold=3.436e+02, percent-clipped=1.0
+2023-03-27 04:48:52,530 INFO [finetune.py:976] (5/7) Epoch 24, batch 950, loss[loss=0.1591, simple_loss=0.2347, pruned_loss=0.0417, over 4907.00 frames. ], tot_loss[loss=0.1686, simple_loss=0.2395, pruned_loss=0.04888, over 948733.08 frames. ], batch size: 37, lr: 3.06e-03, grad_scale: 16.0
+2023-03-27 04:48:52,620 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=132687.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:49:07,061 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=132710.0, num_to_drop=1, layers_to_drop={0}
+2023-03-27 04:49:08,293 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7080, 1.5100, 2.0604, 1.4685, 1.8926, 2.0964, 1.4328, 2.2316],
+ device='cuda:5'), covar=tensor([0.1306, 0.2160, 0.1289, 0.1706, 0.0862, 0.1245, 0.3021, 0.0723],
+ device='cuda:5'), in_proj_covar=tensor([0.0191, 0.0207, 0.0191, 0.0191, 0.0172, 0.0214, 0.0216, 0.0199],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 04:49:14,302 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132721.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:49:26,440 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=132735.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:49:27,542 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2502, 2.9895, 2.8323, 1.2121, 3.0677, 2.3003, 0.6908, 1.8264],
+ device='cuda:5'), covar=tensor([0.2478, 0.2951, 0.1830, 0.3893, 0.1330, 0.1143, 0.4355, 0.1856],
+ device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0179, 0.0162, 0.0129, 0.0161, 0.0124, 0.0148, 0.0124],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-27 04:49:28,061 INFO [finetune.py:976] (5/7) Epoch 24, batch 1000, loss[loss=0.2099, simple_loss=0.279, pruned_loss=0.07045, over 4826.00 frames. ], tot_loss[loss=0.1707, simple_loss=0.2416, pruned_loss=0.04994, over 947871.04 frames. ], batch size: 30, lr: 3.06e-03, grad_scale: 16.0
+2023-03-27 04:49:38,674 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132746.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:49:50,823 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=132758.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:49:54,882 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.132e+02 1.613e+02 1.803e+02 2.355e+02 4.590e+02, threshold=3.605e+02, percent-clipped=3.0
+2023-03-27 04:50:17,563 INFO [finetune.py:976] (5/7) Epoch 24, batch 1050, loss[loss=0.2119, simple_loss=0.2767, pruned_loss=0.07358, over 4824.00 frames. ], tot_loss[loss=0.1725, simple_loss=0.2444, pruned_loss=0.05035, over 949344.24 frames. ], batch size: 30, lr: 3.06e-03, grad_scale: 16.0
+2023-03-27 04:50:31,473 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132807.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:50:51,434 INFO [finetune.py:976] (5/7) Epoch 24, batch 1100, loss[loss=0.1807, simple_loss=0.2613, pruned_loss=0.05001, over 4821.00 frames. ], tot_loss[loss=0.1738, simple_loss=0.2461, pruned_loss=0.05076, over 948939.80 frames. ], batch size: 55, lr: 3.06e-03, grad_scale: 16.0
+2023-03-27 04:51:08,748 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.005e+02 1.623e+02 1.902e+02 2.304e+02 4.937e+02, threshold=3.804e+02, percent-clipped=2.0
+2023-03-27 04:51:24,188 INFO [finetune.py:976] (5/7) Epoch 24, batch 1150, loss[loss=0.2108, simple_loss=0.2667, pruned_loss=0.0774, over 4836.00 frames. ], tot_loss[loss=0.1741, simple_loss=0.2469, pruned_loss=0.05066, over 951179.10 frames. ], batch size: 44, lr: 3.06e-03, grad_scale: 16.0
+2023-03-27 04:51:57,324 INFO [finetune.py:976] (5/7) Epoch 24, batch 1200, loss[loss=0.1711, simple_loss=0.238, pruned_loss=0.05214, over 4825.00 frames. ], tot_loss[loss=0.1739, simple_loss=0.2465, pruned_loss=0.0506, over 951304.70 frames. ], batch size: 39, lr: 3.06e-03, grad_scale: 16.0
+2023-03-27 04:52:24,717 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.055e+01 1.470e+02 1.716e+02 2.148e+02 3.548e+02, threshold=3.432e+02, percent-clipped=0.0
+2023-03-27 04:52:40,282 INFO [finetune.py:976] (5/7) Epoch 24, batch 1250, loss[loss=0.1662, simple_loss=0.2367, pruned_loss=0.04786, over 4759.00 frames. ], tot_loss[loss=0.1734, simple_loss=0.2451, pruned_loss=0.0508, over 951196.70 frames. ], batch size: 27, lr: 3.06e-03, grad_scale: 16.0
+2023-03-27 04:52:42,298 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.11 vs. limit=5.0
+2023-03-27 04:52:59,669 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133016.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:53:02,117 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.3407, 3.8036, 3.9574, 4.1730, 4.1404, 3.7920, 4.4173, 1.3645],
+ device='cuda:5'), covar=tensor([0.0818, 0.0934, 0.1021, 0.0982, 0.1218, 0.1594, 0.0764, 0.5847],
+ device='cuda:5'), in_proj_covar=tensor([0.0349, 0.0249, 0.0283, 0.0295, 0.0341, 0.0288, 0.0307, 0.0302],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 04:53:05,070 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7935, 1.2412, 0.7715, 1.6090, 2.1345, 1.5206, 1.4986, 1.6077],
+ device='cuda:5'), covar=tensor([0.1387, 0.1960, 0.1892, 0.1174, 0.1855, 0.2004, 0.1429, 0.1973],
+ device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0094, 0.0110, 0.0093, 0.0120, 0.0094, 0.0099, 0.0089],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003],
+ device='cuda:5')
+2023-03-27 04:53:15,460 INFO [finetune.py:976] (5/7) Epoch 24, batch 1300, loss[loss=0.155, simple_loss=0.2293, pruned_loss=0.04038, over 4767.00 frames. ], tot_loss[loss=0.1707, simple_loss=0.2416, pruned_loss=0.04987, over 951738.31 frames. ], batch size: 28, lr: 3.06e-03, grad_scale: 16.0
+2023-03-27 04:53:42,215 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.345e+01 1.496e+02 1.852e+02 2.141e+02 4.041e+02, threshold=3.705e+02, percent-clipped=2.0
+2023-03-27 04:53:57,208 INFO [finetune.py:976] (5/7) Epoch 24, batch 1350, loss[loss=0.2162, simple_loss=0.2833, pruned_loss=0.07458, over 4682.00 frames. ], tot_loss[loss=0.1692, simple_loss=0.2402, pruned_loss=0.04913, over 951718.67 frames. ], batch size: 59, lr: 3.06e-03, grad_scale: 16.0
+2023-03-27 04:54:05,646 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6251, 1.5530, 1.9687, 1.3791, 1.8155, 1.9876, 1.4727, 2.1394],
+ device='cuda:5'), covar=tensor([0.1434, 0.2304, 0.1523, 0.1983, 0.1031, 0.1546, 0.2798, 0.0996],
+ device='cuda:5'), in_proj_covar=tensor([0.0193, 0.0209, 0.0192, 0.0191, 0.0174, 0.0215, 0.0217, 0.0200],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 04:54:07,945 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133102.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:54:31,059 INFO [finetune.py:976] (5/7) Epoch 24, batch 1400, loss[loss=0.1911, simple_loss=0.2644, pruned_loss=0.05889, over 4896.00 frames. ], tot_loss[loss=0.1713, simple_loss=0.243, pruned_loss=0.04975, over 954361.47 frames. ], batch size: 37, lr: 3.06e-03, grad_scale: 16.0
+2023-03-27 04:54:35,890 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7296, 1.7041, 1.6865, 1.6968, 1.4631, 3.7905, 1.6060, 1.9912],
+ device='cuda:5'), covar=tensor([0.3353, 0.2549, 0.2046, 0.2450, 0.1726, 0.0247, 0.2306, 0.1217],
+ device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0115, 0.0121, 0.0123, 0.0113, 0.0096, 0.0094, 0.0095],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 04:54:59,477 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.355e+01 1.624e+02 1.914e+02 2.315e+02 3.947e+02, threshold=3.828e+02, percent-clipped=1.0
+2023-03-27 04:55:19,623 INFO [finetune.py:976] (5/7) Epoch 24, batch 1450, loss[loss=0.1625, simple_loss=0.2404, pruned_loss=0.04231, over 4897.00 frames. ], tot_loss[loss=0.1733, simple_loss=0.2455, pruned_loss=0.05052, over 955219.70 frames. ], batch size: 43, lr: 3.06e-03, grad_scale: 16.0
+2023-03-27 04:55:35,368 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8478, 1.7392, 2.1686, 1.4018, 1.8962, 2.1441, 1.6217, 2.2814],
+ device='cuda:5'), covar=tensor([0.1154, 0.1948, 0.1225, 0.1729, 0.0892, 0.1174, 0.2689, 0.0850],
+ device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0208, 0.0192, 0.0190, 0.0173, 0.0214, 0.0216, 0.0200],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 04:55:56,690 INFO [finetune.py:976] (5/7) Epoch 24, batch 1500, loss[loss=0.1968, simple_loss=0.2588, pruned_loss=0.06738, over 4762.00 frames. ], tot_loss[loss=0.1753, simple_loss=0.2473, pruned_loss=0.05162, over 955343.94 frames. ], batch size: 26, lr: 3.06e-03, grad_scale: 16.0
+2023-03-27 04:55:56,772 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2160, 3.6848, 3.8582, 4.0378, 3.9775, 3.7742, 4.3110, 1.4802],
+ device='cuda:5'), covar=tensor([0.0727, 0.0879, 0.0894, 0.0937, 0.1108, 0.1438, 0.0743, 0.5610],
+ device='cuda:5'), in_proj_covar=tensor([0.0347, 0.0247, 0.0281, 0.0293, 0.0338, 0.0286, 0.0306, 0.0300],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 04:55:56,926 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.33 vs. limit=5.0
+2023-03-27 04:56:15,020 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.176e+02 1.579e+02 1.864e+02 2.355e+02 5.095e+02, threshold=3.727e+02, percent-clipped=2.0
+2023-03-27 04:56:24,025 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3610, 2.4397, 2.0227, 2.6604, 2.3550, 2.0470, 2.7691, 2.4844],
+ device='cuda:5'), covar=tensor([0.1245, 0.2230, 0.2496, 0.2066, 0.2089, 0.1526, 0.2583, 0.1521],
+ device='cuda:5'), in_proj_covar=tensor([0.0187, 0.0188, 0.0235, 0.0251, 0.0247, 0.0204, 0.0213, 0.0201],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 04:56:30,468 INFO [finetune.py:976] (5/7) Epoch 24, batch 1550, loss[loss=0.2317, simple_loss=0.3004, pruned_loss=0.08148, over 4709.00 frames. ], tot_loss[loss=0.1758, simple_loss=0.2479, pruned_loss=0.05186, over 952253.90 frames. ], batch size: 54, lr: 3.06e-03, grad_scale: 32.0
+2023-03-27 04:56:50,656 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133316.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:57:04,266 INFO [finetune.py:976] (5/7) Epoch 24, batch 1600, loss[loss=0.1635, simple_loss=0.2312, pruned_loss=0.04786, over 4844.00 frames. ], tot_loss[loss=0.1738, simple_loss=0.2456, pruned_loss=0.05103, over 953511.55 frames. ], batch size: 49, lr: 3.06e-03, grad_scale: 32.0
+2023-03-27 04:57:28,473 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.062e+02 1.451e+02 1.796e+02 2.043e+02 3.402e+02, threshold=3.593e+02, percent-clipped=0.0
+2023-03-27 04:57:28,563 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=133364.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:57:38,829 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=133374.0, num_to_drop=1, layers_to_drop={0}
+2023-03-27 04:57:40,029 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=133376.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:57:46,648 INFO [finetune.py:976] (5/7) Epoch 24, batch 1650, loss[loss=0.1157, simple_loss=0.1864, pruned_loss=0.02252, over 4708.00 frames. ], tot_loss[loss=0.1726, simple_loss=0.2435, pruned_loss=0.05079, over 953476.82 frames. ], batch size: 59, lr: 3.06e-03, grad_scale: 32.0
+2023-03-27 04:57:50,094 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0
+2023-03-27 04:57:56,899 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133402.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:58:19,421 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=133435.0, num_to_drop=1, layers_to_drop={1}
+2023-03-27 04:58:20,505 INFO [finetune.py:976] (5/7) Epoch 24, batch 1700, loss[loss=0.1799, simple_loss=0.2509, pruned_loss=0.05447, over 4261.00 frames. ], tot_loss[loss=0.1702, simple_loss=0.2403, pruned_loss=0.05006, over 954365.66 frames. ], batch size: 66, lr: 3.06e-03, grad_scale: 32.0
+2023-03-27 04:58:20,606 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=133437.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:58:31,233 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=133450.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:58:48,616 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.499e+01 1.457e+02 1.770e+02 2.219e+02 3.253e+02, threshold=3.541e+02, percent-clipped=0.0
+2023-03-27 04:58:52,967 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8292, 1.3255, 1.9639, 1.8765, 1.6903, 1.6367, 1.8312, 1.8205],
+ device='cuda:5'), covar=tensor([0.3918, 0.3955, 0.3123, 0.3614, 0.4592, 0.3745, 0.4622, 0.3019],
+ device='cuda:5'), in_proj_covar=tensor([0.0259, 0.0244, 0.0263, 0.0287, 0.0287, 0.0263, 0.0295, 0.0247],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 04:58:55,865 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=133474.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:59:03,618 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6778, 1.5789, 2.0177, 3.3629, 2.2212, 2.3141, 1.0302, 2.7730],
+ device='cuda:5'), covar=tensor([0.1620, 0.1373, 0.1268, 0.0483, 0.0789, 0.1455, 0.1857, 0.0448],
+ device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0132, 0.0162, 0.0100, 0.0136, 0.0124, 0.0100],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-27 04:59:04,125 INFO [finetune.py:976] (5/7) Epoch 24, batch 1750, loss[loss=0.1927, simple_loss=0.2629, pruned_loss=0.06122, over 4922.00 frames. ], tot_loss[loss=0.1715, simple_loss=0.2418, pruned_loss=0.05058, over 954459.94 frames. ], batch size: 38, lr: 3.06e-03, grad_scale: 32.0
+2023-03-27 04:59:24,980 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6508, 1.2228, 0.8036, 1.4918, 2.0117, 1.3618, 1.4643, 1.4926],
+ device='cuda:5'), covar=tensor([0.1425, 0.1937, 0.1937, 0.1176, 0.1931, 0.1938, 0.1338, 0.1938],
+ device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0093, 0.0108, 0.0091, 0.0118, 0.0093, 0.0097, 0.0088],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-27 04:59:36,816 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=133535.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 04:59:37,912 INFO [finetune.py:976] (5/7) Epoch 24, batch 1800, loss[loss=0.1367, simple_loss=0.206, pruned_loss=0.03371, over 4933.00 frames. ], tot_loss[loss=0.1732, simple_loss=0.2441, pruned_loss=0.05112, over 953419.48 frames. ], batch size: 33, lr: 3.06e-03, grad_scale: 32.0
+2023-03-27 04:59:57,742 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.053e+02 1.575e+02 1.839e+02 2.282e+02 3.463e+02, threshold=3.677e+02, percent-clipped=0.0
+2023-03-27 05:00:09,627 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7880, 1.2066, 1.8441, 1.8183, 1.6230, 1.6214, 1.7530, 1.7318],
+ device='cuda:5'), covar=tensor([0.3604, 0.3676, 0.2772, 0.3167, 0.4197, 0.3374, 0.4034, 0.2601],
+ device='cuda:5'), in_proj_covar=tensor([0.0259, 0.0244, 0.0263, 0.0287, 0.0287, 0.0263, 0.0295, 0.0248],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:00:23,457 INFO [finetune.py:976] (5/7) Epoch 24, batch 1850, loss[loss=0.1618, simple_loss=0.24, pruned_loss=0.04183, over 4790.00 frames. ], tot_loss[loss=0.1749, simple_loss=0.2458, pruned_loss=0.052, over 955629.67 frames. ], batch size: 29, lr: 3.06e-03, grad_scale: 32.0
+2023-03-27 05:00:24,138 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5374, 1.1587, 0.8611, 1.4193, 1.9800, 1.3469, 1.2898, 1.4961],
+ device='cuda:5'), covar=tensor([0.1492, 0.2056, 0.1959, 0.1243, 0.1917, 0.2058, 0.1441, 0.1950],
+ device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0093, 0.0109, 0.0092, 0.0118, 0.0093, 0.0098, 0.0088],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003],
+ device='cuda:5')
+2023-03-27 05:00:36,310 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5925, 2.4345, 2.1802, 2.5498, 2.3894, 2.3601, 2.3360, 3.4158],
+ device='cuda:5'), covar=tensor([0.3628, 0.5246, 0.3443, 0.4163, 0.4145, 0.2607, 0.4535, 0.1498],
+ device='cuda:5'), in_proj_covar=tensor([0.0291, 0.0263, 0.0235, 0.0277, 0.0258, 0.0228, 0.0255, 0.0236],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:01:04,053 INFO [finetune.py:976] (5/7) Epoch 24, batch 1900, loss[loss=0.1731, simple_loss=0.2533, pruned_loss=0.04642, over 4802.00 frames. ], tot_loss[loss=0.1768, simple_loss=0.2481, pruned_loss=0.05277, over 954955.18 frames. ], batch size: 40, lr: 3.06e-03, grad_scale: 32.0
+2023-03-27 05:01:14,220 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=133652.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:01:21,802 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.050e+02 1.540e+02 1.881e+02 2.277e+02 3.366e+02, threshold=3.762e+02, percent-clipped=0.0
+2023-03-27 05:01:37,660 INFO [finetune.py:976] (5/7) Epoch 24, batch 1950, loss[loss=0.148, simple_loss=0.2301, pruned_loss=0.03291, over 4759.00 frames. ], tot_loss[loss=0.1751, simple_loss=0.2466, pruned_loss=0.05178, over 956519.03 frames. ], batch size: 26, lr: 3.06e-03, grad_scale: 32.0
+2023-03-27 05:01:46,415 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0
+2023-03-27 05:01:52,136 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.6489, 1.7671, 1.8403, 1.1745, 1.9299, 2.1615, 2.1431, 1.6354],
+ device='cuda:5'), covar=tensor([0.0904, 0.0823, 0.0568, 0.0587, 0.0526, 0.0777, 0.0326, 0.0876],
+ device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0148, 0.0127, 0.0122, 0.0131, 0.0129, 0.0141, 0.0148],
+ device='cuda:5'), out_proj_covar=tensor([8.8812e-05, 1.0656e-04, 9.0819e-05, 8.5640e-05, 9.1991e-05, 9.2129e-05,
+ 1.0061e-04, 1.0578e-04], device='cuda:5')
+2023-03-27 05:01:55,029 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=133713.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:02:06,304 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133730.0, num_to_drop=1, layers_to_drop={2}
+2023-03-27 05:02:07,531 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133732.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:02:11,409 INFO [finetune.py:976] (5/7) Epoch 24, batch 2000, loss[loss=0.185, simple_loss=0.2519, pruned_loss=0.05908, over 4738.00 frames. ], tot_loss[loss=0.1737, simple_loss=0.2446, pruned_loss=0.05145, over 956061.78 frames. ], batch size: 54, lr: 3.06e-03, grad_scale: 32.0
+2023-03-27 05:02:28,714 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.381e+01 1.373e+02 1.735e+02 2.243e+02 3.912e+02, threshold=3.469e+02, percent-clipped=2.0
+2023-03-27 05:02:54,159 INFO [finetune.py:976] (5/7) Epoch 24, batch 2050, loss[loss=0.1386, simple_loss=0.2157, pruned_loss=0.0307, over 4788.00 frames. ], tot_loss[loss=0.1717, simple_loss=0.2418, pruned_loss=0.05076, over 956152.02 frames. ], batch size: 25, lr: 3.06e-03, grad_scale: 32.0
+2023-03-27 05:03:23,705 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133830.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:03:27,933 INFO [finetune.py:976] (5/7) Epoch 24, batch 2100, loss[loss=0.2258, simple_loss=0.3028, pruned_loss=0.07439, over 4932.00 frames. ], tot_loss[loss=0.1726, simple_loss=0.2425, pruned_loss=0.05134, over 956972.92 frames. ], batch size: 38, lr: 3.06e-03, grad_scale: 32.0
+2023-03-27 05:03:47,591 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.827e+01 1.541e+02 1.860e+02 2.293e+02 6.118e+02, threshold=3.720e+02, percent-clipped=3.0
+2023-03-27 05:04:11,228 INFO [finetune.py:976] (5/7) Epoch 24, batch 2150, loss[loss=0.1693, simple_loss=0.2447, pruned_loss=0.04694, over 4832.00 frames. ], tot_loss[loss=0.1751, simple_loss=0.2457, pruned_loss=0.05222, over 957121.58 frames. ], batch size: 30, lr: 3.05e-03, grad_scale: 32.0
+2023-03-27 05:04:39,757 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3541, 2.2031, 1.6991, 2.2462, 2.1827, 1.9161, 2.6260, 2.3167],
+ device='cuda:5'), covar=tensor([0.1360, 0.2290, 0.3299, 0.2727, 0.2849, 0.1806, 0.3182, 0.1828],
+ device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0191, 0.0238, 0.0256, 0.0252, 0.0208, 0.0216, 0.0204],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:04:44,944 INFO [finetune.py:976] (5/7) Epoch 24, batch 2200, loss[loss=0.2681, simple_loss=0.314, pruned_loss=0.1111, over 4814.00 frames. ], tot_loss[loss=0.1767, simple_loss=0.2484, pruned_loss=0.05253, over 955807.01 frames. ], batch size: 39, lr: 3.05e-03, grad_scale: 32.0
+2023-03-27 05:05:02,713 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.027e+02 1.464e+02 1.824e+02 2.239e+02 3.694e+02, threshold=3.648e+02, percent-clipped=0.0
+2023-03-27 05:05:23,027 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1227, 1.2419, 1.5557, 1.2691, 1.3980, 2.4766, 1.2150, 1.3708],
+ device='cuda:5'), covar=tensor([0.1167, 0.2089, 0.1080, 0.1157, 0.1965, 0.0420, 0.1820, 0.2169],
+ device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0074, 0.0077, 0.0092, 0.0080, 0.0086, 0.0080],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 05:05:24,729 INFO [finetune.py:976] (5/7) Epoch 24, batch 2250, loss[loss=0.1457, simple_loss=0.2326, pruned_loss=0.02938, over 4918.00 frames. ], tot_loss[loss=0.1766, simple_loss=0.2485, pruned_loss=0.05237, over 953576.74 frames. ], batch size: 38, lr: 3.05e-03, grad_scale: 32.0
+2023-03-27 05:05:25,550 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0
+2023-03-27 05:05:35,444 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=133997.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:05:49,821 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134008.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:06:08,017 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134030.0, num_to_drop=1, layers_to_drop={0}
+2023-03-27 05:06:09,734 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134032.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:06:12,678 INFO [finetune.py:976] (5/7) Epoch 24, batch 2300, loss[loss=0.155, simple_loss=0.2278, pruned_loss=0.04106, over 4853.00 frames. ], tot_loss[loss=0.177, simple_loss=0.249, pruned_loss=0.05247, over 953457.57 frames. ], batch size: 31, lr: 3.05e-03, grad_scale: 32.0
+2023-03-27 05:06:17,261 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-03-27 05:06:27,070 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134058.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:06:31,058 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.051e+02 1.533e+02 1.723e+02 2.089e+02 4.293e+02, threshold=3.445e+02, percent-clipped=2.0
+2023-03-27 05:06:40,133 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=134078.0, num_to_drop=1, layers_to_drop={0}
+2023-03-27 05:06:41,359 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=134080.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:06:46,521 INFO [finetune.py:976] (5/7) Epoch 24, batch 2350, loss[loss=0.165, simple_loss=0.2411, pruned_loss=0.04441, over 4749.00 frames. ], tot_loss[loss=0.1746, simple_loss=0.2466, pruned_loss=0.0513, over 955741.91 frames. ], batch size: 26, lr: 3.05e-03, grad_scale: 32.0
+2023-03-27 05:07:09,017 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7042, 3.5378, 3.4653, 1.7850, 3.6468, 2.6501, 1.0514, 2.6121],
+ device='cuda:5'), covar=tensor([0.2675, 0.1850, 0.1412, 0.3110, 0.1016, 0.1087, 0.3886, 0.1377],
+ device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0179, 0.0160, 0.0129, 0.0160, 0.0124, 0.0148, 0.0124],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-27 05:07:14,971 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134130.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:07:19,177 INFO [finetune.py:976] (5/7) Epoch 24, batch 2400, loss[loss=0.1459, simple_loss=0.2135, pruned_loss=0.03916, over 4751.00 frames. ], tot_loss[loss=0.172, simple_loss=0.2433, pruned_loss=0.05032, over 957869.22 frames. ], batch size: 26, lr: 3.05e-03, grad_scale: 32.0
+2023-03-27 05:07:35,393 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0
+2023-03-27 05:07:38,328 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.553e+01 1.434e+02 1.789e+02 2.166e+02 3.942e+02, threshold=3.577e+02, percent-clipped=1.0
+2023-03-27 05:07:49,932 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=134178.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:07:55,484 INFO [finetune.py:976] (5/7) Epoch 24, batch 2450, loss[loss=0.1458, simple_loss=0.2133, pruned_loss=0.03912, over 4906.00 frames. ], tot_loss[loss=0.1703, simple_loss=0.2408, pruned_loss=0.04994, over 955635.59 frames. ], batch size: 43, lr: 3.05e-03, grad_scale: 32.0
+2023-03-27 05:08:12,574 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0701, 1.9395, 2.0753, 1.3535, 2.0144, 2.1276, 2.1480, 1.6325],
+ device='cuda:5'), covar=tensor([0.0531, 0.0657, 0.0648, 0.0874, 0.0797, 0.0702, 0.0576, 0.1195],
+ device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0137, 0.0140, 0.0120, 0.0127, 0.0139, 0.0139, 0.0163],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:08:36,907 INFO [finetune.py:976] (5/7) Epoch 24, batch 2500, loss[loss=0.2023, simple_loss=0.2704, pruned_loss=0.06713, over 4868.00 frames. ], tot_loss[loss=0.1715, simple_loss=0.2422, pruned_loss=0.05043, over 955670.80 frames. ], batch size: 34, lr: 3.05e-03, grad_scale: 32.0
+2023-03-27 05:08:55,716 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.396e+01 1.499e+02 1.865e+02 2.171e+02 5.575e+02, threshold=3.730e+02, percent-clipped=1.0
+2023-03-27 05:09:01,379 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1284, 2.1767, 1.9364, 2.2497, 2.0593, 2.1087, 2.0620, 2.7468],
+ device='cuda:5'), covar=tensor([0.3319, 0.4127, 0.2975, 0.3901, 0.4065, 0.2165, 0.4024, 0.1470],
+ device='cuda:5'), in_proj_covar=tensor([0.0289, 0.0262, 0.0234, 0.0275, 0.0256, 0.0226, 0.0253, 0.0235],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:09:20,384 INFO [finetune.py:976] (5/7) Epoch 24, batch 2550, loss[loss=0.1672, simple_loss=0.2496, pruned_loss=0.04238, over 4837.00 frames. ], tot_loss[loss=0.1744, simple_loss=0.2456, pruned_loss=0.05155, over 954844.84 frames. ], batch size: 47, lr: 3.05e-03, grad_scale: 32.0
+2023-03-27 05:09:32,216 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134304.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:09:34,676 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134308.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:09:35,284 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134309.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:09:54,095 INFO [finetune.py:976] (5/7) Epoch 24, batch 2600, loss[loss=0.1814, simple_loss=0.2473, pruned_loss=0.0577, over 4889.00 frames. ], tot_loss[loss=0.1755, simple_loss=0.2472, pruned_loss=0.05194, over 954452.39 frames. ], batch size: 32, lr: 3.05e-03, grad_scale: 32.0
+2023-03-27 05:10:04,847 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134353.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:10:07,201 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=134356.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:10:09,697 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2811, 2.9856, 2.7645, 1.2229, 3.0577, 2.2753, 0.6896, 1.8935],
+ device='cuda:5'), covar=tensor([0.2422, 0.2186, 0.1905, 0.3668, 0.1397, 0.1183, 0.4186, 0.1669],
+ device='cuda:5'), in_proj_covar=tensor([0.0154, 0.0180, 0.0162, 0.0130, 0.0161, 0.0124, 0.0149, 0.0124],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-27 05:10:11,582 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6867, 2.7182, 2.7219, 1.7514, 2.6161, 2.8637, 2.9776, 2.3991],
+ device='cuda:5'), covar=tensor([0.0627, 0.0595, 0.0658, 0.0994, 0.0919, 0.0751, 0.0593, 0.1004],
+ device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0138, 0.0141, 0.0121, 0.0128, 0.0140, 0.0141, 0.0164],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:10:12,060 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.001e+02 1.471e+02 1.806e+02 2.184e+02 4.519e+02, threshold=3.612e+02, percent-clipped=2.0
+2023-03-27 05:10:13,311 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134365.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:10:16,846 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134370.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:10:29,858 INFO [finetune.py:976] (5/7) Epoch 24, batch 2650, loss[loss=0.1806, simple_loss=0.2452, pruned_loss=0.05798, over 4756.00 frames. ], tot_loss[loss=0.1755, simple_loss=0.2471, pruned_loss=0.05199, over 952889.30 frames. ], batch size: 28, lr: 3.05e-03, grad_scale: 32.0
+2023-03-27 05:10:55,603 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.4566, 3.8537, 4.0920, 4.2840, 4.2270, 3.9521, 4.5435, 1.5177],
+ device='cuda:5'), covar=tensor([0.0744, 0.0773, 0.0757, 0.0889, 0.1063, 0.1486, 0.0656, 0.5366],
+ device='cuda:5'), in_proj_covar=tensor([0.0346, 0.0246, 0.0281, 0.0292, 0.0336, 0.0286, 0.0307, 0.0300],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:11:07,366 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9258, 1.8984, 1.6505, 2.0993, 2.4802, 2.0588, 1.8534, 1.5690],
+ device='cuda:5'), covar=tensor([0.2024, 0.1906, 0.1906, 0.1495, 0.1583, 0.1121, 0.2225, 0.1885],
+ device='cuda:5'), in_proj_covar=tensor([0.0242, 0.0208, 0.0212, 0.0195, 0.0241, 0.0189, 0.0214, 0.0202],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:11:14,493 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6500, 1.5751, 1.4837, 1.5985, 1.1646, 3.3658, 1.4288, 1.8137],
+ device='cuda:5'), covar=tensor([0.3442, 0.2613, 0.2225, 0.2540, 0.1890, 0.0253, 0.2541, 0.1294],
+ device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0117, 0.0121, 0.0124, 0.0114, 0.0097, 0.0095, 0.0095],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 05:11:21,025 INFO [finetune.py:976] (5/7) Epoch 24, batch 2700, loss[loss=0.1563, simple_loss=0.2302, pruned_loss=0.04123, over 4874.00 frames. ], tot_loss[loss=0.1745, simple_loss=0.2462, pruned_loss=0.05133, over 953407.36 frames. ], batch size: 34, lr: 3.05e-03, grad_scale: 32.0
+2023-03-27 05:11:39,164 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.916e+01 1.416e+02 1.758e+02 2.159e+02 3.599e+02, threshold=3.516e+02, percent-clipped=0.0
+2023-03-27 05:11:44,533 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6369, 1.5082, 1.0714, 0.3045, 1.2817, 1.4804, 1.5008, 1.4726],
+ device='cuda:5'), covar=tensor([0.0939, 0.0869, 0.1548, 0.1974, 0.1514, 0.2598, 0.2493, 0.0925],
+ device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0190, 0.0199, 0.0180, 0.0209, 0.0208, 0.0223, 0.0195],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:11:54,599 INFO [finetune.py:976] (5/7) Epoch 24, batch 2750, loss[loss=0.152, simple_loss=0.2255, pruned_loss=0.03931, over 4779.00 frames. ], tot_loss[loss=0.1733, simple_loss=0.2442, pruned_loss=0.05121, over 953538.25 frames. ], batch size: 28, lr: 3.05e-03, grad_scale: 32.0
+2023-03-27 05:12:27,873 INFO [finetune.py:976] (5/7) Epoch 24, batch 2800, loss[loss=0.1522, simple_loss=0.2247, pruned_loss=0.03983, over 4836.00 frames. ], tot_loss[loss=0.1699, simple_loss=0.2404, pruned_loss=0.04976, over 953847.85 frames. ], batch size: 39, lr: 3.05e-03, grad_scale: 32.0
+2023-03-27 05:12:46,113 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.301e+01 1.456e+02 1.823e+02 2.196e+02 4.309e+02, threshold=3.645e+02, percent-clipped=3.0
+2023-03-27 05:12:59,983 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0
+2023-03-27 05:13:01,598 INFO [finetune.py:976] (5/7) Epoch 24, batch 2850, loss[loss=0.1618, simple_loss=0.2455, pruned_loss=0.03909, over 4759.00 frames. ], tot_loss[loss=0.1688, simple_loss=0.239, pruned_loss=0.0493, over 950947.87 frames. ], batch size: 54, lr: 3.05e-03, grad_scale: 16.0
+2023-03-27 05:13:06,524 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.84 vs. limit=5.0
+2023-03-27 05:13:17,398 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2206, 1.3772, 1.6438, 1.5168, 1.4909, 2.9806, 1.3364, 1.5113],
+ device='cuda:5'), covar=tensor([0.1041, 0.1763, 0.1038, 0.0973, 0.1591, 0.0261, 0.1469, 0.1793],
+ device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0081, 0.0072, 0.0076, 0.0091, 0.0080, 0.0085, 0.0079],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 05:13:45,120 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.10 vs. limit=5.0
+2023-03-27 05:13:45,376 INFO [finetune.py:976] (5/7) Epoch 24, batch 2900, loss[loss=0.2248, simple_loss=0.2923, pruned_loss=0.07863, over 4851.00 frames. ], tot_loss[loss=0.1702, simple_loss=0.2406, pruned_loss=0.04988, over 949940.45 frames. ], batch size: 44, lr: 3.05e-03, grad_scale: 16.0
+2023-03-27 05:13:56,307 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134653.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:14:00,532 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134660.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:14:03,944 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.005e+02 1.559e+02 1.783e+02 2.063e+02 3.902e+02, threshold=3.566e+02, percent-clipped=1.0
+2023-03-27 05:14:04,029 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134665.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:14:20,893 INFO [finetune.py:976] (5/7) Epoch 24, batch 2950, loss[loss=0.2005, simple_loss=0.2723, pruned_loss=0.06434, over 4893.00 frames. ], tot_loss[loss=0.1711, simple_loss=0.2428, pruned_loss=0.04974, over 951436.29 frames. ], batch size: 32, lr: 3.05e-03, grad_scale: 16.0
+2023-03-27 05:14:37,442 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=134701.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:14:42,163 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.55 vs. limit=2.0
+2023-03-27 05:14:46,527 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.71 vs. limit=5.0
+2023-03-27 05:14:58,299 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0
+2023-03-27 05:15:02,626 INFO [finetune.py:976] (5/7) Epoch 24, batch 3000, loss[loss=0.2158, simple_loss=0.2838, pruned_loss=0.07386, over 4904.00 frames. ], tot_loss[loss=0.1746, simple_loss=0.2463, pruned_loss=0.05147, over 952779.26 frames. ], batch size: 36, lr: 3.05e-03, grad_scale: 16.0
+2023-03-27 05:15:02,626 INFO [finetune.py:1001] (5/7) Computing validation loss
+2023-03-27 05:15:13,334 INFO [finetune.py:1010] (5/7) Epoch 24, validation: loss=0.1561, simple_loss=0.2251, pruned_loss=0.0436, over 2265189.00 frames.
+2023-03-27 05:15:13,334 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB
+2023-03-27 05:15:15,184 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3595, 2.4170, 2.4510, 1.7614, 2.2014, 2.5414, 2.6163, 2.0179],
+ device='cuda:5'), covar=tensor([0.0585, 0.0569, 0.0637, 0.0780, 0.1065, 0.0705, 0.0565, 0.1124],
+ device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0138, 0.0141, 0.0120, 0.0128, 0.0139, 0.0140, 0.0163],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:15:26,744 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.10 vs. limit=2.0
+2023-03-27 05:15:31,260 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.188e+02 1.527e+02 1.858e+02 2.241e+02 5.364e+02, threshold=3.716e+02, percent-clipped=3.0
+2023-03-27 05:15:48,106 INFO [finetune.py:976] (5/7) Epoch 24, batch 3050, loss[loss=0.1881, simple_loss=0.2551, pruned_loss=0.06056, over 4817.00 frames. ], tot_loss[loss=0.1759, simple_loss=0.2478, pruned_loss=0.05198, over 954344.25 frames. ], batch size: 47, lr: 3.05e-03, grad_scale: 16.0
+2023-03-27 05:15:57,600 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-03-27 05:16:29,257 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1053, 2.0221, 1.7595, 1.9962, 1.8846, 1.9106, 1.9337, 2.6832],
+ device='cuda:5'), covar=tensor([0.3710, 0.4014, 0.3194, 0.3522, 0.3963, 0.2327, 0.3793, 0.1527],
+ device='cuda:5'), in_proj_covar=tensor([0.0291, 0.0264, 0.0235, 0.0277, 0.0258, 0.0228, 0.0256, 0.0238],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:16:35,921 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0431, 4.2432, 4.0909, 2.3042, 4.3727, 3.3726, 1.7159, 3.1264],
+ device='cuda:5'), covar=tensor([0.2051, 0.1668, 0.1480, 0.3344, 0.0811, 0.0914, 0.3760, 0.1485],
+ device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0180, 0.0162, 0.0130, 0.0161, 0.0125, 0.0149, 0.0125],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-27 05:16:39,365 INFO [finetune.py:976] (5/7) Epoch 24, batch 3100, loss[loss=0.1772, simple_loss=0.2534, pruned_loss=0.0505, over 4810.00 frames. ], tot_loss[loss=0.1746, simple_loss=0.246, pruned_loss=0.05156, over 952839.96 frames. ], batch size: 41, lr: 3.05e-03, grad_scale: 16.0
+2023-03-27 05:16:56,667 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134862.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:16:58,378 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.835e+01 1.385e+02 1.668e+02 2.115e+02 5.080e+02, threshold=3.336e+02, percent-clipped=1.0
+2023-03-27 05:17:12,755 INFO [finetune.py:976] (5/7) Epoch 24, batch 3150, loss[loss=0.1601, simple_loss=0.2264, pruned_loss=0.04694, over 4904.00 frames. ], tot_loss[loss=0.1719, simple_loss=0.2429, pruned_loss=0.05047, over 953586.91 frames. ], batch size: 32, lr: 3.05e-03, grad_scale: 16.0
+2023-03-27 05:17:31,127 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9648, 2.1465, 1.7428, 1.7431, 2.4615, 2.5681, 2.1583, 1.9999],
+ device='cuda:5'), covar=tensor([0.0389, 0.0314, 0.0612, 0.0375, 0.0297, 0.0512, 0.0414, 0.0429],
+ device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0106, 0.0144, 0.0112, 0.0100, 0.0113, 0.0102, 0.0112],
+ device='cuda:5'), out_proj_covar=tensor([7.7410e-05, 8.1287e-05, 1.1291e-04, 8.5600e-05, 7.7892e-05, 8.3760e-05,
+ 7.5966e-05, 8.5541e-05], device='cuda:5')
+2023-03-27 05:17:37,256 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134923.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:17:38,963 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3670, 1.4173, 1.7701, 1.5787, 1.6893, 3.0290, 1.3957, 1.5732],
+ device='cuda:5'), covar=tensor([0.0977, 0.1739, 0.0955, 0.0949, 0.1430, 0.0276, 0.1446, 0.1733],
+ device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0081, 0.0072, 0.0076, 0.0091, 0.0080, 0.0085, 0.0079],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 05:17:46,597 INFO [finetune.py:976] (5/7) Epoch 24, batch 3200, loss[loss=0.1518, simple_loss=0.2231, pruned_loss=0.04023, over 4871.00 frames. ], tot_loss[loss=0.1679, simple_loss=0.2386, pruned_loss=0.04855, over 953625.38 frames. ], batch size: 31, lr: 3.05e-03, grad_scale: 16.0
+2023-03-27 05:18:03,010 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134960.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:18:05,932 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.665e+01 1.513e+02 1.793e+02 2.252e+02 3.579e+02, threshold=3.586e+02, percent-clipped=1.0
+2023-03-27 05:18:06,038 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134965.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:18:16,330 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4285, 2.3143, 1.9045, 2.6885, 2.3566, 2.0484, 2.8889, 2.5106],
+ device='cuda:5'), covar=tensor([0.1346, 0.2302, 0.2959, 0.2541, 0.2584, 0.1755, 0.3417, 0.1629],
+ device='cuda:5'), in_proj_covar=tensor([0.0189, 0.0190, 0.0236, 0.0254, 0.0250, 0.0206, 0.0214, 0.0203],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:18:22,480 INFO [finetune.py:976] (5/7) Epoch 24, batch 3250, loss[loss=0.1744, simple_loss=0.2454, pruned_loss=0.0517, over 4902.00 frames. ], tot_loss[loss=0.1674, simple_loss=0.2383, pruned_loss=0.04821, over 953272.64 frames. ], batch size: 36, lr: 3.05e-03, grad_scale: 16.0
+2023-03-27 05:18:45,537 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=135008.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:18:48,605 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=135013.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:19:04,066 INFO [finetune.py:976] (5/7) Epoch 24, batch 3300, loss[loss=0.2033, simple_loss=0.2793, pruned_loss=0.06365, over 4840.00 frames. ], tot_loss[loss=0.1714, simple_loss=0.2433, pruned_loss=0.04982, over 953724.41 frames. ], batch size: 47, lr: 3.05e-03, grad_scale: 16.0
+2023-03-27 05:19:08,386 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([5.1123, 4.4958, 4.6935, 4.9405, 4.8473, 4.5130, 5.2181, 1.6151],
+ device='cuda:5'), covar=tensor([0.0691, 0.0698, 0.0713, 0.0827, 0.1075, 0.1604, 0.0534, 0.5589],
+ device='cuda:5'), in_proj_covar=tensor([0.0347, 0.0246, 0.0281, 0.0293, 0.0336, 0.0287, 0.0306, 0.0300],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:19:08,832 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0
+2023-03-27 05:19:23,522 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.113e+02 1.563e+02 1.899e+02 2.275e+02 5.700e+02, threshold=3.799e+02, percent-clipped=2.0
+2023-03-27 05:19:44,194 INFO [finetune.py:976] (5/7) Epoch 24, batch 3350, loss[loss=0.2111, simple_loss=0.2818, pruned_loss=0.07023, over 4805.00 frames. ], tot_loss[loss=0.173, simple_loss=0.2456, pruned_loss=0.05017, over 954821.24 frames. ], batch size: 41, lr: 3.05e-03, grad_scale: 16.0
+2023-03-27 05:19:58,533 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2608, 2.2054, 1.7493, 2.3011, 2.1811, 1.9206, 2.6184, 2.2731],
+ device='cuda:5'), covar=tensor([0.1251, 0.2228, 0.2821, 0.2397, 0.2408, 0.1579, 0.2642, 0.1533],
+ device='cuda:5'), in_proj_covar=tensor([0.0189, 0.0190, 0.0236, 0.0254, 0.0250, 0.0206, 0.0213, 0.0203],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:20:04,390 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.70 vs. limit=5.0
+2023-03-27 05:20:17,131 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0
+2023-03-27 05:20:21,441 INFO [finetune.py:976] (5/7) Epoch 24, batch 3400, loss[loss=0.2145, simple_loss=0.2772, pruned_loss=0.07592, over 4817.00 frames. ], tot_loss[loss=0.1739, simple_loss=0.2468, pruned_loss=0.05047, over 954664.67 frames. ], batch size: 38, lr: 3.05e-03, grad_scale: 16.0
+2023-03-27 05:20:40,371 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.077e+02 1.584e+02 1.828e+02 2.150e+02 3.792e+02, threshold=3.656e+02, percent-clipped=0.0
+2023-03-27 05:20:42,160 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.7971, 3.3564, 3.5319, 3.6972, 3.5738, 3.3266, 3.8886, 1.2301],
+ device='cuda:5'), covar=tensor([0.0997, 0.0925, 0.1067, 0.1038, 0.1435, 0.1938, 0.0957, 0.5720],
+ device='cuda:5'), in_proj_covar=tensor([0.0349, 0.0247, 0.0283, 0.0294, 0.0338, 0.0287, 0.0308, 0.0302],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:20:44,643 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135171.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:20:53,187 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9140, 1.8654, 1.7000, 1.9963, 2.5131, 2.0996, 1.9979, 1.5251],
+ device='cuda:5'), covar=tensor([0.2226, 0.1926, 0.1889, 0.1701, 0.1690, 0.1116, 0.2065, 0.2057],
+ device='cuda:5'), in_proj_covar=tensor([0.0247, 0.0213, 0.0216, 0.0199, 0.0246, 0.0192, 0.0218, 0.0207],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:20:54,274 INFO [finetune.py:976] (5/7) Epoch 24, batch 3450, loss[loss=0.1626, simple_loss=0.2354, pruned_loss=0.04486, over 4835.00 frames. ], tot_loss[loss=0.1733, simple_loss=0.2463, pruned_loss=0.05009, over 956367.85 frames. ], batch size: 30, lr: 3.05e-03, grad_scale: 16.0
+2023-03-27 05:21:27,772 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=135218.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:21:40,733 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=135232.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:21:47,100 INFO [finetune.py:976] (5/7) Epoch 24, batch 3500, loss[loss=0.1855, simple_loss=0.2549, pruned_loss=0.05806, over 4903.00 frames. ], tot_loss[loss=0.1726, simple_loss=0.2449, pruned_loss=0.05018, over 955246.78 frames. ], batch size: 43, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:22:06,081 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.678e+01 1.506e+02 1.714e+02 2.011e+02 3.544e+02, threshold=3.428e+02, percent-clipped=0.0
+2023-03-27 05:22:08,866 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0
+2023-03-27 05:22:20,453 INFO [finetune.py:976] (5/7) Epoch 24, batch 3550, loss[loss=0.156, simple_loss=0.222, pruned_loss=0.04501, over 4761.00 frames. ], tot_loss[loss=0.1704, simple_loss=0.2418, pruned_loss=0.04953, over 954738.31 frames. ], batch size: 27, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:22:21,937 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.60 vs. limit=2.0
+2023-03-27 05:22:31,843 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3259, 2.1783, 1.7816, 2.2358, 2.1454, 1.9724, 2.5154, 2.3059],
+ device='cuda:5'), covar=tensor([0.1312, 0.2346, 0.3130, 0.2842, 0.2961, 0.1722, 0.3654, 0.1772],
+ device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0191, 0.0237, 0.0255, 0.0251, 0.0207, 0.0215, 0.0204],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:22:43,655 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7097, 1.6640, 1.5585, 1.6348, 1.1353, 3.5255, 1.3649, 1.7920],
+ device='cuda:5'), covar=tensor([0.3072, 0.2360, 0.1989, 0.2315, 0.1692, 0.0175, 0.2697, 0.1209],
+ device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0116, 0.0120, 0.0123, 0.0113, 0.0096, 0.0094, 0.0094],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 05:22:54,387 INFO [finetune.py:976] (5/7) Epoch 24, batch 3600, loss[loss=0.191, simple_loss=0.2565, pruned_loss=0.06271, over 4897.00 frames. ], tot_loss[loss=0.1683, simple_loss=0.2395, pruned_loss=0.04854, over 955834.76 frames. ], batch size: 32, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:23:05,746 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0304, 0.9522, 0.8984, 1.1352, 1.1654, 1.1102, 0.9867, 0.8989],
+ device='cuda:5'), covar=tensor([0.0405, 0.0339, 0.0686, 0.0318, 0.0303, 0.0545, 0.0402, 0.0479],
+ device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0106, 0.0145, 0.0112, 0.0100, 0.0114, 0.0103, 0.0113],
+ device='cuda:5'), out_proj_covar=tensor([7.7362e-05, 8.1392e-05, 1.1351e-04, 8.6002e-05, 7.8024e-05, 8.4478e-05,
+ 7.6393e-05, 8.5661e-05], device='cuda:5')
+2023-03-27 05:23:12,795 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.699e+01 1.474e+02 1.759e+02 2.084e+02 3.295e+02, threshold=3.517e+02, percent-clipped=0.0
+2023-03-27 05:23:28,231 INFO [finetune.py:976] (5/7) Epoch 24, batch 3650, loss[loss=0.1676, simple_loss=0.2462, pruned_loss=0.04452, over 4820.00 frames. ], tot_loss[loss=0.1714, simple_loss=0.2425, pruned_loss=0.05018, over 956084.60 frames. ], batch size: 39, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:23:28,994 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7011, 1.6216, 1.4440, 1.8018, 1.9718, 1.7958, 1.2687, 1.3815],
+ device='cuda:5'), covar=tensor([0.1973, 0.1859, 0.1841, 0.1557, 0.1555, 0.1128, 0.2472, 0.1852],
+ device='cuda:5'), in_proj_covar=tensor([0.0243, 0.0209, 0.0212, 0.0196, 0.0242, 0.0189, 0.0215, 0.0204],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:23:33,876 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.11 vs. limit=2.0
+2023-03-27 05:24:00,831 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2698, 3.7242, 3.9482, 4.1875, 4.0481, 3.8238, 4.3744, 1.3352],
+ device='cuda:5'), covar=tensor([0.0898, 0.0864, 0.0918, 0.0943, 0.1388, 0.1783, 0.0781, 0.5845],
+ device='cuda:5'), in_proj_covar=tensor([0.0348, 0.0248, 0.0283, 0.0294, 0.0339, 0.0287, 0.0308, 0.0302],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:24:11,252 INFO [finetune.py:976] (5/7) Epoch 24, batch 3700, loss[loss=0.1376, simple_loss=0.22, pruned_loss=0.02762, over 4779.00 frames. ], tot_loss[loss=0.1743, simple_loss=0.2464, pruned_loss=0.05111, over 956127.82 frames. ], batch size: 26, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:24:28,523 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.118e+01 1.614e+02 1.999e+02 2.429e+02 5.138e+02, threshold=3.998e+02, percent-clipped=6.0
+2023-03-27 05:24:43,336 INFO [finetune.py:976] (5/7) Epoch 24, batch 3750, loss[loss=0.1718, simple_loss=0.2413, pruned_loss=0.05112, over 4787.00 frames. ], tot_loss[loss=0.1763, simple_loss=0.2486, pruned_loss=0.05194, over 956369.65 frames. ], batch size: 29, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:25:12,894 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=135518.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:25:17,805 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.77 vs. limit=5.0
+2023-03-27 05:25:18,843 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=135527.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:25:21,440 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.77 vs. limit=5.0
+2023-03-27 05:25:26,761 INFO [finetune.py:976] (5/7) Epoch 24, batch 3800, loss[loss=0.2062, simple_loss=0.2687, pruned_loss=0.07188, over 4859.00 frames. ], tot_loss[loss=0.177, simple_loss=0.2497, pruned_loss=0.0522, over 956366.79 frames. ], batch size: 34, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:25:44,706 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.088e+02 1.524e+02 1.815e+02 2.221e+02 4.659e+02, threshold=3.630e+02, percent-clipped=3.0
+2023-03-27 05:25:45,388 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=135566.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:25:51,995 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.3624, 5.0625, 4.8864, 3.4685, 5.1596, 4.1120, 1.2372, 3.8055],
+ device='cuda:5'), covar=tensor([0.2094, 0.1485, 0.1512, 0.2240, 0.0682, 0.0682, 0.4607, 0.1161],
+ device='cuda:5'), in_proj_covar=tensor([0.0155, 0.0182, 0.0163, 0.0131, 0.0164, 0.0126, 0.0151, 0.0126],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-27 05:26:00,454 INFO [finetune.py:976] (5/7) Epoch 24, batch 3850, loss[loss=0.148, simple_loss=0.2257, pruned_loss=0.03517, over 4822.00 frames. ], tot_loss[loss=0.1756, simple_loss=0.248, pruned_loss=0.0516, over 956272.32 frames. ], batch size: 39, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:26:38,632 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135630.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:26:45,795 INFO [finetune.py:976] (5/7) Epoch 24, batch 3900, loss[loss=0.1484, simple_loss=0.2144, pruned_loss=0.04115, over 4794.00 frames. ], tot_loss[loss=0.1729, simple_loss=0.2448, pruned_loss=0.05046, over 958254.14 frames. ], batch size: 29, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:27:10,711 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.672e+01 1.400e+02 1.667e+02 1.961e+02 4.314e+02, threshold=3.334e+02, percent-clipped=1.0
+2023-03-27 05:27:26,035 INFO [finetune.py:976] (5/7) Epoch 24, batch 3950, loss[loss=0.1773, simple_loss=0.237, pruned_loss=0.05876, over 4757.00 frames. ], tot_loss[loss=0.1697, simple_loss=0.2412, pruned_loss=0.04911, over 958709.65 frames. ], batch size: 54, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:27:29,113 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=135691.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:27:40,336 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0
+2023-03-27 05:27:58,423 INFO [finetune.py:976] (5/7) Epoch 24, batch 4000, loss[loss=0.1513, simple_loss=0.2255, pruned_loss=0.0385, over 4769.00 frames. ], tot_loss[loss=0.1703, simple_loss=0.2414, pruned_loss=0.04963, over 958114.59 frames. ], batch size: 28, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:28:16,422 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.536e+01 1.548e+02 1.897e+02 2.315e+02 3.943e+02, threshold=3.793e+02, percent-clipped=5.0
+2023-03-27 05:28:31,229 INFO [finetune.py:976] (5/7) Epoch 24, batch 4050, loss[loss=0.1995, simple_loss=0.2719, pruned_loss=0.06358, over 4909.00 frames. ], tot_loss[loss=0.1732, simple_loss=0.2444, pruned_loss=0.05103, over 956547.91 frames. ], batch size: 37, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:28:59,162 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=135827.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:29:09,952 INFO [finetune.py:976] (5/7) Epoch 24, batch 4100, loss[loss=0.149, simple_loss=0.2188, pruned_loss=0.03959, over 4731.00 frames. ], tot_loss[loss=0.1746, simple_loss=0.2464, pruned_loss=0.05138, over 957048.86 frames. ], batch size: 23, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:29:24,294 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8185, 1.7994, 1.6436, 2.0684, 2.0658, 2.0453, 1.5368, 1.6026],
+ device='cuda:5'), covar=tensor([0.2312, 0.1944, 0.1927, 0.1595, 0.1662, 0.1183, 0.2416, 0.2112],
+ device='cuda:5'), in_proj_covar=tensor([0.0241, 0.0208, 0.0211, 0.0194, 0.0241, 0.0188, 0.0214, 0.0203],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:29:32,642 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.978e+01 1.562e+02 1.866e+02 2.353e+02 4.250e+02, threshold=3.731e+02, percent-clipped=2.0
+2023-03-27 05:29:39,221 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=135875.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:29:46,950 INFO [finetune.py:976] (5/7) Epoch 24, batch 4150, loss[loss=0.1723, simple_loss=0.2537, pruned_loss=0.04545, over 4815.00 frames. ], tot_loss[loss=0.1754, simple_loss=0.2474, pruned_loss=0.0517, over 955853.39 frames. ], batch size: 38, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:30:30,444 INFO [finetune.py:976] (5/7) Epoch 24, batch 4200, loss[loss=0.1125, simple_loss=0.1904, pruned_loss=0.01726, over 4736.00 frames. ], tot_loss[loss=0.1753, simple_loss=0.2473, pruned_loss=0.0516, over 955123.37 frames. ], batch size: 27, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:30:42,055 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4670, 3.3758, 3.1475, 1.5342, 3.5810, 2.7168, 1.0020, 2.2202],
+ device='cuda:5'), covar=tensor([0.2478, 0.2068, 0.1878, 0.3429, 0.1215, 0.1005, 0.4194, 0.1532],
+ device='cuda:5'), in_proj_covar=tensor([0.0154, 0.0180, 0.0162, 0.0131, 0.0162, 0.0124, 0.0149, 0.0125],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-27 05:30:49,321 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.053e+01 1.587e+02 1.796e+02 2.438e+02 3.967e+02, threshold=3.591e+02, percent-clipped=1.0
+2023-03-27 05:31:00,646 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135982.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:31:03,040 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=135986.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:31:03,578 INFO [finetune.py:976] (5/7) Epoch 24, batch 4250, loss[loss=0.144, simple_loss=0.2133, pruned_loss=0.03735, over 4757.00 frames. ], tot_loss[loss=0.1739, simple_loss=0.2459, pruned_loss=0.05091, over 955710.25 frames. ], batch size: 27, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:31:08,484 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135994.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:31:17,918 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.2428, 1.2416, 1.3624, 0.6873, 1.3766, 1.5207, 1.4950, 1.3288],
+ device='cuda:5'), covar=tensor([0.1046, 0.0915, 0.0611, 0.0561, 0.0641, 0.0746, 0.0542, 0.0804],
+ device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0148, 0.0126, 0.0121, 0.0130, 0.0129, 0.0140, 0.0148],
+ device='cuda:5'), out_proj_covar=tensor([8.8768e-05, 1.0641e-04, 8.9831e-05, 8.5066e-05, 9.1552e-05, 9.1588e-05,
+ 1.0019e-04, 1.0560e-04], device='cuda:5')
+2023-03-27 05:31:45,376 INFO [finetune.py:976] (5/7) Epoch 24, batch 4300, loss[loss=0.1892, simple_loss=0.2406, pruned_loss=0.06886, over 4897.00 frames. ], tot_loss[loss=0.1721, simple_loss=0.2437, pruned_loss=0.05025, over 956636.20 frames. ], batch size: 43, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:31:49,212 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136043.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:32:03,110 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136055.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:32:14,138 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.579e+01 1.491e+02 1.827e+02 2.181e+02 5.621e+02, threshold=3.653e+02, percent-clipped=1.0
+2023-03-27 05:32:31,268 INFO [finetune.py:976] (5/7) Epoch 24, batch 4350, loss[loss=0.1996, simple_loss=0.2641, pruned_loss=0.06752, over 4868.00 frames. ], tot_loss[loss=0.1707, simple_loss=0.2415, pruned_loss=0.04992, over 957058.42 frames. ], batch size: 34, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:32:36,653 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.7064, 1.7372, 1.7435, 1.0404, 1.8899, 2.0922, 1.9605, 1.5802],
+ device='cuda:5'), covar=tensor([0.0874, 0.0601, 0.0478, 0.0507, 0.0409, 0.0610, 0.0338, 0.0733],
+ device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0148, 0.0126, 0.0121, 0.0131, 0.0129, 0.0141, 0.0148],
+ device='cuda:5'), out_proj_covar=tensor([8.9089e-05, 1.0682e-04, 9.0045e-05, 8.5337e-05, 9.1979e-05, 9.1747e-05,
+ 1.0050e-04, 1.0578e-04], device='cuda:5')
+2023-03-27 05:33:02,189 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6258, 1.6122, 2.0257, 1.8073, 1.6926, 3.0935, 1.5033, 1.6370],
+ device='cuda:5'), covar=tensor([0.0929, 0.1527, 0.1439, 0.0862, 0.1419, 0.0297, 0.1343, 0.1554],
+ device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0081, 0.0072, 0.0076, 0.0091, 0.0080, 0.0085, 0.0079],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 05:33:04,535 INFO [finetune.py:976] (5/7) Epoch 24, batch 4400, loss[loss=0.2224, simple_loss=0.296, pruned_loss=0.0744, over 4853.00 frames. ], tot_loss[loss=0.1723, simple_loss=0.2427, pruned_loss=0.05098, over 958856.13 frames. ], batch size: 44, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:33:08,176 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136142.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:33:18,768 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2024, 2.0710, 1.7137, 2.0959, 2.1470, 1.8831, 2.3959, 2.2235],
+ device='cuda:5'), covar=tensor([0.1351, 0.1928, 0.2809, 0.2542, 0.2506, 0.1626, 0.2630, 0.1660],
+ device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0191, 0.0237, 0.0255, 0.0251, 0.0207, 0.0215, 0.0203],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:33:23,891 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.065e+02 1.540e+02 1.819e+02 2.170e+02 3.954e+02, threshold=3.638e+02, percent-clipped=3.0
+2023-03-27 05:33:37,771 INFO [finetune.py:976] (5/7) Epoch 24, batch 4450, loss[loss=0.1211, simple_loss=0.1891, pruned_loss=0.0266, over 4654.00 frames. ], tot_loss[loss=0.1748, simple_loss=0.2461, pruned_loss=0.05175, over 958222.45 frames. ], batch size: 23, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:33:48,803 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136203.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:34:13,600 INFO [finetune.py:976] (5/7) Epoch 24, batch 4500, loss[loss=0.228, simple_loss=0.2901, pruned_loss=0.08299, over 4892.00 frames. ], tot_loss[loss=0.176, simple_loss=0.2476, pruned_loss=0.05222, over 957207.26 frames. ], batch size: 36, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:34:39,505 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.886e+01 1.509e+02 1.852e+02 2.239e+02 3.856e+02, threshold=3.704e+02, percent-clipped=1.0
+2023-03-27 05:34:54,273 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136286.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:34:54,760 INFO [finetune.py:976] (5/7) Epoch 24, batch 4550, loss[loss=0.2169, simple_loss=0.2953, pruned_loss=0.06924, over 4727.00 frames. ], tot_loss[loss=0.1767, simple_loss=0.2484, pruned_loss=0.05252, over 957634.33 frames. ], batch size: 54, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:34:57,268 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1581, 2.6035, 2.5657, 1.2850, 2.7784, 2.0354, 0.7737, 1.8368],
+ device='cuda:5'), covar=tensor([0.2136, 0.2362, 0.1720, 0.3429, 0.1234, 0.1169, 0.3902, 0.1544],
+ device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0178, 0.0160, 0.0129, 0.0160, 0.0123, 0.0147, 0.0123],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-27 05:35:20,907 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0349, 0.9928, 1.0146, 0.4238, 0.9144, 1.1975, 1.1980, 1.0211],
+ device='cuda:5'), covar=tensor([0.0843, 0.0579, 0.0592, 0.0522, 0.0549, 0.0583, 0.0370, 0.0689],
+ device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0149, 0.0126, 0.0122, 0.0132, 0.0129, 0.0142, 0.0148],
+ device='cuda:5'), out_proj_covar=tensor([8.9677e-05, 1.0730e-04, 9.0242e-05, 8.5730e-05, 9.2389e-05, 9.1927e-05,
+ 1.0104e-04, 1.0614e-04], device='cuda:5')
+2023-03-27 05:35:28,227 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=136334.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:35:28,894 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136335.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:35:30,009 INFO [finetune.py:976] (5/7) Epoch 24, batch 4600, loss[loss=0.1641, simple_loss=0.2232, pruned_loss=0.05244, over 4914.00 frames. ], tot_loss[loss=0.176, simple_loss=0.248, pruned_loss=0.05198, over 955891.34 frames. ], batch size: 37, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:35:35,220 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136338.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:35:45,768 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136350.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:35:56,272 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.033e+02 1.624e+02 1.856e+02 2.259e+02 4.732e+02, threshold=3.713e+02, percent-clipped=2.0
+2023-03-27 05:36:10,062 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0
+2023-03-27 05:36:11,519 INFO [finetune.py:976] (5/7) Epoch 24, batch 4650, loss[loss=0.1609, simple_loss=0.2405, pruned_loss=0.0407, over 4784.00 frames. ], tot_loss[loss=0.1742, simple_loss=0.2455, pruned_loss=0.05149, over 954782.49 frames. ], batch size: 29, lr: 3.04e-03, grad_scale: 16.0
+2023-03-27 05:36:17,109 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136396.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 05:36:42,550 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2941, 2.2207, 1.8358, 2.1564, 2.2662, 1.9990, 2.5889, 2.3666],
+ device='cuda:5'), covar=tensor([0.1307, 0.1953, 0.2875, 0.2415, 0.2404, 0.1633, 0.2777, 0.1611],
+ device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0190, 0.0236, 0.0254, 0.0250, 0.0206, 0.0214, 0.0203],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 05:36:45,427 INFO [finetune.py:976] (5/7) Epoch 24, batch 4700, loss[loss=0.1471, simple_loss=0.2169, pruned_loss=0.03859, over 4790.00 frames. ], tot_loss[loss=0.1719, simple_loss=0.2423, pruned_loss=0.0507, over 954964.34 frames.
], batch size: 29, lr: 3.04e-03, grad_scale: 16.0 +2023-03-27 05:37:13,730 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.054e+02 1.479e+02 1.754e+02 2.064e+02 3.231e+02, threshold=3.507e+02, percent-clipped=0.0 +2023-03-27 05:37:38,212 INFO [finetune.py:976] (5/7) Epoch 24, batch 4750, loss[loss=0.1504, simple_loss=0.232, pruned_loss=0.03439, over 4860.00 frames. ], tot_loss[loss=0.1707, simple_loss=0.2408, pruned_loss=0.05029, over 955614.99 frames. ], batch size: 31, lr: 3.04e-03, grad_scale: 16.0 +2023-03-27 05:37:44,921 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136498.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:38:05,138 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1978, 2.0524, 1.7321, 1.9913, 1.9678, 1.9618, 1.9726, 2.7182], + device='cuda:5'), covar=tensor([0.3705, 0.4485, 0.3515, 0.3722, 0.3838, 0.2520, 0.3857, 0.1629], + device='cuda:5'), in_proj_covar=tensor([0.0291, 0.0264, 0.0235, 0.0277, 0.0258, 0.0229, 0.0256, 0.0238], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 05:38:10,343 INFO [finetune.py:976] (5/7) Epoch 24, batch 4800, loss[loss=0.2325, simple_loss=0.3014, pruned_loss=0.08183, over 4810.00 frames. ], tot_loss[loss=0.1727, simple_loss=0.2431, pruned_loss=0.05109, over 955973.24 frames. ], batch size: 38, lr: 3.04e-03, grad_scale: 16.0 +2023-03-27 05:38:28,976 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 1.582e+02 2.021e+02 2.347e+02 5.093e+02, threshold=4.042e+02, percent-clipped=3.0 +2023-03-27 05:38:42,986 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6768, 1.4848, 1.0370, 0.2798, 1.3422, 1.4897, 1.4593, 1.4495], + device='cuda:5'), covar=tensor([0.0882, 0.0868, 0.1370, 0.2034, 0.1432, 0.2429, 0.2387, 0.0853], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0191, 0.0200, 0.0181, 0.0211, 0.0210, 0.0224, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 05:38:43,644 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-03-27 05:38:44,073 INFO [finetune.py:976] (5/7) Epoch 24, batch 4850, loss[loss=0.2117, simple_loss=0.2826, pruned_loss=0.07039, over 4908.00 frames. ], tot_loss[loss=0.1745, simple_loss=0.2458, pruned_loss=0.05155, over 953430.82 frames. ], batch size: 37, lr: 3.04e-03, grad_scale: 32.0 +2023-03-27 05:39:17,548 INFO [finetune.py:976] (5/7) Epoch 24, batch 4900, loss[loss=0.2068, simple_loss=0.2749, pruned_loss=0.06932, over 4789.00 frames. ], tot_loss[loss=0.1756, simple_loss=0.2472, pruned_loss=0.05204, over 952862.50 frames. ], batch size: 51, lr: 3.03e-03, grad_scale: 32.0 +2023-03-27 05:39:18,259 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136638.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:39:22,285 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136643.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:39:26,570 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136650.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:39:34,007 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.09 vs. 
limit=5.0 +2023-03-27 05:39:42,312 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.069e+02 1.600e+02 1.925e+02 2.438e+02 3.559e+02, threshold=3.849e+02, percent-clipped=0.0 +2023-03-27 05:39:59,921 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=136686.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:40:00,203 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-03-27 05:40:00,478 INFO [finetune.py:976] (5/7) Epoch 24, batch 4950, loss[loss=0.1993, simple_loss=0.2703, pruned_loss=0.06413, over 4166.00 frames. ], tot_loss[loss=0.1778, simple_loss=0.2499, pruned_loss=0.05282, over 955141.97 frames. ], batch size: 66, lr: 3.03e-03, grad_scale: 32.0 +2023-03-27 05:40:03,953 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136691.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:40:08,773 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=136698.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:40:12,487 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136704.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:40:33,840 INFO [finetune.py:976] (5/7) Epoch 24, batch 5000, loss[loss=0.1519, simple_loss=0.2311, pruned_loss=0.03632, over 4815.00 frames. ], tot_loss[loss=0.1764, simple_loss=0.2483, pruned_loss=0.0522, over 955175.56 frames. ], batch size: 39, lr: 3.03e-03, grad_scale: 32.0 +2023-03-27 05:41:02,608 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.076e+02 1.520e+02 1.782e+02 2.173e+02 3.913e+02, threshold=3.563e+02, percent-clipped=1.0 +2023-03-27 05:41:08,911 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4884, 2.3183, 1.7808, 0.8931, 2.0388, 2.0613, 1.8532, 2.0759], + device='cuda:5'), covar=tensor([0.0855, 0.0761, 0.1437, 0.1878, 0.1237, 0.2118, 0.2070, 0.0821], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0190, 0.0198, 0.0180, 0.0209, 0.0208, 0.0222, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 05:41:14,786 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6795, 0.7571, 1.8215, 1.7044, 1.6011, 1.5661, 1.5792, 1.7475], + device='cuda:5'), covar=tensor([0.3738, 0.3719, 0.3085, 0.3297, 0.4221, 0.3231, 0.3896, 0.2831], + device='cuda:5'), in_proj_covar=tensor([0.0262, 0.0246, 0.0266, 0.0292, 0.0291, 0.0268, 0.0298, 0.0249], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 05:41:17,088 INFO [finetune.py:976] (5/7) Epoch 24, batch 5050, loss[loss=0.1245, simple_loss=0.2039, pruned_loss=0.02258, over 4791.00 frames. ], tot_loss[loss=0.1738, simple_loss=0.245, pruned_loss=0.05131, over 953519.74 frames. 
], batch size: 29, lr: 3.03e-03, grad_scale: 32.0 +2023-03-27 05:41:21,319 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9658, 1.2969, 1.9594, 1.9511, 1.7608, 1.7393, 1.9006, 1.8450], + device='cuda:5'), covar=tensor([0.3818, 0.3816, 0.3129, 0.3247, 0.4521, 0.3482, 0.3976, 0.2933], + device='cuda:5'), in_proj_covar=tensor([0.0262, 0.0246, 0.0266, 0.0292, 0.0291, 0.0268, 0.0298, 0.0249], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 05:41:25,184 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136798.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:41:30,686 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136806.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:41:49,840 INFO [finetune.py:976] (5/7) Epoch 24, batch 5100, loss[loss=0.1256, simple_loss=0.1974, pruned_loss=0.02689, over 4768.00 frames. ], tot_loss[loss=0.1719, simple_loss=0.2425, pruned_loss=0.05066, over 954935.62 frames. ], batch size: 28, lr: 3.03e-03, grad_scale: 32.0 +2023-03-27 05:41:56,306 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=136846.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:42:11,832 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.036e+02 1.576e+02 1.921e+02 2.257e+02 4.191e+02, threshold=3.841e+02, percent-clipped=1.0 +2023-03-27 05:42:17,769 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136867.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:42:26,861 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0031, 1.9242, 1.5557, 1.8781, 1.9640, 1.6601, 2.2299, 2.0019], + device='cuda:5'), covar=tensor([0.1457, 0.2019, 0.3174, 0.2315, 0.2588, 0.1893, 0.2471, 0.1834], + device='cuda:5'), in_proj_covar=tensor([0.0188, 0.0188, 0.0234, 0.0251, 0.0248, 0.0204, 0.0212, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 05:42:35,177 INFO [finetune.py:976] (5/7) Epoch 24, batch 5150, loss[loss=0.1943, simple_loss=0.2717, pruned_loss=0.0585, over 4858.00 frames. ], tot_loss[loss=0.1726, simple_loss=0.2428, pruned_loss=0.05121, over 955669.49 frames. ], batch size: 44, lr: 3.03e-03, grad_scale: 32.0 +2023-03-27 05:43:16,539 INFO [finetune.py:976] (5/7) Epoch 24, batch 5200, loss[loss=0.1926, simple_loss=0.2677, pruned_loss=0.05878, over 4811.00 frames. ], tot_loss[loss=0.1752, simple_loss=0.2461, pruned_loss=0.05219, over 954608.37 frames. 
], batch size: 45, lr: 3.03e-03, grad_scale: 32.0 +2023-03-27 05:43:28,443 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1809, 1.5502, 0.7954, 1.8840, 2.4730, 1.7112, 1.6721, 1.8843], + device='cuda:5'), covar=tensor([0.1325, 0.1874, 0.1992, 0.1163, 0.1700, 0.1895, 0.1393, 0.1873], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0093, 0.0110, 0.0092, 0.0119, 0.0093, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 05:43:32,011 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1008, 1.4917, 0.7726, 1.7985, 2.3393, 1.6484, 1.6096, 1.8312], + device='cuda:5'), covar=tensor([0.1511, 0.1912, 0.2045, 0.1224, 0.1957, 0.2015, 0.1399, 0.2060], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0093, 0.0110, 0.0092, 0.0119, 0.0093, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 05:43:35,517 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.133e+02 1.647e+02 1.940e+02 2.397e+02 3.428e+02, threshold=3.879e+02, percent-clipped=0.0 +2023-03-27 05:43:37,635 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-03-27 05:43:48,852 INFO [finetune.py:976] (5/7) Epoch 24, batch 5250, loss[loss=0.1661, simple_loss=0.2424, pruned_loss=0.04483, over 4904.00 frames. ], tot_loss[loss=0.176, simple_loss=0.2476, pruned_loss=0.05217, over 955450.08 frames. ], batch size: 36, lr: 3.03e-03, grad_scale: 32.0 +2023-03-27 05:43:51,862 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136991.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:43:56,166 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6411, 1.6735, 1.4291, 1.6415, 1.9748, 1.9419, 1.6632, 1.5128], + device='cuda:5'), covar=tensor([0.0349, 0.0340, 0.0610, 0.0302, 0.0199, 0.0396, 0.0327, 0.0413], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0107, 0.0146, 0.0112, 0.0102, 0.0115, 0.0103, 0.0114], + device='cuda:5'), out_proj_covar=tensor([7.8390e-05, 8.2055e-05, 1.1455e-04, 8.6055e-05, 7.8901e-05, 8.4836e-05, + 7.6707e-05, 8.6404e-05], device='cuda:5') +2023-03-27 05:43:57,216 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136999.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:44:20,305 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2354, 2.0952, 1.5812, 0.6521, 1.7584, 1.9002, 1.7352, 1.9183], + device='cuda:5'), covar=tensor([0.0934, 0.0725, 0.1574, 0.1947, 0.1309, 0.2342, 0.2313, 0.0843], + device='cuda:5'), in_proj_covar=tensor([0.0169, 0.0190, 0.0198, 0.0180, 0.0209, 0.0207, 0.0222, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 05:44:22,623 INFO [finetune.py:976] (5/7) Epoch 24, batch 5300, loss[loss=0.1882, simple_loss=0.2743, pruned_loss=0.05103, over 4842.00 frames. ], tot_loss[loss=0.1761, simple_loss=0.2479, pruned_loss=0.05215, over 955550.07 frames. ], batch size: 44, lr: 3.03e-03, grad_scale: 32.0 +2023-03-27 05:44:23,948 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=137039.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:44:24,387 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.72 vs. 
limit=5.0 +2023-03-27 05:44:42,412 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.600e+02 1.832e+02 2.198e+02 3.821e+02, threshold=3.665e+02, percent-clipped=0.0 +2023-03-27 05:45:05,791 INFO [finetune.py:976] (5/7) Epoch 24, batch 5350, loss[loss=0.1356, simple_loss=0.2046, pruned_loss=0.03331, over 4760.00 frames. ], tot_loss[loss=0.1737, simple_loss=0.2459, pruned_loss=0.0508, over 953488.13 frames. ], batch size: 28, lr: 3.03e-03, grad_scale: 32.0 +2023-03-27 05:45:19,351 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4971, 1.4026, 1.8731, 1.7272, 1.5607, 3.2690, 1.3057, 1.5653], + device='cuda:5'), covar=tensor([0.0982, 0.1775, 0.1139, 0.0957, 0.1571, 0.0262, 0.1565, 0.1716], + device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0081, 0.0073, 0.0076, 0.0091, 0.0080, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 05:45:26,234 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8850, 4.1211, 3.8300, 2.1064, 4.1771, 3.2441, 1.2098, 2.9741], + device='cuda:5'), covar=tensor([0.2025, 0.2002, 0.1544, 0.3312, 0.0784, 0.0917, 0.4094, 0.1431], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0177, 0.0160, 0.0128, 0.0160, 0.0122, 0.0147, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 05:45:32,173 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7804, 1.8009, 2.4171, 1.9588, 2.0522, 4.5160, 1.7569, 1.9421], + device='cuda:5'), covar=tensor([0.0918, 0.1698, 0.0934, 0.0882, 0.1405, 0.0138, 0.1356, 0.1625], + device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0081, 0.0073, 0.0076, 0.0091, 0.0080, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 05:45:38,837 INFO [finetune.py:976] (5/7) Epoch 24, batch 5400, loss[loss=0.2229, simple_loss=0.2888, pruned_loss=0.0785, over 4898.00 frames. ], tot_loss[loss=0.1718, simple_loss=0.2438, pruned_loss=0.04992, over 955685.76 frames. ], batch size: 36, lr: 3.03e-03, grad_scale: 32.0 +2023-03-27 05:45:57,157 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=137162.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:45:58,905 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.720e+01 1.449e+02 1.770e+02 2.209e+02 4.288e+02, threshold=3.541e+02, percent-clipped=1.0 +2023-03-27 05:46:22,824 INFO [finetune.py:976] (5/7) Epoch 24, batch 5450, loss[loss=0.1486, simple_loss=0.2225, pruned_loss=0.03735, over 4911.00 frames. ], tot_loss[loss=0.1697, simple_loss=0.2413, pruned_loss=0.049, over 956769.02 frames. 
], batch size: 35, lr: 3.03e-03, grad_scale: 32.0 +2023-03-27 05:46:29,579 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6667, 1.6701, 1.5425, 1.6287, 1.2466, 3.5485, 1.4739, 1.8730], + device='cuda:5'), covar=tensor([0.3331, 0.2488, 0.2180, 0.2404, 0.1626, 0.0194, 0.2478, 0.1185], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0121, 0.0123, 0.0113, 0.0096, 0.0095, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 05:46:31,913 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0156, 0.9450, 0.9108, 1.1272, 1.1670, 1.0892, 0.9874, 0.8976], + device='cuda:5'), covar=tensor([0.0394, 0.0323, 0.0669, 0.0307, 0.0275, 0.0466, 0.0340, 0.0430], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0106, 0.0145, 0.0112, 0.0101, 0.0113, 0.0102, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.7535e-05, 8.1364e-05, 1.1348e-04, 8.5392e-05, 7.8403e-05, 8.3918e-05, + 7.5725e-05, 8.5616e-05], device='cuda:5') +2023-03-27 05:46:53,145 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-03-27 05:46:55,994 INFO [finetune.py:976] (5/7) Epoch 24, batch 5500, loss[loss=0.1453, simple_loss=0.2164, pruned_loss=0.03713, over 4756.00 frames. ], tot_loss[loss=0.1682, simple_loss=0.2388, pruned_loss=0.04877, over 956654.32 frames. ], batch size: 27, lr: 3.03e-03, grad_scale: 32.0 +2023-03-27 05:47:13,428 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.031e+02 1.493e+02 1.897e+02 2.213e+02 3.719e+02, threshold=3.794e+02, percent-clipped=2.0 +2023-03-27 05:47:36,857 INFO [finetune.py:976] (5/7) Epoch 24, batch 5550, loss[loss=0.2185, simple_loss=0.2861, pruned_loss=0.07544, over 4809.00 frames. ], tot_loss[loss=0.1698, simple_loss=0.2408, pruned_loss=0.04941, over 957667.07 frames. ], batch size: 45, lr: 3.03e-03, grad_scale: 32.0 +2023-03-27 05:47:47,644 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=137299.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:48:20,577 INFO [finetune.py:976] (5/7) Epoch 24, batch 5600, loss[loss=0.1913, simple_loss=0.271, pruned_loss=0.05578, over 4819.00 frames. ], tot_loss[loss=0.1736, simple_loss=0.2453, pruned_loss=0.05098, over 957302.95 frames. ], batch size: 40, lr: 3.03e-03, grad_scale: 32.0 +2023-03-27 05:48:26,395 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=137347.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:48:35,072 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-27 05:48:37,702 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.121e+02 1.584e+02 1.843e+02 2.259e+02 3.753e+02, threshold=3.686e+02, percent-clipped=0.0 +2023-03-27 05:48:51,117 INFO [finetune.py:976] (5/7) Epoch 24, batch 5650, loss[loss=0.1267, simple_loss=0.1988, pruned_loss=0.02728, over 4173.00 frames. ], tot_loss[loss=0.174, simple_loss=0.2462, pruned_loss=0.05091, over 955770.30 frames. 
], batch size: 18, lr: 3.03e-03, grad_scale: 32.0 +2023-03-27 05:48:55,852 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1203, 1.2646, 0.8624, 1.8635, 2.3093, 1.6926, 1.5266, 1.6474], + device='cuda:5'), covar=tensor([0.1573, 0.2414, 0.2200, 0.1305, 0.2021, 0.2093, 0.1633, 0.2345], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0093, 0.0109, 0.0092, 0.0119, 0.0092, 0.0097, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 05:49:08,641 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8148, 1.7022, 2.0524, 3.0567, 2.0975, 2.3694, 1.2823, 2.5053], + device='cuda:5'), covar=tensor([0.1589, 0.1298, 0.1294, 0.0814, 0.0827, 0.1308, 0.1719, 0.0610], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0116, 0.0133, 0.0163, 0.0102, 0.0136, 0.0125, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 05:49:11,612 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4970, 1.4189, 1.4264, 1.3647, 0.9759, 2.2023, 0.7798, 1.2844], + device='cuda:5'), covar=tensor([0.3012, 0.2416, 0.2072, 0.2333, 0.1651, 0.0345, 0.2463, 0.1190], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0121, 0.0123, 0.0113, 0.0096, 0.0095, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 05:49:20,946 INFO [finetune.py:976] (5/7) Epoch 24, batch 5700, loss[loss=0.1251, simple_loss=0.1898, pruned_loss=0.03022, over 4431.00 frames. ], tot_loss[loss=0.1714, simple_loss=0.2429, pruned_loss=0.05001, over 940390.20 frames. ], batch size: 19, lr: 3.03e-03, grad_scale: 32.0 +2023-03-27 05:49:24,964 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-03-27 05:49:35,756 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=137462.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:49:52,034 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.393e+01 1.414e+02 1.686e+02 2.138e+02 3.465e+02, threshold=3.373e+02, percent-clipped=0.0 +2023-03-27 05:49:52,050 INFO [finetune.py:976] (5/7) Epoch 25, batch 0, loss[loss=0.238, simple_loss=0.3062, pruned_loss=0.0849, over 4890.00 frames. ], tot_loss[loss=0.238, simple_loss=0.3062, pruned_loss=0.0849, over 4890.00 frames. ], batch size: 46, lr: 3.03e-03, grad_scale: 32.0 +2023-03-27 05:49:52,050 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-27 05:49:58,554 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6276, 3.5963, 3.4147, 1.6478, 3.6057, 2.8306, 0.8373, 2.5047], + device='cuda:5'), covar=tensor([0.1745, 0.1620, 0.1621, 0.2922, 0.1132, 0.0966, 0.3459, 0.1435], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0177, 0.0160, 0.0129, 0.0161, 0.0123, 0.0147, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 05:50:06,682 INFO [finetune.py:1010] (5/7) Epoch 25, validation: loss=0.1587, simple_loss=0.2267, pruned_loss=0.04536, over 2265189.00 frames. 
+2023-03-27 05:50:06,683 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-27 05:50:46,519 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=137510.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:50:50,026 INFO [finetune.py:976] (5/7) Epoch 25, batch 50, loss[loss=0.1868, simple_loss=0.2545, pruned_loss=0.05954, over 4910.00 frames. ], tot_loss[loss=0.175, simple_loss=0.2474, pruned_loss=0.05129, over 217031.07 frames. ], batch size: 38, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 05:50:52,333 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8245, 1.9021, 1.7473, 1.8729, 1.6526, 4.5468, 1.7470, 2.0468], + device='cuda:5'), covar=tensor([0.2968, 0.2349, 0.2005, 0.2173, 0.1392, 0.0113, 0.2288, 0.1197], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0121, 0.0123, 0.0113, 0.0095, 0.0094, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 05:50:54,349 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.38 vs. limit=5.0 +2023-03-27 05:51:06,083 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6658, 1.5084, 2.2110, 1.9041, 1.7698, 4.3103, 1.3737, 1.6275], + device='cuda:5'), covar=tensor([0.1150, 0.2305, 0.1308, 0.1129, 0.1841, 0.0210, 0.1976, 0.2284], + device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0081, 0.0073, 0.0075, 0.0090, 0.0080, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 05:51:09,927 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9165, 1.6147, 2.3267, 3.6867, 2.4529, 2.7329, 1.1590, 3.0434], + device='cuda:5'), covar=tensor([0.1800, 0.1595, 0.1487, 0.0633, 0.0897, 0.1757, 0.1844, 0.0512], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0116, 0.0133, 0.0163, 0.0101, 0.0136, 0.0124, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 05:51:12,267 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0833, 1.8581, 1.8964, 0.7777, 2.2104, 2.3612, 2.1595, 1.7809], + device='cuda:5'), covar=tensor([0.0942, 0.0691, 0.0537, 0.0689, 0.0494, 0.0622, 0.0407, 0.0721], + device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0150, 0.0127, 0.0123, 0.0131, 0.0130, 0.0142, 0.0149], + device='cuda:5'), out_proj_covar=tensor([8.9609e-05, 1.0756e-04, 9.0843e-05, 8.6264e-05, 9.2124e-05, 9.2654e-05, + 1.0144e-04, 1.0627e-04], device='cuda:5') +2023-03-27 05:51:25,239 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 1.580e+02 1.851e+02 2.170e+02 4.183e+02, threshold=3.702e+02, percent-clipped=2.0 +2023-03-27 05:51:25,255 INFO [finetune.py:976] (5/7) Epoch 25, batch 100, loss[loss=0.1621, simple_loss=0.2372, pruned_loss=0.04348, over 4774.00 frames. ], tot_loss[loss=0.1679, simple_loss=0.2392, pruned_loss=0.04825, over 381436.00 frames. ], batch size: 29, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 05:51:59,273 INFO [finetune.py:976] (5/7) Epoch 25, batch 150, loss[loss=0.1829, simple_loss=0.2552, pruned_loss=0.05527, over 4820.00 frames. ], tot_loss[loss=0.1647, simple_loss=0.2343, pruned_loss=0.04751, over 509183.34 frames. ], batch size: 39, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 05:52:09,244 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.11 vs. 
limit=2.0 +2023-03-27 05:52:33,560 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.113e+02 1.544e+02 1.791e+02 2.141e+02 4.771e+02, threshold=3.582e+02, percent-clipped=2.0 +2023-03-27 05:52:33,576 INFO [finetune.py:976] (5/7) Epoch 25, batch 200, loss[loss=0.1709, simple_loss=0.2338, pruned_loss=0.05396, over 4901.00 frames. ], tot_loss[loss=0.1651, simple_loss=0.2353, pruned_loss=0.04745, over 610504.84 frames. ], batch size: 35, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 05:52:49,737 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-03-27 05:53:26,996 INFO [finetune.py:976] (5/7) Epoch 25, batch 250, loss[loss=0.1579, simple_loss=0.2257, pruned_loss=0.04504, over 4813.00 frames. ], tot_loss[loss=0.169, simple_loss=0.2399, pruned_loss=0.04902, over 686851.23 frames. ], batch size: 25, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 05:53:31,854 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9738, 1.9210, 1.5559, 1.6603, 1.7752, 1.7379, 1.8259, 2.4512], + device='cuda:5'), covar=tensor([0.3854, 0.3875, 0.3438, 0.3623, 0.3825, 0.2498, 0.3577, 0.1776], + device='cuda:5'), in_proj_covar=tensor([0.0291, 0.0265, 0.0236, 0.0277, 0.0260, 0.0229, 0.0257, 0.0239], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 05:54:00,392 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.518e+01 1.603e+02 1.998e+02 2.287e+02 4.515e+02, threshold=3.995e+02, percent-clipped=2.0 +2023-03-27 05:54:00,408 INFO [finetune.py:976] (5/7) Epoch 25, batch 300, loss[loss=0.2287, simple_loss=0.2772, pruned_loss=0.09014, over 4756.00 frames. ], tot_loss[loss=0.1725, simple_loss=0.2446, pruned_loss=0.05021, over 746258.38 frames. ], batch size: 27, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 05:54:01,152 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5886, 0.7514, 1.6588, 1.5849, 1.4659, 1.3973, 1.5507, 1.5558], + device='cuda:5'), covar=tensor([0.3311, 0.3433, 0.2794, 0.2868, 0.3886, 0.3145, 0.3509, 0.2571], + device='cuda:5'), in_proj_covar=tensor([0.0262, 0.0246, 0.0266, 0.0291, 0.0291, 0.0267, 0.0297, 0.0249], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 05:54:33,830 INFO [finetune.py:976] (5/7) Epoch 25, batch 350, loss[loss=0.1639, simple_loss=0.2526, pruned_loss=0.03762, over 4800.00 frames. ], tot_loss[loss=0.1739, simple_loss=0.2461, pruned_loss=0.05082, over 793569.13 frames. ], batch size: 40, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 05:55:00,346 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-03-27 05:55:07,124 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.541e+02 1.822e+02 2.129e+02 2.910e+02, threshold=3.644e+02, percent-clipped=0.0 +2023-03-27 05:55:07,139 INFO [finetune.py:976] (5/7) Epoch 25, batch 400, loss[loss=0.1952, simple_loss=0.262, pruned_loss=0.06417, over 4814.00 frames. ], tot_loss[loss=0.1752, simple_loss=0.2475, pruned_loss=0.05147, over 830461.76 frames. 
], batch size: 33, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 05:55:31,421 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=137892.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:55:33,187 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=137895.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:55:53,568 INFO [finetune.py:976] (5/7) Epoch 25, batch 450, loss[loss=0.223, simple_loss=0.2805, pruned_loss=0.08279, over 4814.00 frames. ], tot_loss[loss=0.1748, simple_loss=0.2469, pruned_loss=0.0514, over 857075.21 frames. ], batch size: 41, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 05:55:54,829 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8178, 1.4304, 0.8459, 1.6543, 2.1771, 1.5502, 1.5935, 1.7714], + device='cuda:5'), covar=tensor([0.1389, 0.1872, 0.1827, 0.1114, 0.1748, 0.1765, 0.1281, 0.1809], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0093, 0.0109, 0.0092, 0.0118, 0.0092, 0.0097, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 05:56:19,220 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=137953.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:56:20,974 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=137956.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:56:26,870 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.118e+02 1.489e+02 1.801e+02 2.267e+02 5.324e+02, threshold=3.602e+02, percent-clipped=3.0 +2023-03-27 05:56:26,886 INFO [finetune.py:976] (5/7) Epoch 25, batch 500, loss[loss=0.1472, simple_loss=0.2156, pruned_loss=0.03943, over 4190.00 frames. ], tot_loss[loss=0.1714, simple_loss=0.243, pruned_loss=0.04994, over 877047.56 frames. ], batch size: 18, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 05:56:57,213 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-03-27 05:56:58,228 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2330, 1.2176, 1.1263, 1.2458, 1.3924, 1.4030, 1.2786, 1.1207], + device='cuda:5'), covar=tensor([0.0439, 0.0273, 0.0605, 0.0277, 0.0230, 0.0353, 0.0284, 0.0377], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0106, 0.0145, 0.0112, 0.0101, 0.0114, 0.0102, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.7783e-05, 8.1419e-05, 1.1367e-04, 8.5842e-05, 7.8596e-05, 8.3952e-05, + 7.5641e-05, 8.5857e-05], device='cuda:5') +2023-03-27 05:57:01,546 INFO [finetune.py:976] (5/7) Epoch 25, batch 550, loss[loss=0.193, simple_loss=0.253, pruned_loss=0.06648, over 4904.00 frames. ], tot_loss[loss=0.1694, simple_loss=0.2403, pruned_loss=0.04925, over 894308.06 frames. ], batch size: 43, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 05:57:34,659 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.805e+01 1.427e+02 1.740e+02 1.994e+02 3.808e+02, threshold=3.480e+02, percent-clipped=1.0 +2023-03-27 05:57:34,675 INFO [finetune.py:976] (5/7) Epoch 25, batch 600, loss[loss=0.1802, simple_loss=0.2571, pruned_loss=0.0516, over 4812.00 frames. ], tot_loss[loss=0.1706, simple_loss=0.2417, pruned_loss=0.04974, over 908999.23 frames. ], batch size: 41, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 05:58:07,364 INFO [finetune.py:976] (5/7) Epoch 25, batch 650, loss[loss=0.1478, simple_loss=0.2241, pruned_loss=0.0358, over 4781.00 frames. 
], tot_loss[loss=0.1739, simple_loss=0.2457, pruned_loss=0.05107, over 917329.86 frames. ], batch size: 29, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 05:58:57,510 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.63 vs. limit=5.0 +2023-03-27 05:58:59,109 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.045e+02 1.613e+02 1.882e+02 2.325e+02 5.163e+02, threshold=3.765e+02, percent-clipped=4.0 +2023-03-27 05:58:59,125 INFO [finetune.py:976] (5/7) Epoch 25, batch 700, loss[loss=0.1834, simple_loss=0.2538, pruned_loss=0.05648, over 4890.00 frames. ], tot_loss[loss=0.175, simple_loss=0.2473, pruned_loss=0.05131, over 924494.65 frames. ], batch size: 32, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 05:59:12,061 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-03-27 05:59:23,698 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-03-27 05:59:30,175 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9216, 1.8541, 1.5572, 2.0417, 2.5425, 2.0709, 1.8095, 1.5068], + device='cuda:5'), covar=tensor([0.1921, 0.1763, 0.1737, 0.1481, 0.1352, 0.1023, 0.1896, 0.1693], + device='cuda:5'), in_proj_covar=tensor([0.0244, 0.0211, 0.0214, 0.0197, 0.0243, 0.0190, 0.0217, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 05:59:32,529 INFO [finetune.py:976] (5/7) Epoch 25, batch 750, loss[loss=0.1619, simple_loss=0.2394, pruned_loss=0.04221, over 4830.00 frames. ], tot_loss[loss=0.1754, simple_loss=0.2476, pruned_loss=0.05163, over 930492.85 frames. ], batch size: 30, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 05:59:53,528 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138248.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 05:59:55,852 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138251.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:00:05,195 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.386e+01 1.513e+02 1.803e+02 2.270e+02 6.862e+02, threshold=3.605e+02, percent-clipped=3.0 +2023-03-27 06:00:05,211 INFO [finetune.py:976] (5/7) Epoch 25, batch 800, loss[loss=0.1301, simple_loss=0.1891, pruned_loss=0.03557, over 3362.00 frames. ], tot_loss[loss=0.1742, simple_loss=0.2466, pruned_loss=0.05091, over 935350.62 frames. ], batch size: 13, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 06:00:08,347 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=138270.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:00:19,837 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.56 vs. limit=2.0 +2023-03-27 06:00:38,347 INFO [finetune.py:976] (5/7) Epoch 25, batch 850, loss[loss=0.1391, simple_loss=0.2247, pruned_loss=0.02674, over 4764.00 frames. ], tot_loss[loss=0.1739, simple_loss=0.2457, pruned_loss=0.05106, over 940185.23 frames. 
], batch size: 28, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 06:00:44,519 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=138322.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:00:45,808 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2493, 2.2634, 1.8625, 2.3121, 2.1126, 2.1036, 2.1116, 3.0328], + device='cuda:5'), covar=tensor([0.3776, 0.4717, 0.3509, 0.4450, 0.4360, 0.2545, 0.4207, 0.1728], + device='cuda:5'), in_proj_covar=tensor([0.0291, 0.0264, 0.0235, 0.0276, 0.0259, 0.0229, 0.0257, 0.0238], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:00:54,541 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=138331.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:01:24,849 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.023e+02 1.388e+02 1.706e+02 2.091e+02 3.563e+02, threshold=3.412e+02, percent-clipped=0.0 +2023-03-27 06:01:24,864 INFO [finetune.py:976] (5/7) Epoch 25, batch 900, loss[loss=0.1255, simple_loss=0.1973, pruned_loss=0.02685, over 4801.00 frames. ], tot_loss[loss=0.1715, simple_loss=0.2431, pruned_loss=0.04999, over 944623.12 frames. ], batch size: 29, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 06:01:36,372 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=138383.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:01:55,436 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2655, 2.1530, 1.9645, 2.4606, 2.8531, 2.3310, 2.2815, 1.7173], + device='cuda:5'), covar=tensor([0.2456, 0.2247, 0.2119, 0.1763, 0.1840, 0.1199, 0.2271, 0.2200], + device='cuda:5'), in_proj_covar=tensor([0.0246, 0.0213, 0.0216, 0.0198, 0.0245, 0.0192, 0.0218, 0.0206], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:01:57,679 INFO [finetune.py:976] (5/7) Epoch 25, batch 950, loss[loss=0.1724, simple_loss=0.2437, pruned_loss=0.05056, over 4824.00 frames. ], tot_loss[loss=0.171, simple_loss=0.2423, pruned_loss=0.04989, over 945851.45 frames. ], batch size: 30, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 06:02:03,933 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6795, 1.8980, 1.4724, 1.4387, 2.1201, 2.1769, 1.8632, 1.8086], + device='cuda:5'), covar=tensor([0.0472, 0.0397, 0.0719, 0.0395, 0.0343, 0.0693, 0.0404, 0.0387], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0107, 0.0146, 0.0112, 0.0101, 0.0113, 0.0102, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.7886e-05, 8.1737e-05, 1.1419e-04, 8.5897e-05, 7.8502e-05, 8.3867e-05, + 7.5927e-05, 8.6010e-05], device='cuda:5') +2023-03-27 06:02:30,847 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.004e+02 1.447e+02 1.781e+02 2.298e+02 4.571e+02, threshold=3.563e+02, percent-clipped=3.0 +2023-03-27 06:02:30,863 INFO [finetune.py:976] (5/7) Epoch 25, batch 1000, loss[loss=0.1716, simple_loss=0.2421, pruned_loss=0.05056, over 4829.00 frames. ], tot_loss[loss=0.1728, simple_loss=0.2445, pruned_loss=0.05054, over 948649.37 frames. 
], batch size: 33, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 06:02:35,736 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8221, 1.6657, 1.4779, 1.2486, 1.6674, 1.6170, 1.6214, 2.1850], + device='cuda:5'), covar=tensor([0.3708, 0.3495, 0.3145, 0.3599, 0.3417, 0.2235, 0.3323, 0.1778], + device='cuda:5'), in_proj_covar=tensor([0.0289, 0.0263, 0.0234, 0.0275, 0.0258, 0.0228, 0.0256, 0.0237], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:03:03,761 INFO [finetune.py:976] (5/7) Epoch 25, batch 1050, loss[loss=0.173, simple_loss=0.2462, pruned_loss=0.04989, over 4903.00 frames. ], tot_loss[loss=0.1728, simple_loss=0.2456, pruned_loss=0.05005, over 952149.96 frames. ], batch size: 37, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 06:03:07,273 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.1031, 0.9826, 0.9907, 0.3953, 0.8956, 1.1740, 1.1778, 1.0057], + device='cuda:5'), covar=tensor([0.0859, 0.0550, 0.0539, 0.0544, 0.0562, 0.0628, 0.0383, 0.0620], + device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0149, 0.0127, 0.0122, 0.0131, 0.0129, 0.0141, 0.0148], + device='cuda:5'), out_proj_covar=tensor([8.9189e-05, 1.0684e-04, 9.0393e-05, 8.5855e-05, 9.1832e-05, 9.2029e-05, + 1.0062e-04, 1.0546e-04], device='cuda:5') +2023-03-27 06:03:25,370 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138548.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:03:27,131 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138551.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:03:39,079 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.051e+02 1.570e+02 1.874e+02 2.177e+02 7.699e+02, threshold=3.747e+02, percent-clipped=3.0 +2023-03-27 06:03:39,095 INFO [finetune.py:976] (5/7) Epoch 25, batch 1100, loss[loss=0.1794, simple_loss=0.2579, pruned_loss=0.05047, over 4896.00 frames. ], tot_loss[loss=0.1754, simple_loss=0.2482, pruned_loss=0.05127, over 953960.48 frames. ], batch size: 35, lr: 3.02e-03, grad_scale: 32.0 +2023-03-27 06:04:17,875 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=138596.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:04:19,641 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=138599.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:04:29,874 INFO [finetune.py:976] (5/7) Epoch 25, batch 1150, loss[loss=0.1914, simple_loss=0.2511, pruned_loss=0.06585, over 4748.00 frames. ], tot_loss[loss=0.1752, simple_loss=0.2481, pruned_loss=0.05119, over 954056.78 frames. ], batch size: 27, lr: 3.02e-03, grad_scale: 64.0 +2023-03-27 06:04:39,030 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138626.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:05:01,343 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-03-27 06:05:03,420 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.832e+01 1.520e+02 1.733e+02 2.222e+02 3.582e+02, threshold=3.466e+02, percent-clipped=0.0 +2023-03-27 06:05:03,436 INFO [finetune.py:976] (5/7) Epoch 25, batch 1200, loss[loss=0.1486, simple_loss=0.224, pruned_loss=0.03665, over 4766.00 frames. ], tot_loss[loss=0.1735, simple_loss=0.2461, pruned_loss=0.05048, over 954931.29 frames. 
], batch size: 28, lr: 3.02e-03, grad_scale: 64.0 +2023-03-27 06:05:09,911 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-03-27 06:05:13,802 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138678.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:05:15,631 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=138681.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:05:18,729 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=138686.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:05:25,324 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.5434, 1.5803, 1.5729, 0.8451, 1.7554, 1.9267, 1.9154, 1.4666], + device='cuda:5'), covar=tensor([0.0865, 0.0629, 0.0520, 0.0551, 0.0472, 0.0579, 0.0307, 0.0707], + device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0149, 0.0127, 0.0122, 0.0132, 0.0130, 0.0141, 0.0148], + device='cuda:5'), out_proj_covar=tensor([8.9339e-05, 1.0706e-04, 9.0676e-05, 8.6174e-05, 9.2435e-05, 9.2189e-05, + 1.0094e-04, 1.0574e-04], device='cuda:5') +2023-03-27 06:05:25,907 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=138697.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:05:37,221 INFO [finetune.py:976] (5/7) Epoch 25, batch 1250, loss[loss=0.1625, simple_loss=0.2336, pruned_loss=0.04569, over 4177.00 frames. ], tot_loss[loss=0.1721, simple_loss=0.2438, pruned_loss=0.05024, over 953358.04 frames. ], batch size: 65, lr: 3.02e-03, grad_scale: 64.0 +2023-03-27 06:05:55,610 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=138742.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:05:59,141 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=138747.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:06:05,714 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=138758.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:06:12,320 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.092e+02 1.434e+02 1.700e+02 2.124e+02 3.591e+02, threshold=3.400e+02, percent-clipped=1.0 +2023-03-27 06:06:12,336 INFO [finetune.py:976] (5/7) Epoch 25, batch 1300, loss[loss=0.193, simple_loss=0.2601, pruned_loss=0.06298, over 4831.00 frames. ], tot_loss[loss=0.1696, simple_loss=0.2408, pruned_loss=0.0492, over 954306.21 frames. ], batch size: 33, lr: 3.02e-03, grad_scale: 64.0 +2023-03-27 06:06:57,378 INFO [finetune.py:976] (5/7) Epoch 25, batch 1350, loss[loss=0.206, simple_loss=0.2725, pruned_loss=0.06981, over 4908.00 frames. ], tot_loss[loss=0.1703, simple_loss=0.2416, pruned_loss=0.04954, over 956244.05 frames. ], batch size: 43, lr: 3.02e-03, grad_scale: 64.0 +2023-03-27 06:07:31,275 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.011e+02 1.573e+02 1.999e+02 2.321e+02 4.595e+02, threshold=3.999e+02, percent-clipped=3.0 +2023-03-27 06:07:31,291 INFO [finetune.py:976] (5/7) Epoch 25, batch 1400, loss[loss=0.1639, simple_loss=0.2403, pruned_loss=0.0437, over 4790.00 frames. ], tot_loss[loss=0.1736, simple_loss=0.2457, pruned_loss=0.05073, over 956115.37 frames. 
], batch size: 29, lr: 3.02e-03, grad_scale: 64.0 +2023-03-27 06:07:37,339 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.4796, 1.4959, 1.5392, 0.9381, 1.6255, 1.8176, 1.8678, 1.4058], + device='cuda:5'), covar=tensor([0.0974, 0.0654, 0.0539, 0.0514, 0.0494, 0.0629, 0.0343, 0.0708], + device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0150, 0.0128, 0.0123, 0.0132, 0.0131, 0.0143, 0.0149], + device='cuda:5'), out_proj_covar=tensor([9.0075e-05, 1.0785e-04, 9.1346e-05, 8.6950e-05, 9.2839e-05, 9.2997e-05, + 1.0213e-04, 1.0668e-04], device='cuda:5') +2023-03-27 06:07:46,622 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4242, 1.4367, 1.7828, 1.7529, 1.7013, 3.3230, 1.4743, 1.5698], + device='cuda:5'), covar=tensor([0.1066, 0.1816, 0.1096, 0.0902, 0.1524, 0.0247, 0.1500, 0.1785], + device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0082, 0.0073, 0.0076, 0.0091, 0.0080, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 06:08:01,090 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=138909.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:08:04,578 INFO [finetune.py:976] (5/7) Epoch 25, batch 1450, loss[loss=0.1962, simple_loss=0.2669, pruned_loss=0.06277, over 4811.00 frames. ], tot_loss[loss=0.1734, simple_loss=0.2457, pruned_loss=0.05056, over 956183.23 frames. ], batch size: 45, lr: 3.01e-03, grad_scale: 64.0 +2023-03-27 06:08:11,164 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0409, 2.7707, 2.5586, 1.4069, 2.6507, 2.1527, 2.1654, 2.4406], + device='cuda:5'), covar=tensor([0.0916, 0.0812, 0.1684, 0.2055, 0.1657, 0.2134, 0.1878, 0.1162], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0191, 0.0200, 0.0180, 0.0208, 0.0209, 0.0222, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:08:11,763 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138926.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:08:15,134 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3273, 1.4851, 1.5586, 0.8556, 1.5507, 1.7809, 1.8015, 1.4055], + device='cuda:5'), covar=tensor([0.1027, 0.0789, 0.0568, 0.0547, 0.0568, 0.0717, 0.0418, 0.0818], + device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0149, 0.0127, 0.0123, 0.0131, 0.0130, 0.0142, 0.0148], + device='cuda:5'), out_proj_covar=tensor([8.9607e-05, 1.0709e-04, 9.0805e-05, 8.6405e-05, 9.2241e-05, 9.2393e-05, + 1.0150e-04, 1.0585e-04], device='cuda:5') +2023-03-27 06:08:23,891 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8272, 1.8451, 1.5652, 2.0518, 2.4833, 2.0378, 1.7795, 1.4561], + device='cuda:5'), covar=tensor([0.2527, 0.2136, 0.2283, 0.1788, 0.1846, 0.1276, 0.2539, 0.2251], + device='cuda:5'), in_proj_covar=tensor([0.0246, 0.0213, 0.0216, 0.0199, 0.0245, 0.0192, 0.0218, 0.0206], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:08:38,071 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.768e+01 1.472e+02 1.793e+02 2.201e+02 3.947e+02, threshold=3.587e+02, percent-clipped=0.0 +2023-03-27 06:08:38,087 INFO [finetune.py:976] (5/7) Epoch 25, batch 1500, loss[loss=0.1773, simple_loss=0.2483, pruned_loss=0.05312, over 4910.00 frames. 
], tot_loss[loss=0.1745, simple_loss=0.2475, pruned_loss=0.05074, over 956803.75 frames. ], batch size: 36, lr: 3.01e-03, grad_scale: 64.0 +2023-03-27 06:08:41,671 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=138970.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 06:08:44,019 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=138974.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:08:46,500 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138978.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:09:23,191 INFO [finetune.py:976] (5/7) Epoch 25, batch 1550, loss[loss=0.1773, simple_loss=0.2647, pruned_loss=0.04492, over 4914.00 frames. ], tot_loss[loss=0.1741, simple_loss=0.2472, pruned_loss=0.05049, over 958807.95 frames. ], batch size: 37, lr: 3.01e-03, grad_scale: 64.0 +2023-03-27 06:09:34,902 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=139026.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:09:47,267 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139037.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:09:50,294 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139042.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:09:57,456 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139053.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:10:02,892 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8671, 1.3213, 1.9403, 1.8794, 1.6849, 1.6284, 1.8650, 1.8199], + device='cuda:5'), covar=tensor([0.3700, 0.3606, 0.2804, 0.3277, 0.4227, 0.3496, 0.3754, 0.2776], + device='cuda:5'), in_proj_covar=tensor([0.0261, 0.0245, 0.0265, 0.0290, 0.0291, 0.0267, 0.0297, 0.0248], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:10:05,108 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.522e+01 1.375e+02 1.683e+02 2.077e+02 3.862e+02, threshold=3.366e+02, percent-clipped=3.0 +2023-03-27 06:10:05,124 INFO [finetune.py:976] (5/7) Epoch 25, batch 1600, loss[loss=0.2008, simple_loss=0.2684, pruned_loss=0.06657, over 4855.00 frames. ], tot_loss[loss=0.173, simple_loss=0.2453, pruned_loss=0.05035, over 958786.69 frames. ], batch size: 31, lr: 3.01e-03, grad_scale: 64.0 +2023-03-27 06:10:38,952 INFO [finetune.py:976] (5/7) Epoch 25, batch 1650, loss[loss=0.1593, simple_loss=0.2302, pruned_loss=0.04421, over 4829.00 frames. ], tot_loss[loss=0.1704, simple_loss=0.242, pruned_loss=0.04934, over 955684.19 frames. ], batch size: 41, lr: 3.01e-03, grad_scale: 64.0 +2023-03-27 06:11:12,571 INFO [finetune.py:976] (5/7) Epoch 25, batch 1700, loss[loss=0.1995, simple_loss=0.2658, pruned_loss=0.0666, over 4820.00 frames. ], tot_loss[loss=0.1709, simple_loss=0.2417, pruned_loss=0.05, over 955290.78 frames. 
], batch size: 38, lr: 3.01e-03, grad_scale: 32.0 +2023-03-27 06:11:13,177 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.831e+01 1.435e+02 1.759e+02 2.193e+02 3.727e+02, threshold=3.518e+02, percent-clipped=3.0 +2023-03-27 06:11:52,854 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2115, 2.3691, 1.9805, 2.1255, 2.7052, 2.8527, 2.4568, 2.2841], + device='cuda:5'), covar=tensor([0.0402, 0.0325, 0.0565, 0.0329, 0.0242, 0.0440, 0.0244, 0.0322], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0107, 0.0146, 0.0112, 0.0101, 0.0114, 0.0103, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.7999e-05, 8.1901e-05, 1.1406e-04, 8.5684e-05, 7.8564e-05, 8.4341e-05, + 7.6164e-05, 8.5778e-05], device='cuda:5') +2023-03-27 06:11:56,395 INFO [finetune.py:976] (5/7) Epoch 25, batch 1750, loss[loss=0.2508, simple_loss=0.3018, pruned_loss=0.09993, over 4930.00 frames. ], tot_loss[loss=0.172, simple_loss=0.2431, pruned_loss=0.05046, over 955190.50 frames. ], batch size: 38, lr: 3.01e-03, grad_scale: 32.0 +2023-03-27 06:12:20,693 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139238.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:12:22,471 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.69 vs. limit=2.0 +2023-03-27 06:12:31,181 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3954, 2.1754, 1.9717, 2.2411, 2.3593, 2.1373, 2.5423, 2.3894], + device='cuda:5'), covar=tensor([0.1172, 0.1759, 0.2641, 0.2120, 0.2297, 0.1461, 0.2458, 0.1569], + device='cuda:5'), in_proj_covar=tensor([0.0187, 0.0189, 0.0234, 0.0253, 0.0249, 0.0204, 0.0214, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:12:39,405 INFO [finetune.py:976] (5/7) Epoch 25, batch 1800, loss[loss=0.1932, simple_loss=0.2699, pruned_loss=0.05821, over 4743.00 frames. ], tot_loss[loss=0.1739, simple_loss=0.2456, pruned_loss=0.05109, over 954022.76 frames. ], batch size: 54, lr: 3.01e-03, grad_scale: 32.0 +2023-03-27 06:12:39,474 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139265.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 06:12:39,956 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.974e+01 1.553e+02 1.833e+02 2.131e+02 4.022e+02, threshold=3.667e+02, percent-clipped=3.0 +2023-03-27 06:12:46,664 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139276.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:12:55,154 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139289.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:13:02,276 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139299.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:13:04,596 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.87 vs. limit=5.0 +2023-03-27 06:13:13,308 INFO [finetune.py:976] (5/7) Epoch 25, batch 1850, loss[loss=0.1983, simple_loss=0.2862, pruned_loss=0.05513, over 4816.00 frames. ], tot_loss[loss=0.1733, simple_loss=0.2457, pruned_loss=0.05044, over 954225.12 frames. 
], batch size: 40, lr: 3.01e-03, grad_scale: 32.0 +2023-03-27 06:13:27,872 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139337.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:13:27,896 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139337.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 06:13:30,861 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139342.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:13:36,177 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139350.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:13:38,899 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139353.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:13:46,544 INFO [finetune.py:976] (5/7) Epoch 25, batch 1900, loss[loss=0.2075, simple_loss=0.2779, pruned_loss=0.06857, over 4892.00 frames. ], tot_loss[loss=0.1743, simple_loss=0.2473, pruned_loss=0.05062, over 953874.71 frames. ], batch size: 32, lr: 3.01e-03, grad_scale: 32.0 +2023-03-27 06:13:46,658 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4796, 1.3704, 1.3859, 1.3562, 0.9058, 2.2032, 0.7700, 1.3862], + device='cuda:5'), covar=tensor([0.3339, 0.2629, 0.2239, 0.2658, 0.1883, 0.0378, 0.2739, 0.1304], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0121, 0.0124, 0.0113, 0.0096, 0.0094, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 06:13:47,140 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.007e+02 1.553e+02 1.835e+02 2.150e+02 3.557e+02, threshold=3.671e+02, percent-clipped=0.0 +2023-03-27 06:13:59,666 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=139385.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:14:03,137 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=139390.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:14:10,503 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=139401.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:14:18,136 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4973, 1.2323, 2.0075, 3.1171, 2.0427, 2.3127, 1.0860, 2.8118], + device='cuda:5'), covar=tensor([0.1856, 0.1970, 0.1359, 0.0894, 0.0963, 0.1382, 0.1888, 0.0559], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0117, 0.0134, 0.0164, 0.0102, 0.0137, 0.0125, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 06:14:19,884 INFO [finetune.py:976] (5/7) Epoch 25, batch 1950, loss[loss=0.1358, simple_loss=0.2142, pruned_loss=0.02873, over 4923.00 frames. ], tot_loss[loss=0.1741, simple_loss=0.2467, pruned_loss=0.05076, over 952332.33 frames. ], batch size: 38, lr: 3.01e-03, grad_scale: 32.0 +2023-03-27 06:14:23,720 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.64 vs. limit=2.0 +2023-03-27 06:15:11,617 INFO [finetune.py:976] (5/7) Epoch 25, batch 2000, loss[loss=0.2246, simple_loss=0.285, pruned_loss=0.0821, over 4705.00 frames. ], tot_loss[loss=0.1724, simple_loss=0.2441, pruned_loss=0.05038, over 953259.15 frames. 
], batch size: 59, lr: 3.01e-03, grad_scale: 32.0 +2023-03-27 06:15:12,708 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.253e+01 1.362e+02 1.721e+02 2.187e+02 3.038e+02, threshold=3.442e+02, percent-clipped=0.0 +2023-03-27 06:15:15,224 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8208, 1.8171, 1.6270, 1.8870, 1.5061, 4.3244, 1.5884, 2.0425], + device='cuda:5'), covar=tensor([0.3139, 0.2397, 0.2083, 0.2315, 0.1528, 0.0122, 0.2524, 0.1215], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0121, 0.0124, 0.0113, 0.0096, 0.0094, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 06:15:39,154 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-03-27 06:15:45,238 INFO [finetune.py:976] (5/7) Epoch 25, batch 2050, loss[loss=0.1487, simple_loss=0.2117, pruned_loss=0.0429, over 4907.00 frames. ], tot_loss[loss=0.1694, simple_loss=0.2406, pruned_loss=0.04913, over 954107.32 frames. ], batch size: 32, lr: 3.01e-03, grad_scale: 32.0 +2023-03-27 06:16:18,436 INFO [finetune.py:976] (5/7) Epoch 25, batch 2100, loss[loss=0.1643, simple_loss=0.2468, pruned_loss=0.04086, over 4922.00 frames. ], tot_loss[loss=0.1684, simple_loss=0.2398, pruned_loss=0.04848, over 955242.65 frames. ], batch size: 38, lr: 3.01e-03, grad_scale: 16.0 +2023-03-27 06:16:18,552 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139565.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 06:16:20,121 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.352e+01 1.449e+02 1.714e+02 2.109e+02 3.824e+02, threshold=3.428e+02, percent-clipped=2.0 +2023-03-27 06:16:22,154 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1250, 1.8410, 2.2356, 2.2109, 1.9386, 1.8765, 2.1950, 2.1177], + device='cuda:5'), covar=tensor([0.4292, 0.4238, 0.3304, 0.4097, 0.5285, 0.4371, 0.5053, 0.3097], + device='cuda:5'), in_proj_covar=tensor([0.0263, 0.0246, 0.0266, 0.0291, 0.0291, 0.0268, 0.0297, 0.0249], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:16:23,223 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139571.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:16:30,376 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139582.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:16:38,029 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139594.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:16:50,723 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=139613.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:16:52,380 INFO [finetune.py:976] (5/7) Epoch 25, batch 2150, loss[loss=0.2009, simple_loss=0.257, pruned_loss=0.07244, over 4334.00 frames. ], tot_loss[loss=0.1712, simple_loss=0.2431, pruned_loss=0.04961, over 954776.31 frames. 
], batch size: 19, lr: 3.01e-03, grad_scale: 16.0 +2023-03-27 06:17:09,476 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2038, 3.6777, 3.8618, 4.0691, 3.9954, 3.7073, 4.3122, 1.3153], + device='cuda:5'), covar=tensor([0.0785, 0.0851, 0.1006, 0.0907, 0.1261, 0.1763, 0.0721, 0.5917], + device='cuda:5'), in_proj_covar=tensor([0.0351, 0.0249, 0.0284, 0.0298, 0.0340, 0.0288, 0.0308, 0.0305], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:17:09,478 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139632.0, num_to_drop=1, layers_to_drop={3} +2023-03-27 06:17:09,511 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139632.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:17:19,961 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-03-27 06:17:26,396 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139643.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:17:27,570 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139645.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:17:35,283 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.61 vs. limit=5.0 +2023-03-27 06:17:43,824 INFO [finetune.py:976] (5/7) Epoch 25, batch 2200, loss[loss=0.1703, simple_loss=0.2392, pruned_loss=0.05072, over 4838.00 frames. ], tot_loss[loss=0.1731, simple_loss=0.2457, pruned_loss=0.05029, over 955444.33 frames. ], batch size: 49, lr: 3.01e-03, grad_scale: 16.0 +2023-03-27 06:17:45,448 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.088e+02 1.512e+02 1.789e+02 2.111e+02 3.462e+02, threshold=3.578e+02, percent-clipped=1.0 +2023-03-27 06:18:03,281 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7866, 1.5060, 0.9649, 1.6285, 2.0432, 1.5266, 1.5783, 1.7159], + device='cuda:5'), covar=tensor([0.1356, 0.1816, 0.1690, 0.1100, 0.1941, 0.1819, 0.1326, 0.1793], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0094, 0.0109, 0.0092, 0.0119, 0.0093, 0.0098, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 06:18:17,070 INFO [finetune.py:976] (5/7) Epoch 25, batch 2250, loss[loss=0.1762, simple_loss=0.2402, pruned_loss=0.05616, over 4798.00 frames. ], tot_loss[loss=0.1742, simple_loss=0.2464, pruned_loss=0.05105, over 954650.50 frames. ], batch size: 25, lr: 3.01e-03, grad_scale: 16.0 +2023-03-27 06:18:46,361 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.85 vs. limit=5.0 +2023-03-27 06:18:50,824 INFO [finetune.py:976] (5/7) Epoch 25, batch 2300, loss[loss=0.1525, simple_loss=0.2175, pruned_loss=0.04378, over 4709.00 frames. ], tot_loss[loss=0.1731, simple_loss=0.2458, pruned_loss=0.05019, over 954458.58 frames. ], batch size: 23, lr: 3.01e-03, grad_scale: 16.0 +2023-03-27 06:18:52,006 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.130e+02 1.533e+02 1.822e+02 2.118e+02 3.916e+02, threshold=3.645e+02, percent-clipped=1.0 +2023-03-27 06:18:53,811 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139769.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:18:57,018 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.50 vs. 
limit=5.0 +2023-03-27 06:19:06,793 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139789.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:19:16,879 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139804.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:19:18,115 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139806.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:19:23,928 INFO [finetune.py:976] (5/7) Epoch 25, batch 2350, loss[loss=0.1724, simple_loss=0.2373, pruned_loss=0.05378, over 4931.00 frames. ], tot_loss[loss=0.1718, simple_loss=0.2441, pruned_loss=0.04977, over 956064.79 frames. ], batch size: 38, lr: 3.01e-03, grad_scale: 16.0 +2023-03-27 06:19:32,255 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2074, 2.1291, 1.7263, 2.2498, 2.1546, 1.9035, 2.5037, 2.2820], + device='cuda:5'), covar=tensor([0.1267, 0.2045, 0.2820, 0.2409, 0.2311, 0.1600, 0.2895, 0.1585], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0192, 0.0237, 0.0256, 0.0252, 0.0207, 0.0217, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:19:35,057 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139830.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:19:35,635 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9597, 1.9847, 2.0697, 1.4720, 2.1357, 2.2331, 2.1640, 1.6858], + device='cuda:5'), covar=tensor([0.0615, 0.0636, 0.0723, 0.0894, 0.0659, 0.0632, 0.0583, 0.1139], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0137, 0.0140, 0.0120, 0.0126, 0.0139, 0.0139, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:19:54,892 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139850.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:19:54,901 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139850.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:20:06,706 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-03-27 06:20:07,501 INFO [finetune.py:976] (5/7) Epoch 25, batch 2400, loss[loss=0.1545, simple_loss=0.2218, pruned_loss=0.04363, over 4850.00 frames. ], tot_loss[loss=0.1693, simple_loss=0.2412, pruned_loss=0.04873, over 957556.01 frames. 
], batch size: 49, lr: 3.01e-03, grad_scale: 16.0 +2023-03-27 06:20:07,627 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139865.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:20:10,609 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.173e+01 1.419e+02 1.768e+02 2.081e+02 3.267e+02, threshold=3.536e+02, percent-clipped=0.0 +2023-03-27 06:20:10,746 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139867.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:20:35,948 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139894.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:20:36,001 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6838, 1.6234, 1.5051, 1.5295, 1.7484, 1.5100, 1.7693, 1.7007], + device='cuda:5'), covar=tensor([0.1239, 0.1770, 0.2556, 0.2148, 0.2164, 0.1485, 0.2266, 0.1563], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0192, 0.0238, 0.0256, 0.0252, 0.0207, 0.0217, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:20:47,283 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139911.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:20:50,037 INFO [finetune.py:976] (5/7) Epoch 25, batch 2450, loss[loss=0.1792, simple_loss=0.257, pruned_loss=0.05068, over 4820.00 frames. ], tot_loss[loss=0.1682, simple_loss=0.2392, pruned_loss=0.04859, over 955260.59 frames. ], batch size: 45, lr: 3.01e-03, grad_scale: 16.0 +2023-03-27 06:20:56,822 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3864, 2.3844, 2.0265, 2.5088, 2.2745, 2.2948, 2.3452, 3.2219], + device='cuda:5'), covar=tensor([0.3597, 0.4600, 0.3329, 0.3873, 0.4278, 0.2495, 0.3928, 0.1541], + device='cuda:5'), in_proj_covar=tensor([0.0290, 0.0263, 0.0235, 0.0276, 0.0258, 0.0229, 0.0256, 0.0238], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:20:57,371 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139927.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:21:00,184 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8791, 3.8410, 3.6778, 1.8549, 3.9595, 2.9820, 0.9726, 2.7660], + device='cuda:5'), covar=tensor([0.2485, 0.2414, 0.1438, 0.3527, 0.0962, 0.1042, 0.4608, 0.1511], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0176, 0.0159, 0.0129, 0.0159, 0.0122, 0.0146, 0.0122], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 06:21:01,385 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139932.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 06:21:05,442 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139938.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:21:08,391 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=139942.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:21:10,185 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139945.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:21:22,671 INFO [finetune.py:976] (5/7) Epoch 25, batch 2500, loss[loss=0.2146, simple_loss=0.2858, pruned_loss=0.07169, over 4827.00 frames. 
], tot_loss[loss=0.1697, simple_loss=0.2407, pruned_loss=0.04936, over 956842.10 frames. ], batch size: 38, lr: 3.01e-03, grad_scale: 16.0 +2023-03-27 06:21:24,359 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.523e+02 1.884e+02 2.422e+02 3.755e+02, threshold=3.768e+02, percent-clipped=3.0 +2023-03-27 06:21:32,842 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=139980.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:21:42,133 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=139993.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:21:57,548 INFO [finetune.py:976] (5/7) Epoch 25, batch 2550, loss[loss=0.2054, simple_loss=0.2704, pruned_loss=0.0702, over 4816.00 frames. ], tot_loss[loss=0.1735, simple_loss=0.2452, pruned_loss=0.05092, over 958390.76 frames. ], batch size: 38, lr: 3.01e-03, grad_scale: 16.0 +2023-03-27 06:21:59,029 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.57 vs. limit=5.0 +2023-03-27 06:22:14,732 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5972, 1.4845, 0.9749, 0.3519, 1.1841, 1.4501, 1.3518, 1.3327], + device='cuda:5'), covar=tensor([0.1137, 0.0904, 0.1682, 0.2033, 0.1664, 0.2400, 0.2646, 0.1131], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0191, 0.0199, 0.0180, 0.0209, 0.0210, 0.0222, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:22:23,221 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5755, 1.4625, 1.3234, 1.5439, 1.7938, 1.7121, 1.4899, 1.3248], + device='cuda:5'), covar=tensor([0.0334, 0.0343, 0.0635, 0.0301, 0.0220, 0.0475, 0.0368, 0.0429], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0107, 0.0146, 0.0112, 0.0101, 0.0115, 0.0103, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.8336e-05, 8.2170e-05, 1.1432e-04, 8.5760e-05, 7.8331e-05, 8.4773e-05, + 7.6559e-05, 8.5930e-05], device='cuda:5') +2023-03-27 06:22:36,079 INFO [finetune.py:976] (5/7) Epoch 25, batch 2600, loss[loss=0.2043, simple_loss=0.2791, pruned_loss=0.06473, over 4865.00 frames. ], tot_loss[loss=0.1758, simple_loss=0.2478, pruned_loss=0.05188, over 959039.45 frames. ], batch size: 34, lr: 3.01e-03, grad_scale: 16.0 +2023-03-27 06:22:42,049 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.019e+02 1.547e+02 1.969e+02 2.320e+02 4.703e+02, threshold=3.938e+02, percent-clipped=1.0 +2023-03-27 06:22:54,930 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.68 vs. limit=2.0 +2023-03-27 06:23:14,153 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140103.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:23:19,331 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140110.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:23:22,262 INFO [finetune.py:976] (5/7) Epoch 25, batch 2650, loss[loss=0.1828, simple_loss=0.2636, pruned_loss=0.05103, over 4862.00 frames. ], tot_loss[loss=0.1757, simple_loss=0.2483, pruned_loss=0.05152, over 958629.27 frames. 
], batch size: 34, lr: 3.01e-03, grad_scale: 16.0 +2023-03-27 06:23:27,148 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7041, 1.5589, 1.5089, 1.6421, 1.0273, 3.6614, 1.3443, 1.8139], + device='cuda:5'), covar=tensor([0.3188, 0.2515, 0.2145, 0.2418, 0.1794, 0.0160, 0.2548, 0.1280], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0120, 0.0123, 0.0112, 0.0095, 0.0094, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 06:23:28,792 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140125.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:23:41,822 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140145.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:23:51,792 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140160.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:23:53,483 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140162.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:23:55,132 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140164.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:23:55,634 INFO [finetune.py:976] (5/7) Epoch 25, batch 2700, loss[loss=0.1496, simple_loss=0.2194, pruned_loss=0.03991, over 4904.00 frames. ], tot_loss[loss=0.1758, simple_loss=0.2482, pruned_loss=0.05168, over 957392.13 frames. ], batch size: 36, lr: 3.01e-03, grad_scale: 16.0 +2023-03-27 06:23:56,851 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.003e+02 1.476e+02 1.708e+02 2.136e+02 4.297e+02, threshold=3.417e+02, percent-clipped=1.0 +2023-03-27 06:23:56,977 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4330, 1.4853, 1.9444, 1.7442, 1.5990, 3.4302, 1.4580, 1.6266], + device='cuda:5'), covar=tensor([0.1028, 0.1821, 0.1072, 0.0987, 0.1662, 0.0205, 0.1480, 0.1806], + device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0082, 0.0073, 0.0076, 0.0091, 0.0080, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 06:23:59,428 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140171.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:24:22,839 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140206.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:24:28,675 INFO [finetune.py:976] (5/7) Epoch 25, batch 2750, loss[loss=0.148, simple_loss=0.2027, pruned_loss=0.04669, over 4070.00 frames. ], tot_loss[loss=0.1736, simple_loss=0.2454, pruned_loss=0.05093, over 957583.55 frames. ], batch size: 17, lr: 3.01e-03, grad_scale: 16.0 +2023-03-27 06:24:30,172 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.11 vs. 
limit=5.0 +2023-03-27 06:24:36,544 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140227.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:24:43,704 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140238.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:24:50,137 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1859, 2.0459, 1.7117, 1.8872, 1.8912, 1.8581, 1.9438, 2.6843], + device='cuda:5'), covar=tensor([0.3497, 0.3627, 0.3167, 0.3568, 0.3583, 0.2544, 0.3391, 0.1549], + device='cuda:5'), in_proj_covar=tensor([0.0289, 0.0263, 0.0235, 0.0276, 0.0259, 0.0229, 0.0257, 0.0238], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:24:58,272 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140259.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:25:01,787 INFO [finetune.py:976] (5/7) Epoch 25, batch 2800, loss[loss=0.1433, simple_loss=0.2171, pruned_loss=0.03477, over 4932.00 frames. ], tot_loss[loss=0.1701, simple_loss=0.2419, pruned_loss=0.04915, over 958676.88 frames. ], batch size: 38, lr: 3.01e-03, grad_scale: 16.0 +2023-03-27 06:25:02,939 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.021e+02 1.479e+02 1.751e+02 2.221e+02 3.486e+02, threshold=3.502e+02, percent-clipped=1.0 +2023-03-27 06:25:10,749 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=140275.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:25:22,267 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=140286.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:25:53,837 INFO [finetune.py:976] (5/7) Epoch 25, batch 2850, loss[loss=0.1377, simple_loss=0.2073, pruned_loss=0.0341, over 4761.00 frames. ], tot_loss[loss=0.1681, simple_loss=0.2398, pruned_loss=0.04824, over 955800.77 frames. ], batch size: 26, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:25:55,182 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3062, 2.2360, 2.0336, 2.5273, 2.9324, 2.3494, 2.4687, 1.7846], + device='cuda:5'), covar=tensor([0.2252, 0.2096, 0.1947, 0.1641, 0.1669, 0.1110, 0.1862, 0.1941], + device='cuda:5'), in_proj_covar=tensor([0.0245, 0.0211, 0.0214, 0.0198, 0.0245, 0.0190, 0.0217, 0.0205], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:25:56,965 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140320.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:26:03,406 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4010, 1.3672, 1.9551, 1.6790, 1.5617, 3.3077, 1.2902, 1.5335], + device='cuda:5'), covar=tensor([0.0985, 0.1792, 0.1173, 0.0952, 0.1629, 0.0246, 0.1522, 0.1935], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0073, 0.0076, 0.0091, 0.0081, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 06:26:27,557 INFO [finetune.py:976] (5/7) Epoch 25, batch 2900, loss[loss=0.227, simple_loss=0.2792, pruned_loss=0.08734, over 4890.00 frames. ], tot_loss[loss=0.1711, simple_loss=0.243, pruned_loss=0.0496, over 955531.80 frames. 
], batch size: 32, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:26:28,762 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.105e+02 1.583e+02 1.866e+02 2.190e+02 4.311e+02, threshold=3.732e+02, percent-clipped=1.0 +2023-03-27 06:26:40,797 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6992, 1.6256, 1.5117, 1.6762, 1.4220, 3.6052, 1.4759, 2.0106], + device='cuda:5'), covar=tensor([0.3636, 0.2632, 0.2328, 0.2613, 0.1760, 0.0242, 0.2486, 0.1233], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0121, 0.0124, 0.0113, 0.0096, 0.0094, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 06:26:41,989 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140387.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:27:01,455 INFO [finetune.py:976] (5/7) Epoch 25, batch 2950, loss[loss=0.1646, simple_loss=0.2393, pruned_loss=0.04494, over 4750.00 frames. ], tot_loss[loss=0.1725, simple_loss=0.2451, pruned_loss=0.04999, over 955110.47 frames. ], batch size: 27, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:27:07,562 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140425.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:27:21,225 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140445.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:27:23,059 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140448.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:27:30,137 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140459.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:27:30,779 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140460.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:27:32,506 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140462.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:27:33,083 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([5.1733, 4.5269, 4.7153, 4.9786, 4.9151, 4.6584, 5.2842, 1.6371], + device='cuda:5'), covar=tensor([0.0704, 0.0842, 0.0708, 0.0855, 0.1289, 0.1470, 0.0541, 0.5920], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0249, 0.0283, 0.0297, 0.0339, 0.0287, 0.0308, 0.0304], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:27:34,688 INFO [finetune.py:976] (5/7) Epoch 25, batch 3000, loss[loss=0.183, simple_loss=0.2533, pruned_loss=0.0564, over 4904.00 frames. ], tot_loss[loss=0.1741, simple_loss=0.2468, pruned_loss=0.05069, over 955636.98 frames. 
], batch size: 36, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:27:34,688 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-27 06:27:39,462 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.8084, 3.4392, 3.5495, 3.7065, 3.5933, 3.4626, 3.8909, 1.3847], + device='cuda:5'), covar=tensor([0.0924, 0.0817, 0.0877, 0.1057, 0.1367, 0.1469, 0.0821, 0.5320], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0248, 0.0283, 0.0297, 0.0339, 0.0287, 0.0308, 0.0304], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:27:48,789 INFO [finetune.py:1010] (5/7) Epoch 25, validation: loss=0.1571, simple_loss=0.2254, pruned_loss=0.04443, over 2265189.00 frames. +2023-03-27 06:27:48,789 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-27 06:27:49,965 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140466.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:27:50,499 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.023e+02 1.568e+02 1.888e+02 2.214e+02 4.503e+02, threshold=3.776e+02, percent-clipped=3.0 +2023-03-27 06:27:52,279 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4310, 1.4078, 1.0955, 1.3307, 1.7616, 1.6530, 1.4463, 1.2613], + device='cuda:5'), covar=tensor([0.0413, 0.0381, 0.0738, 0.0379, 0.0241, 0.0560, 0.0383, 0.0535], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0106, 0.0144, 0.0110, 0.0099, 0.0113, 0.0102, 0.0112], + device='cuda:5'), out_proj_covar=tensor([7.7429e-05, 8.1216e-05, 1.1288e-04, 8.4412e-05, 7.7087e-05, 8.3714e-05, + 7.5899e-05, 8.4813e-05], device='cuda:5') +2023-03-27 06:27:59,536 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=140473.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:28:11,484 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0705, 2.1356, 1.6645, 1.9621, 1.9513, 1.9181, 2.0032, 2.7226], + device='cuda:5'), covar=tensor([0.3945, 0.4163, 0.3549, 0.3959, 0.4262, 0.2562, 0.3988, 0.1721], + device='cuda:5'), in_proj_covar=tensor([0.0289, 0.0263, 0.0235, 0.0276, 0.0259, 0.0229, 0.0256, 0.0237], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:28:17,955 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140489.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:28:20,351 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=140493.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:28:29,172 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140506.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:28:30,345 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=140508.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:28:31,003 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140509.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:28:31,588 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=140510.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:28:34,543 INFO [finetune.py:976] (5/7) Epoch 25, batch 3050, loss[loss=0.1348, simple_loss=0.2236, pruned_loss=0.02297, over 4796.00 frames. ], tot_loss[loss=0.1742, simple_loss=0.2471, pruned_loss=0.05069, over 957349.10 frames. 
], batch size: 29, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:28:58,618 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140550.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:29:00,943 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=140554.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:29:08,053 INFO [finetune.py:976] (5/7) Epoch 25, batch 3100, loss[loss=0.1811, simple_loss=0.2559, pruned_loss=0.05321, over 4931.00 frames. ], tot_loss[loss=0.1717, simple_loss=0.2445, pruned_loss=0.04943, over 957777.63 frames. ], batch size: 38, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:29:09,243 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.313e+01 1.495e+02 1.767e+02 2.180e+02 4.499e+02, threshold=3.535e+02, percent-clipped=1.0 +2023-03-27 06:29:12,199 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140570.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:29:36,762 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6371, 1.6056, 2.0929, 3.1479, 2.1058, 2.4091, 1.2898, 2.6529], + device='cuda:5'), covar=tensor([0.1558, 0.1202, 0.1145, 0.0540, 0.0794, 0.1678, 0.1471, 0.0449], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0133, 0.0163, 0.0101, 0.0135, 0.0124, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 06:29:42,052 INFO [finetune.py:976] (5/7) Epoch 25, batch 3150, loss[loss=0.152, simple_loss=0.2247, pruned_loss=0.03968, over 4840.00 frames. ], tot_loss[loss=0.1709, simple_loss=0.2429, pruned_loss=0.0495, over 958978.22 frames. ], batch size: 41, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:29:42,120 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140615.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:29:42,767 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3330, 1.4228, 1.8012, 1.6766, 1.5962, 3.1835, 1.4055, 1.6029], + device='cuda:5'), covar=tensor([0.0929, 0.1697, 0.0985, 0.0905, 0.1498, 0.0253, 0.1430, 0.1607], + device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0082, 0.0073, 0.0076, 0.0091, 0.0081, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 06:29:57,318 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-03-27 06:30:15,053 INFO [finetune.py:976] (5/7) Epoch 25, batch 3200, loss[loss=0.2229, simple_loss=0.2843, pruned_loss=0.0807, over 4730.00 frames. ], tot_loss[loss=0.1691, simple_loss=0.2399, pruned_loss=0.04919, over 957767.63 frames. ], batch size: 59, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:30:16,220 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.929e+01 1.486e+02 1.750e+02 2.144e+02 4.466e+02, threshold=3.500e+02, percent-clipped=2.0 +2023-03-27 06:30:56,981 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-03-27 06:31:06,560 INFO [finetune.py:976] (5/7) Epoch 25, batch 3250, loss[loss=0.1557, simple_loss=0.2393, pruned_loss=0.03604, over 4874.00 frames. ], tot_loss[loss=0.1706, simple_loss=0.2412, pruned_loss=0.05002, over 957187.40 frames. 
], batch size: 34, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:31:11,486 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140723.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:31:25,167 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140743.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:31:35,353 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140759.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:31:39,361 INFO [finetune.py:976] (5/7) Epoch 25, batch 3300, loss[loss=0.2309, simple_loss=0.3037, pruned_loss=0.07899, over 4191.00 frames. ], tot_loss[loss=0.1735, simple_loss=0.2447, pruned_loss=0.0511, over 955069.15 frames. ], batch size: 65, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:31:40,540 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140766.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:31:41,064 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.042e+02 1.629e+02 1.945e+02 2.397e+02 4.021e+02, threshold=3.889e+02, percent-clipped=5.0 +2023-03-27 06:31:53,060 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140784.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:32:03,969 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1695, 2.3320, 2.8236, 2.5340, 2.5134, 4.8026, 2.4088, 2.5215], + device='cuda:5'), covar=tensor([0.0796, 0.1387, 0.0844, 0.0766, 0.1242, 0.0143, 0.1077, 0.1330], + device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0082, 0.0073, 0.0076, 0.0091, 0.0081, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 06:32:08,086 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=140807.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:32:09,349 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140809.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:32:12,392 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=140814.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:32:12,930 INFO [finetune.py:976] (5/7) Epoch 25, batch 3350, loss[loss=0.1992, simple_loss=0.2766, pruned_loss=0.06094, over 4274.00 frames. ], tot_loss[loss=0.1753, simple_loss=0.2467, pruned_loss=0.05194, over 952517.42 frames. ], batch size: 66, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:32:33,955 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140845.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:32:46,609 INFO [finetune.py:976] (5/7) Epoch 25, batch 3400, loss[loss=0.1772, simple_loss=0.2502, pruned_loss=0.05208, over 4702.00 frames. ], tot_loss[loss=0.1747, simple_loss=0.2466, pruned_loss=0.05139, over 952230.31 frames. 
], batch size: 59, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:32:46,676 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140865.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:32:47,794 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.030e+02 1.564e+02 1.878e+02 2.236e+02 3.278e+02, threshold=3.756e+02, percent-clipped=0.0 +2023-03-27 06:32:49,711 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140870.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:33:01,378 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6538, 2.6343, 2.1717, 1.1116, 2.3428, 1.9796, 1.9888, 2.3895], + device='cuda:5'), covar=tensor([0.0871, 0.0684, 0.1559, 0.2045, 0.1252, 0.2382, 0.2116, 0.0897], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0191, 0.0199, 0.0180, 0.0209, 0.0210, 0.0223, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:33:08,693 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1218, 2.1995, 1.8342, 2.2329, 2.1439, 2.1254, 2.0880, 2.9324], + device='cuda:5'), covar=tensor([0.4033, 0.4842, 0.3376, 0.4419, 0.4334, 0.2509, 0.4460, 0.1611], + device='cuda:5'), in_proj_covar=tensor([0.0290, 0.0263, 0.0236, 0.0276, 0.0259, 0.0229, 0.0256, 0.0237], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:33:37,444 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4222, 0.9952, 0.7722, 1.2788, 1.8794, 0.6886, 1.1755, 1.2317], + device='cuda:5'), covar=tensor([0.1526, 0.2336, 0.1695, 0.1315, 0.1941, 0.1917, 0.1613, 0.2127], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0094, 0.0109, 0.0092, 0.0119, 0.0093, 0.0097, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 06:33:39,160 INFO [finetune.py:976] (5/7) Epoch 25, batch 3450, loss[loss=0.1932, simple_loss=0.2653, pruned_loss=0.06056, over 4298.00 frames. ], tot_loss[loss=0.1736, simple_loss=0.2463, pruned_loss=0.05045, over 953209.76 frames. 
], batch size: 65, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:33:39,277 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140915.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:33:40,457 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140917.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:33:45,376 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4070, 2.2981, 2.0992, 2.2737, 2.2400, 2.2612, 2.2405, 2.8732], + device='cuda:5'), covar=tensor([0.3221, 0.3819, 0.2909, 0.3119, 0.3289, 0.2423, 0.3424, 0.1611], + device='cuda:5'), in_proj_covar=tensor([0.0290, 0.0263, 0.0236, 0.0277, 0.0259, 0.0229, 0.0256, 0.0238], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:34:01,687 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140947.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:34:09,588 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1246, 1.9897, 1.9930, 0.9411, 2.2677, 2.3874, 2.1290, 1.8433], + device='cuda:5'), covar=tensor([0.0927, 0.0837, 0.0574, 0.0721, 0.0488, 0.0678, 0.0545, 0.0737], + device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0149, 0.0128, 0.0123, 0.0131, 0.0130, 0.0142, 0.0148], + device='cuda:5'), out_proj_covar=tensor([8.9712e-05, 1.0688e-04, 9.1227e-05, 8.6412e-05, 9.1595e-05, 9.2132e-05, + 1.0092e-04, 1.0589e-04], device='cuda:5') +2023-03-27 06:34:11,813 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=140963.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:34:12,941 INFO [finetune.py:976] (5/7) Epoch 25, batch 3500, loss[loss=0.1522, simple_loss=0.2234, pruned_loss=0.04056, over 4818.00 frames. ], tot_loss[loss=0.1713, simple_loss=0.2432, pruned_loss=0.04966, over 953983.47 frames. ], batch size: 40, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:34:14,174 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.063e+02 1.468e+02 1.748e+02 2.204e+02 3.629e+02, threshold=3.496e+02, percent-clipped=0.0 +2023-03-27 06:34:20,902 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140978.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:34:41,924 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141008.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:34:46,049 INFO [finetune.py:976] (5/7) Epoch 25, batch 3550, loss[loss=0.1579, simple_loss=0.2286, pruned_loss=0.04356, over 4898.00 frames. ], tot_loss[loss=0.1698, simple_loss=0.241, pruned_loss=0.04933, over 954107.60 frames. ], batch size: 36, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:35:04,135 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141043.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:35:04,782 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4317, 1.3567, 1.4778, 0.7701, 1.5122, 1.4832, 1.4617, 1.2832], + device='cuda:5'), covar=tensor([0.0658, 0.0816, 0.0725, 0.0917, 0.0885, 0.0741, 0.0649, 0.1301], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0136, 0.0140, 0.0119, 0.0126, 0.0137, 0.0139, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:35:19,345 INFO [finetune.py:976] (5/7) Epoch 25, batch 3600, loss[loss=0.1983, simple_loss=0.2534, pruned_loss=0.07163, over 4070.00 frames. 
], tot_loss[loss=0.169, simple_loss=0.2396, pruned_loss=0.04926, over 954399.22 frames. ], batch size: 17, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:35:20,526 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.001e+02 1.456e+02 1.796e+02 2.356e+02 3.995e+02, threshold=3.592e+02, percent-clipped=1.0 +2023-03-27 06:35:28,418 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141079.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:35:36,622 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=141091.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:36:00,471 INFO [finetune.py:976] (5/7) Epoch 25, batch 3650, loss[loss=0.1953, simple_loss=0.2706, pruned_loss=0.05995, over 4716.00 frames. ], tot_loss[loss=0.1716, simple_loss=0.2427, pruned_loss=0.0503, over 954088.95 frames. ], batch size: 59, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:36:30,904 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141143.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:36:30,978 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.61 vs. limit=5.0 +2023-03-27 06:36:32,110 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141145.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:36:43,188 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3376, 1.2793, 1.6844, 1.7118, 1.4893, 3.1631, 1.2865, 1.3982], + device='cuda:5'), covar=tensor([0.1153, 0.2155, 0.1277, 0.1069, 0.1929, 0.0318, 0.1833, 0.2267], + device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0082, 0.0073, 0.0076, 0.0091, 0.0081, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 06:36:46,082 INFO [finetune.py:976] (5/7) Epoch 25, batch 3700, loss[loss=0.1774, simple_loss=0.2517, pruned_loss=0.05159, over 4906.00 frames. ], tot_loss[loss=0.1747, simple_loss=0.247, pruned_loss=0.05118, over 956301.64 frames. 
], batch size: 37, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:36:46,155 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141165.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:36:46,172 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141165.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:36:47,287 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.158e+02 1.708e+02 2.029e+02 2.382e+02 3.628e+02, threshold=4.058e+02, percent-clipped=1.0 +2023-03-27 06:36:53,405 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4393, 1.3742, 1.4682, 0.7376, 1.4729, 1.4379, 1.3982, 1.2679], + device='cuda:5'), covar=tensor([0.0659, 0.0843, 0.0798, 0.1025, 0.0886, 0.0783, 0.0713, 0.1382], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0137, 0.0141, 0.0120, 0.0127, 0.0138, 0.0140, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:37:03,880 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=141193.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:37:11,713 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141204.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:37:18,589 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=141213.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:37:19,743 INFO [finetune.py:976] (5/7) Epoch 25, batch 3750, loss[loss=0.1944, simple_loss=0.2714, pruned_loss=0.05867, over 4808.00 frames. ], tot_loss[loss=0.1759, simple_loss=0.2487, pruned_loss=0.05162, over 955527.12 frames. ], batch size: 40, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:37:26,454 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3163, 1.5876, 1.6947, 0.7850, 1.6506, 1.9066, 1.8638, 1.5906], + device='cuda:5'), covar=tensor([0.0932, 0.0717, 0.0470, 0.0586, 0.0423, 0.0603, 0.0349, 0.0705], + device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0149, 0.0128, 0.0123, 0.0131, 0.0131, 0.0142, 0.0149], + device='cuda:5'), out_proj_covar=tensor([9.0131e-05, 1.0719e-04, 9.1655e-05, 8.6892e-05, 9.1973e-05, 9.2860e-05, + 1.0143e-04, 1.0653e-04], device='cuda:5') +2023-03-27 06:37:40,000 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.5634, 1.4494, 1.5750, 0.7934, 1.6682, 1.8562, 1.6972, 1.4504], + device='cuda:5'), covar=tensor([0.1245, 0.1232, 0.0654, 0.0780, 0.0610, 0.0835, 0.0550, 0.0871], + device='cuda:5'), in_proj_covar=tensor([0.0124, 0.0149, 0.0129, 0.0123, 0.0132, 0.0131, 0.0142, 0.0149], + device='cuda:5'), out_proj_covar=tensor([9.0137e-05, 1.0728e-04, 9.1747e-05, 8.6897e-05, 9.2074e-05, 9.2936e-05, + 1.0153e-04, 1.0669e-04], device='cuda:5') +2023-03-27 06:37:52,662 INFO [finetune.py:976] (5/7) Epoch 25, batch 3800, loss[loss=0.1743, simple_loss=0.2483, pruned_loss=0.05017, over 4880.00 frames. ], tot_loss[loss=0.1768, simple_loss=0.2497, pruned_loss=0.052, over 956729.25 frames. 
], batch size: 43, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:37:54,343 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.018e+02 1.508e+02 1.827e+02 2.217e+02 6.513e+02, threshold=3.654e+02, percent-clipped=2.0 +2023-03-27 06:37:58,030 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141273.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:38:19,446 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141303.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:38:32,079 INFO [finetune.py:976] (5/7) Epoch 25, batch 3850, loss[loss=0.1348, simple_loss=0.207, pruned_loss=0.03126, over 4857.00 frames. ], tot_loss[loss=0.174, simple_loss=0.2466, pruned_loss=0.05073, over 956305.36 frames. ], batch size: 31, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:38:49,948 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141329.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:39:01,796 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.56 vs. limit=5.0 +2023-03-27 06:39:16,918 INFO [finetune.py:976] (5/7) Epoch 25, batch 3900, loss[loss=0.1468, simple_loss=0.2171, pruned_loss=0.03827, over 4932.00 frames. ], tot_loss[loss=0.1724, simple_loss=0.2447, pruned_loss=0.05007, over 958326.57 frames. ], batch size: 38, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:39:18,105 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.088e+02 1.504e+02 1.773e+02 2.110e+02 6.012e+02, threshold=3.546e+02, percent-clipped=1.0 +2023-03-27 06:39:26,401 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141379.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:39:33,595 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141390.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:39:49,667 INFO [finetune.py:976] (5/7) Epoch 25, batch 3950, loss[loss=0.165, simple_loss=0.231, pruned_loss=0.0495, over 4907.00 frames. ], tot_loss[loss=0.1703, simple_loss=0.2419, pruned_loss=0.0494, over 959344.62 frames. ], batch size: 43, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:39:51,523 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141417.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 06:39:58,916 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=141427.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:40:07,823 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.2694, 2.9323, 3.0716, 3.2189, 3.0719, 2.8314, 3.3361, 0.9870], + device='cuda:5'), covar=tensor([0.1247, 0.1056, 0.1152, 0.1263, 0.1843, 0.2026, 0.1229, 0.6022], + device='cuda:5'), in_proj_covar=tensor([0.0351, 0.0248, 0.0283, 0.0296, 0.0338, 0.0288, 0.0309, 0.0302], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:40:07,857 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141441.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:40:23,346 INFO [finetune.py:976] (5/7) Epoch 25, batch 4000, loss[loss=0.132, simple_loss=0.2142, pruned_loss=0.0249, over 4762.00 frames. ], tot_loss[loss=0.1705, simple_loss=0.2414, pruned_loss=0.0498, over 957911.27 frames. 
], batch size: 26, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:40:23,426 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141465.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:40:24,523 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.071e+02 1.514e+02 1.750e+02 2.113e+02 3.817e+02, threshold=3.500e+02, percent-clipped=1.0 +2023-03-27 06:40:33,189 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141478.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 06:40:46,342 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141499.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:40:48,233 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141502.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:40:55,311 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=141513.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:40:56,984 INFO [finetune.py:976] (5/7) Epoch 25, batch 4050, loss[loss=0.1631, simple_loss=0.2451, pruned_loss=0.04054, over 4833.00 frames. ], tot_loss[loss=0.1729, simple_loss=0.244, pruned_loss=0.0509, over 956335.75 frames. ], batch size: 47, lr: 3.00e-03, grad_scale: 16.0 +2023-03-27 06:41:05,096 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.60 vs. limit=2.0 +2023-03-27 06:41:25,995 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.59 vs. limit=2.0 +2023-03-27 06:41:49,013 INFO [finetune.py:976] (5/7) Epoch 25, batch 4100, loss[loss=0.2006, simple_loss=0.2638, pruned_loss=0.0687, over 4921.00 frames. ], tot_loss[loss=0.1742, simple_loss=0.2457, pruned_loss=0.05133, over 954450.01 frames. ], batch size: 42, lr: 3.00e-03, grad_scale: 32.0 +2023-03-27 06:41:50,184 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.144e+02 1.605e+02 1.887e+02 2.173e+02 5.231e+02, threshold=3.774e+02, percent-clipped=3.0 +2023-03-27 06:41:54,398 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141573.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:42:09,923 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0540, 1.4405, 0.7862, 1.8537, 2.3933, 1.8118, 1.6718, 1.8562], + device='cuda:5'), covar=tensor([0.1376, 0.1949, 0.1912, 0.1134, 0.1839, 0.1742, 0.1361, 0.1822], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0094, 0.0110, 0.0093, 0.0120, 0.0094, 0.0098, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 06:42:14,780 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141603.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:42:22,404 INFO [finetune.py:976] (5/7) Epoch 25, batch 4150, loss[loss=0.1552, simple_loss=0.2219, pruned_loss=0.04428, over 4689.00 frames. ], tot_loss[loss=0.173, simple_loss=0.2449, pruned_loss=0.05055, over 950183.52 frames. ], batch size: 23, lr: 3.00e-03, grad_scale: 32.0 +2023-03-27 06:42:26,083 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=141621.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:42:35,289 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.79 vs. 
limit=5.0 +2023-03-27 06:42:46,646 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=141651.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:42:55,970 INFO [finetune.py:976] (5/7) Epoch 25, batch 4200, loss[loss=0.1301, simple_loss=0.2173, pruned_loss=0.0215, over 4839.00 frames. ], tot_loss[loss=0.1727, simple_loss=0.2454, pruned_loss=0.05, over 951998.01 frames. ], batch size: 49, lr: 3.00e-03, grad_scale: 32.0 +2023-03-27 06:42:57,195 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.937e+01 1.553e+02 1.832e+02 2.223e+02 5.119e+02, threshold=3.664e+02, percent-clipped=2.0 +2023-03-27 06:43:09,627 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141685.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:43:21,747 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-03-27 06:43:29,320 INFO [finetune.py:976] (5/7) Epoch 25, batch 4250, loss[loss=0.1972, simple_loss=0.259, pruned_loss=0.0677, over 4729.00 frames. ], tot_loss[loss=0.1718, simple_loss=0.2439, pruned_loss=0.04986, over 952423.03 frames. ], batch size: 23, lr: 3.00e-03, grad_scale: 32.0 +2023-03-27 06:43:59,996 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.34 vs. limit=5.0 +2023-03-27 06:44:20,813 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-03-27 06:44:21,246 INFO [finetune.py:976] (5/7) Epoch 25, batch 4300, loss[loss=0.173, simple_loss=0.2443, pruned_loss=0.05086, over 4917.00 frames. ], tot_loss[loss=0.1687, simple_loss=0.2405, pruned_loss=0.04839, over 953144.80 frames. ], batch size: 36, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:44:22,424 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.814e+01 1.357e+02 1.630e+02 2.025e+02 3.929e+02, threshold=3.260e+02, percent-clipped=1.0 +2023-03-27 06:44:26,631 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141773.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 06:44:43,625 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141797.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:44:44,842 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141799.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:44:53,468 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5444, 1.4311, 1.3014, 1.6236, 1.6270, 1.5910, 1.0472, 1.3335], + device='cuda:5'), covar=tensor([0.2154, 0.2109, 0.2062, 0.1684, 0.1551, 0.1301, 0.2427, 0.1894], + device='cuda:5'), in_proj_covar=tensor([0.0245, 0.0211, 0.0214, 0.0198, 0.0245, 0.0191, 0.0216, 0.0205], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:44:54,720 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3393, 2.1560, 1.8075, 2.0073, 2.2878, 1.9742, 2.4665, 2.3283], + device='cuda:5'), covar=tensor([0.1313, 0.1842, 0.2845, 0.2644, 0.2390, 0.1615, 0.2699, 0.1721], + device='cuda:5'), in_proj_covar=tensor([0.0189, 0.0191, 0.0236, 0.0255, 0.0250, 0.0206, 0.0215, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:44:55,201 INFO [finetune.py:976] (5/7) Epoch 25, batch 4350, loss[loss=0.1417, simple_loss=0.2191, pruned_loss=0.03218, over 4793.00 frames. 
], tot_loss[loss=0.1675, simple_loss=0.2386, pruned_loss=0.04818, over 954876.65 frames. ], batch size: 25, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:45:17,473 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=141847.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:45:28,845 INFO [finetune.py:976] (5/7) Epoch 25, batch 4400, loss[loss=0.1479, simple_loss=0.2383, pruned_loss=0.02876, over 4769.00 frames. ], tot_loss[loss=0.1698, simple_loss=0.2407, pruned_loss=0.04941, over 955862.53 frames. ], batch size: 59, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:45:30,036 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.038e+02 1.595e+02 1.814e+02 2.202e+02 4.275e+02, threshold=3.628e+02, percent-clipped=6.0 +2023-03-27 06:46:01,880 INFO [finetune.py:976] (5/7) Epoch 25, batch 4450, loss[loss=0.1593, simple_loss=0.2319, pruned_loss=0.0434, over 4885.00 frames. ], tot_loss[loss=0.1726, simple_loss=0.2443, pruned_loss=0.0504, over 956703.17 frames. ], batch size: 32, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:46:02,596 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141916.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:46:10,449 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141928.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:46:46,893 INFO [finetune.py:976] (5/7) Epoch 25, batch 4500, loss[loss=0.1826, simple_loss=0.2447, pruned_loss=0.06029, over 4823.00 frames. ], tot_loss[loss=0.1739, simple_loss=0.2461, pruned_loss=0.0508, over 956066.97 frames. ], batch size: 30, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:46:52,643 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.869e+01 1.594e+02 1.826e+02 2.236e+02 4.959e+02, threshold=3.653e+02, percent-clipped=2.0 +2023-03-27 06:47:02,927 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141977.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:47:08,191 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141985.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:47:10,694 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141989.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:47:30,044 INFO [finetune.py:976] (5/7) Epoch 25, batch 4550, loss[loss=0.1875, simple_loss=0.2613, pruned_loss=0.0569, over 4894.00 frames. ], tot_loss[loss=0.1738, simple_loss=0.2467, pruned_loss=0.05045, over 956455.65 frames. ], batch size: 43, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:47:41,390 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=142033.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:47:43,852 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8705, 1.4881, 1.9582, 1.9103, 1.6988, 1.6636, 1.8830, 1.8434], + device='cuda:5'), covar=tensor([0.4205, 0.4079, 0.3343, 0.3795, 0.4833, 0.3879, 0.4437, 0.3082], + device='cuda:5'), in_proj_covar=tensor([0.0264, 0.0247, 0.0267, 0.0293, 0.0293, 0.0269, 0.0300, 0.0250], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:48:03,345 INFO [finetune.py:976] (5/7) Epoch 25, batch 4600, loss[loss=0.1172, simple_loss=0.2002, pruned_loss=0.01714, over 4757.00 frames. ], tot_loss[loss=0.1734, simple_loss=0.2462, pruned_loss=0.05027, over 956488.57 frames. 
], batch size: 28, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:48:04,588 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 7.716e+01 1.569e+02 1.799e+02 2.271e+02 3.318e+02, threshold=3.598e+02, percent-clipped=0.0 +2023-03-27 06:48:08,723 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=142073.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 06:48:23,762 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=142097.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:48:36,591 INFO [finetune.py:976] (5/7) Epoch 25, batch 4650, loss[loss=0.1541, simple_loss=0.2253, pruned_loss=0.04146, over 4824.00 frames. ], tot_loss[loss=0.1714, simple_loss=0.2432, pruned_loss=0.04976, over 956460.99 frames. ], batch size: 38, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:48:40,334 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=142121.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 06:48:57,213 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=142145.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:49:19,997 INFO [finetune.py:976] (5/7) Epoch 25, batch 4700, loss[loss=0.1706, simple_loss=0.2343, pruned_loss=0.0535, over 4330.00 frames. ], tot_loss[loss=0.1698, simple_loss=0.241, pruned_loss=0.04927, over 956214.24 frames. ], batch size: 65, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:49:21,183 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.163e+01 1.384e+02 1.765e+02 2.088e+02 3.764e+02, threshold=3.531e+02, percent-clipped=1.0 +2023-03-27 06:49:45,562 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4679, 1.4070, 1.2953, 1.5877, 1.5528, 1.5577, 0.9791, 1.3208], + device='cuda:5'), covar=tensor([0.1855, 0.1765, 0.1602, 0.1344, 0.1384, 0.1038, 0.2173, 0.1690], + device='cuda:5'), in_proj_covar=tensor([0.0247, 0.0213, 0.0216, 0.0199, 0.0246, 0.0192, 0.0218, 0.0207], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:49:47,318 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-03-27 06:50:00,900 INFO [finetune.py:976] (5/7) Epoch 25, batch 4750, loss[loss=0.1688, simple_loss=0.2506, pruned_loss=0.04353, over 4781.00 frames. ], tot_loss[loss=0.1694, simple_loss=0.24, pruned_loss=0.04945, over 956211.47 frames. ], batch size: 28, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:50:14,464 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.36 vs. limit=5.0 +2023-03-27 06:50:22,292 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-03-27 06:50:34,333 INFO [finetune.py:976] (5/7) Epoch 25, batch 4800, loss[loss=0.189, simple_loss=0.272, pruned_loss=0.05298, over 4853.00 frames. ], tot_loss[loss=0.1719, simple_loss=0.2426, pruned_loss=0.05058, over 955225.94 frames. 
], batch size: 44, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:50:35,546 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.947e+01 1.535e+02 1.762e+02 2.238e+02 3.446e+02, threshold=3.524e+02, percent-clipped=1.0 +2023-03-27 06:50:39,630 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=142272.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:50:47,488 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=142284.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:50:52,412 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0840, 2.0144, 1.8866, 2.2861, 2.6398, 2.2568, 1.9288, 1.8061], + device='cuda:5'), covar=tensor([0.2241, 0.1986, 0.2023, 0.1582, 0.1529, 0.1225, 0.2207, 0.2069], + device='cuda:5'), in_proj_covar=tensor([0.0247, 0.0213, 0.0217, 0.0200, 0.0247, 0.0193, 0.0219, 0.0207], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:51:07,516 INFO [finetune.py:976] (5/7) Epoch 25, batch 4850, loss[loss=0.178, simple_loss=0.2456, pruned_loss=0.05519, over 4919.00 frames. ], tot_loss[loss=0.1742, simple_loss=0.2458, pruned_loss=0.05131, over 955715.95 frames. ], batch size: 38, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:51:39,148 INFO [finetune.py:976] (5/7) Epoch 25, batch 4900, loss[loss=0.1709, simple_loss=0.2486, pruned_loss=0.04658, over 4910.00 frames. ], tot_loss[loss=0.1754, simple_loss=0.2476, pruned_loss=0.05161, over 957083.12 frames. ], batch size: 38, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:51:40,868 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.096e+02 1.551e+02 1.812e+02 2.135e+02 6.918e+02, threshold=3.624e+02, percent-clipped=2.0 +2023-03-27 06:52:31,146 INFO [finetune.py:976] (5/7) Epoch 25, batch 4950, loss[loss=0.1783, simple_loss=0.2553, pruned_loss=0.05063, over 4908.00 frames. ], tot_loss[loss=0.176, simple_loss=0.2483, pruned_loss=0.05189, over 957467.73 frames. ], batch size: 36, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:52:56,031 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2089, 2.0608, 2.2084, 1.4892, 2.1088, 2.2660, 2.2810, 1.7013], + device='cuda:5'), covar=tensor([0.0490, 0.0606, 0.0650, 0.0841, 0.0668, 0.0620, 0.0541, 0.1177], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0137, 0.0141, 0.0120, 0.0127, 0.0138, 0.0140, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:53:03,771 INFO [finetune.py:976] (5/7) Epoch 25, batch 5000, loss[loss=0.1347, simple_loss=0.2087, pruned_loss=0.03036, over 4870.00 frames. ], tot_loss[loss=0.1734, simple_loss=0.2455, pruned_loss=0.05064, over 956818.12 frames. ], batch size: 34, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:53:04,978 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.190e+01 1.432e+02 1.813e+02 2.155e+02 3.992e+02, threshold=3.625e+02, percent-clipped=1.0 +2023-03-27 06:53:36,415 INFO [finetune.py:976] (5/7) Epoch 25, batch 5050, loss[loss=0.1509, simple_loss=0.2196, pruned_loss=0.04109, over 4926.00 frames. ], tot_loss[loss=0.1716, simple_loss=0.2429, pruned_loss=0.05021, over 957026.48 frames. ], batch size: 37, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:53:57,180 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.62 vs. 
limit=5.0 +2023-03-27 06:54:09,847 INFO [finetune.py:976] (5/7) Epoch 25, batch 5100, loss[loss=0.1732, simple_loss=0.2379, pruned_loss=0.05423, over 4895.00 frames. ], tot_loss[loss=0.169, simple_loss=0.2392, pruned_loss=0.04938, over 958092.27 frames. ], batch size: 32, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:54:11,048 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.212e+01 1.519e+02 1.807e+02 2.247e+02 4.075e+02, threshold=3.613e+02, percent-clipped=2.0 +2023-03-27 06:54:14,211 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=142572.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:54:14,851 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142573.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:54:27,741 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=142584.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:54:49,927 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0464, 1.2328, 1.2268, 1.2633, 1.3762, 2.4483, 1.2388, 1.3630], + device='cuda:5'), covar=tensor([0.0992, 0.1873, 0.1166, 0.0954, 0.1674, 0.0379, 0.1505, 0.1837], + device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0082, 0.0072, 0.0075, 0.0090, 0.0080, 0.0084, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 06:54:51,230 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-27 06:54:59,673 INFO [finetune.py:976] (5/7) Epoch 25, batch 5150, loss[loss=0.1754, simple_loss=0.2314, pruned_loss=0.05972, over 4570.00 frames. ], tot_loss[loss=0.1709, simple_loss=0.2409, pruned_loss=0.05039, over 957813.57 frames. ], batch size: 20, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:55:01,638 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-03-27 06:55:03,301 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=142620.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:55:06,200 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.53 vs. limit=5.0 +2023-03-27 06:55:10,594 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=142632.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:55:12,819 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142634.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:55:14,930 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.64 vs. limit=5.0 +2023-03-27 06:55:19,430 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-03-27 06:55:33,011 INFO [finetune.py:976] (5/7) Epoch 25, batch 5200, loss[loss=0.1695, simple_loss=0.2464, pruned_loss=0.0463, over 4902.00 frames. ], tot_loss[loss=0.1734, simple_loss=0.2446, pruned_loss=0.0511, over 957593.11 frames. ], batch size: 37, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:55:34,193 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.084e+02 1.563e+02 1.762e+02 2.093e+02 3.679e+02, threshold=3.523e+02, percent-clipped=1.0 +2023-03-27 06:56:06,165 INFO [finetune.py:976] (5/7) Epoch 25, batch 5250, loss[loss=0.1736, simple_loss=0.2431, pruned_loss=0.05205, over 4798.00 frames. ], tot_loss[loss=0.175, simple_loss=0.2464, pruned_loss=0.05178, over 955804.79 frames. 
], batch size: 40, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:56:12,122 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142724.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:56:39,093 INFO [finetune.py:976] (5/7) Epoch 25, batch 5300, loss[loss=0.2347, simple_loss=0.2986, pruned_loss=0.08534, over 4837.00 frames. ], tot_loss[loss=0.1764, simple_loss=0.2484, pruned_loss=0.05226, over 954826.46 frames. ], batch size: 44, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:56:40,273 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.031e+02 1.558e+02 1.826e+02 2.127e+02 3.045e+02, threshold=3.651e+02, percent-clipped=0.0 +2023-03-27 06:56:51,745 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142785.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:57:20,000 INFO [finetune.py:976] (5/7) Epoch 25, batch 5350, loss[loss=0.1515, simple_loss=0.2282, pruned_loss=0.0374, over 4689.00 frames. ], tot_loss[loss=0.176, simple_loss=0.2484, pruned_loss=0.0518, over 956582.53 frames. ], batch size: 23, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:57:52,542 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8997, 1.7936, 1.5173, 1.4116, 1.8916, 1.6435, 1.8490, 1.8676], + device='cuda:5'), covar=tensor([0.1548, 0.1970, 0.3198, 0.2570, 0.2701, 0.1830, 0.2737, 0.1825], + device='cuda:5'), in_proj_covar=tensor([0.0188, 0.0190, 0.0235, 0.0254, 0.0249, 0.0206, 0.0214, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:58:06,041 INFO [finetune.py:976] (5/7) Epoch 25, batch 5400, loss[loss=0.1588, simple_loss=0.2318, pruned_loss=0.04291, over 4762.00 frames. ], tot_loss[loss=0.1734, simple_loss=0.2455, pruned_loss=0.05062, over 957088.29 frames. ], batch size: 28, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:58:07,256 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.487e+02 1.682e+02 2.190e+02 4.832e+02, threshold=3.364e+02, percent-clipped=1.0 +2023-03-27 06:58:10,394 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6459, 1.5181, 1.3579, 1.7351, 1.9893, 1.6797, 1.3660, 1.3656], + device='cuda:5'), covar=tensor([0.2292, 0.2134, 0.2120, 0.1737, 0.1701, 0.1401, 0.2624, 0.2099], + device='cuda:5'), in_proj_covar=tensor([0.0245, 0.0211, 0.0216, 0.0199, 0.0245, 0.0193, 0.0218, 0.0206], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:58:17,145 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-03-27 06:58:18,132 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8205, 1.7476, 1.5335, 1.9464, 2.2151, 1.9106, 1.5863, 1.4895], + device='cuda:5'), covar=tensor([0.2137, 0.1904, 0.1890, 0.1680, 0.1686, 0.1212, 0.2296, 0.2000], + device='cuda:5'), in_proj_covar=tensor([0.0245, 0.0211, 0.0216, 0.0199, 0.0246, 0.0193, 0.0218, 0.0206], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:58:21,853 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.19 vs. limit=5.0 +2023-03-27 06:58:38,660 INFO [finetune.py:976] (5/7) Epoch 25, batch 5450, loss[loss=0.1664, simple_loss=0.2378, pruned_loss=0.04748, over 4876.00 frames. ], tot_loss[loss=0.1715, simple_loss=0.2427, pruned_loss=0.05012, over 956137.07 frames. 
], batch size: 31, lr: 2.99e-03, grad_scale: 32.0 +2023-03-27 06:58:47,647 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=142929.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 06:59:08,821 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-03-27 06:59:09,008 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8651, 1.7122, 2.0723, 1.3779, 1.8661, 2.0901, 1.6161, 2.1490], + device='cuda:5'), covar=tensor([0.1417, 0.2169, 0.1638, 0.1928, 0.0955, 0.1460, 0.2720, 0.0913], + device='cuda:5'), in_proj_covar=tensor([0.0189, 0.0204, 0.0190, 0.0188, 0.0172, 0.0211, 0.0214, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 06:59:11,890 INFO [finetune.py:976] (5/7) Epoch 25, batch 5500, loss[loss=0.1544, simple_loss=0.2203, pruned_loss=0.0442, over 4160.00 frames. ], tot_loss[loss=0.1693, simple_loss=0.2399, pruned_loss=0.04935, over 953507.50 frames. ], batch size: 18, lr: 2.99e-03, grad_scale: 16.0 +2023-03-27 06:59:12,091 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-03-27 06:59:13,717 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.198e+01 1.372e+02 1.708e+02 2.223e+02 4.314e+02, threshold=3.415e+02, percent-clipped=3.0 +2023-03-27 06:59:46,280 INFO [finetune.py:976] (5/7) Epoch 25, batch 5550, loss[loss=0.2101, simple_loss=0.2842, pruned_loss=0.06797, over 4822.00 frames. ], tot_loss[loss=0.1704, simple_loss=0.241, pruned_loss=0.04994, over 950705.49 frames. ], batch size: 40, lr: 2.99e-03, grad_scale: 16.0 +2023-03-27 07:00:29,925 INFO [finetune.py:976] (5/7) Epoch 25, batch 5600, loss[loss=0.1831, simple_loss=0.2554, pruned_loss=0.05537, over 4762.00 frames. ], tot_loss[loss=0.1747, simple_loss=0.2462, pruned_loss=0.05159, over 952312.63 frames. ], batch size: 26, lr: 2.99e-03, grad_scale: 16.0 +2023-03-27 07:00:31,669 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.665e+01 1.700e+02 1.937e+02 2.306e+02 4.675e+02, threshold=3.875e+02, percent-clipped=1.0 +2023-03-27 07:00:38,743 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=143080.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:00:54,346 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7851, 1.2011, 1.8170, 1.7984, 1.6048, 1.5559, 1.7782, 1.7492], + device='cuda:5'), covar=tensor([0.3541, 0.3507, 0.2767, 0.3033, 0.3980, 0.3476, 0.3561, 0.2609], + device='cuda:5'), in_proj_covar=tensor([0.0263, 0.0246, 0.0265, 0.0291, 0.0292, 0.0269, 0.0298, 0.0249], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:01:00,342 INFO [finetune.py:976] (5/7) Epoch 25, batch 5650, loss[loss=0.1583, simple_loss=0.2414, pruned_loss=0.03764, over 4873.00 frames. ], tot_loss[loss=0.1764, simple_loss=0.2489, pruned_loss=0.05197, over 954284.12 frames. ], batch size: 34, lr: 2.99e-03, grad_scale: 16.0 +2023-03-27 07:01:29,842 INFO [finetune.py:976] (5/7) Epoch 25, batch 5700, loss[loss=0.1515, simple_loss=0.209, pruned_loss=0.047, over 4253.00 frames. ], tot_loss[loss=0.1731, simple_loss=0.2444, pruned_loss=0.05089, over 932327.24 frames. 
], batch size: 18, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:01:31,570 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.213e+01 1.339e+02 1.671e+02 2.043e+02 4.216e+02, threshold=3.342e+02, percent-clipped=1.0 +2023-03-27 07:01:36,377 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2577, 1.9767, 2.4184, 1.6870, 2.0726, 2.4883, 1.8931, 2.4689], + device='cuda:5'), covar=tensor([0.1021, 0.1890, 0.1143, 0.1575, 0.0951, 0.1003, 0.2426, 0.0763], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0205, 0.0190, 0.0189, 0.0173, 0.0212, 0.0215, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:01:58,299 INFO [finetune.py:976] (5/7) Epoch 26, batch 0, loss[loss=0.1922, simple_loss=0.2572, pruned_loss=0.06361, over 4192.00 frames. ], tot_loss[loss=0.1922, simple_loss=0.2572, pruned_loss=0.06361, over 4192.00 frames. ], batch size: 66, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:01:58,299 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-27 07:02:01,188 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8441, 1.7495, 2.0587, 1.3946, 1.7224, 2.0699, 1.7102, 2.1850], + device='cuda:5'), covar=tensor([0.1258, 0.2316, 0.1201, 0.1729, 0.1055, 0.1333, 0.3029, 0.0863], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0205, 0.0190, 0.0190, 0.0173, 0.0212, 0.0215, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:02:03,225 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5636, 1.3463, 1.3464, 1.4392, 1.7082, 1.6800, 1.4344, 1.3393], + device='cuda:5'), covar=tensor([0.0516, 0.0383, 0.0655, 0.0395, 0.0318, 0.0440, 0.0395, 0.0478], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0107, 0.0146, 0.0112, 0.0102, 0.0116, 0.0104, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.8500e-05, 8.1959e-05, 1.1422e-04, 8.5531e-05, 7.8734e-05, 8.5958e-05, + 7.7183e-05, 8.6065e-05], device='cuda:5') +2023-03-27 07:02:06,849 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1847, 2.0585, 2.0109, 1.8330, 2.0215, 2.1427, 2.0847, 2.6628], + device='cuda:5'), covar=tensor([0.3629, 0.4398, 0.3261, 0.3410, 0.3851, 0.2448, 0.3446, 0.1841], + device='cuda:5'), in_proj_covar=tensor([0.0291, 0.0265, 0.0237, 0.0278, 0.0260, 0.0230, 0.0257, 0.0239], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:02:14,280 INFO [finetune.py:1010] (5/7) Epoch 26, validation: loss=0.1591, simple_loss=0.2269, pruned_loss=0.04565, over 2265189.00 frames. +2023-03-27 07:02:14,281 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-27 07:02:43,898 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=143229.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:03:00,612 INFO [finetune.py:976] (5/7) Epoch 26, batch 50, loss[loss=0.1976, simple_loss=0.269, pruned_loss=0.06312, over 4908.00 frames. ], tot_loss[loss=0.1741, simple_loss=0.2465, pruned_loss=0.05088, over 217647.43 frames. 
], batch size: 46, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:03:13,064 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.6513, 1.7181, 1.6788, 0.9505, 1.9134, 2.0522, 1.9470, 1.5655], + device='cuda:5'), covar=tensor([0.0901, 0.0674, 0.0601, 0.0594, 0.0364, 0.0537, 0.0337, 0.0689], + device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0147, 0.0128, 0.0122, 0.0130, 0.0129, 0.0141, 0.0148], + device='cuda:5'), out_proj_covar=tensor([8.8943e-05, 1.0601e-04, 9.1068e-05, 8.5568e-05, 9.0961e-05, 9.1380e-05, + 1.0051e-04, 1.0563e-04], device='cuda:5') +2023-03-27 07:03:18,453 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.036e+02 1.460e+02 1.766e+02 2.058e+02 4.416e+02, threshold=3.532e+02, percent-clipped=3.0 +2023-03-27 07:03:19,198 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2996, 2.0816, 1.7758, 2.0618, 2.1955, 1.9434, 2.4515, 2.2294], + device='cuda:5'), covar=tensor([0.1359, 0.2183, 0.3077, 0.2464, 0.2733, 0.1746, 0.2767, 0.1691], + device='cuda:5'), in_proj_covar=tensor([0.0189, 0.0191, 0.0237, 0.0255, 0.0251, 0.0207, 0.0215, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:03:24,478 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=143277.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:03:30,582 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4876, 2.3286, 1.9987, 2.4585, 2.4190, 2.1158, 2.7054, 2.5044], + device='cuda:5'), covar=tensor([0.1381, 0.1955, 0.2891, 0.2401, 0.2623, 0.1802, 0.2523, 0.1876], + device='cuda:5'), in_proj_covar=tensor([0.0189, 0.0191, 0.0237, 0.0255, 0.0252, 0.0208, 0.0216, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:03:34,113 INFO [finetune.py:976] (5/7) Epoch 26, batch 100, loss[loss=0.1317, simple_loss=0.2078, pruned_loss=0.02783, over 4753.00 frames. ], tot_loss[loss=0.1702, simple_loss=0.242, pruned_loss=0.04919, over 381656.17 frames. ], batch size: 26, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:03:38,740 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1016, 1.7885, 2.2833, 1.5835, 1.9995, 2.3545, 1.7137, 2.3847], + device='cuda:5'), covar=tensor([0.1129, 0.2168, 0.1443, 0.1759, 0.1039, 0.1249, 0.2950, 0.0850], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0205, 0.0191, 0.0190, 0.0174, 0.0213, 0.0216, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:04:07,508 INFO [finetune.py:976] (5/7) Epoch 26, batch 150, loss[loss=0.1441, simple_loss=0.2197, pruned_loss=0.03428, over 4898.00 frames. ], tot_loss[loss=0.1683, simple_loss=0.238, pruned_loss=0.04926, over 506875.35 frames. 
], batch size: 35, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:04:20,290 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7667, 3.7934, 3.6336, 1.6267, 3.9653, 2.8914, 0.8765, 2.7576], + device='cuda:5'), covar=tensor([0.2195, 0.1763, 0.1469, 0.3467, 0.0884, 0.0928, 0.4368, 0.1388], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0178, 0.0161, 0.0130, 0.0160, 0.0123, 0.0148, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 07:04:25,695 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.744e+01 1.335e+02 1.679e+02 2.114e+02 2.886e+02, threshold=3.358e+02, percent-clipped=0.0 +2023-03-27 07:04:33,624 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=143380.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:04:41,259 INFO [finetune.py:976] (5/7) Epoch 26, batch 200, loss[loss=0.1862, simple_loss=0.2604, pruned_loss=0.05599, over 4910.00 frames. ], tot_loss[loss=0.1683, simple_loss=0.2383, pruned_loss=0.04912, over 607064.28 frames. ], batch size: 36, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:04:46,871 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=143400.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 07:05:05,299 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=143428.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:05:22,140 INFO [finetune.py:976] (5/7) Epoch 26, batch 250, loss[loss=0.1918, simple_loss=0.2716, pruned_loss=0.05595, over 4817.00 frames. ], tot_loss[loss=0.1701, simple_loss=0.2409, pruned_loss=0.04967, over 685439.27 frames. ], batch size: 33, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:05:48,883 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=143461.0, num_to_drop=1, layers_to_drop={3} +2023-03-27 07:05:53,067 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.741e+01 1.618e+02 1.961e+02 2.394e+02 5.476e+02, threshold=3.922e+02, percent-clipped=2.0 +2023-03-27 07:05:59,913 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=143473.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 07:06:12,189 INFO [finetune.py:976] (5/7) Epoch 26, batch 300, loss[loss=0.1693, simple_loss=0.2519, pruned_loss=0.04341, over 4800.00 frames. ], tot_loss[loss=0.1711, simple_loss=0.2434, pruned_loss=0.04943, over 746214.29 frames. ], batch size: 29, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:06:16,025 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-03-27 07:06:40,195 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=143534.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 07:06:44,913 INFO [finetune.py:976] (5/7) Epoch 26, batch 350, loss[loss=0.2002, simple_loss=0.279, pruned_loss=0.06069, over 4831.00 frames. ], tot_loss[loss=0.1739, simple_loss=0.2464, pruned_loss=0.05065, over 793833.60 frames. 
], batch size: 33, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:06:51,527 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6552, 2.4771, 2.3329, 1.6258, 2.5795, 1.9947, 1.8490, 2.3136], + device='cuda:5'), covar=tensor([0.1168, 0.0724, 0.1517, 0.1889, 0.1432, 0.1818, 0.1964, 0.1028], + device='cuda:5'), in_proj_covar=tensor([0.0173, 0.0194, 0.0202, 0.0184, 0.0212, 0.0213, 0.0226, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:07:03,059 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.042e+02 1.441e+02 1.724e+02 2.077e+02 3.544e+02, threshold=3.448e+02, percent-clipped=0.0 +2023-03-27 07:07:18,107 INFO [finetune.py:976] (5/7) Epoch 26, batch 400, loss[loss=0.1639, simple_loss=0.2295, pruned_loss=0.04914, over 4080.00 frames. ], tot_loss[loss=0.1746, simple_loss=0.2474, pruned_loss=0.05087, over 830122.12 frames. ], batch size: 65, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:07:40,638 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.62 vs. limit=5.0 +2023-03-27 07:07:54,069 INFO [finetune.py:976] (5/7) Epoch 26, batch 450, loss[loss=0.1568, simple_loss=0.2238, pruned_loss=0.04493, over 4328.00 frames. ], tot_loss[loss=0.1734, simple_loss=0.2459, pruned_loss=0.05049, over 856776.86 frames. ], batch size: 65, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:07:58,551 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0780, 1.9400, 2.0973, 1.3255, 2.1170, 2.1485, 2.1303, 1.6000], + device='cuda:5'), covar=tensor([0.0640, 0.0776, 0.0723, 0.0915, 0.0826, 0.0734, 0.0638, 0.1343], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0137, 0.0142, 0.0119, 0.0128, 0.0138, 0.0140, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:08:22,190 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.245e+01 1.544e+02 1.809e+02 2.165e+02 3.752e+02, threshold=3.619e+02, percent-clipped=3.0 +2023-03-27 07:08:37,482 INFO [finetune.py:976] (5/7) Epoch 26, batch 500, loss[loss=0.1687, simple_loss=0.2259, pruned_loss=0.0558, over 4156.00 frames. ], tot_loss[loss=0.1704, simple_loss=0.2426, pruned_loss=0.04914, over 879880.58 frames. ], batch size: 18, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:09:00,520 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7773, 1.9570, 1.6471, 1.6145, 2.3238, 2.3460, 1.9738, 1.8880], + device='cuda:5'), covar=tensor([0.0506, 0.0385, 0.0611, 0.0378, 0.0290, 0.0552, 0.0353, 0.0404], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0107, 0.0146, 0.0111, 0.0101, 0.0115, 0.0102, 0.0112], + device='cuda:5'), out_proj_covar=tensor([7.8055e-05, 8.1436e-05, 1.1360e-04, 8.4894e-05, 7.8390e-05, 8.5180e-05, + 7.5851e-05, 8.5470e-05], device='cuda:5') +2023-03-27 07:09:11,112 INFO [finetune.py:976] (5/7) Epoch 26, batch 550, loss[loss=0.1733, simple_loss=0.2393, pruned_loss=0.0537, over 4896.00 frames. ], tot_loss[loss=0.1682, simple_loss=0.2398, pruned_loss=0.04832, over 897835.98 frames. 
], batch size: 35, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:09:20,249 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=143756.0, num_to_drop=1, layers_to_drop={3} +2023-03-27 07:09:28,910 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.175e+01 1.443e+02 1.723e+02 1.984e+02 5.074e+02, threshold=3.446e+02, percent-clipped=2.0 +2023-03-27 07:09:44,564 INFO [finetune.py:976] (5/7) Epoch 26, batch 600, loss[loss=0.1634, simple_loss=0.2291, pruned_loss=0.04888, over 4689.00 frames. ], tot_loss[loss=0.1685, simple_loss=0.2402, pruned_loss=0.04836, over 909513.61 frames. ], batch size: 23, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:09:56,196 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9769, 4.5990, 4.3092, 2.4703, 4.6096, 3.5167, 0.7902, 3.3040], + device='cuda:5'), covar=tensor([0.2357, 0.2336, 0.1497, 0.3219, 0.0913, 0.0886, 0.5062, 0.1509], + device='cuda:5'), in_proj_covar=tensor([0.0149, 0.0178, 0.0160, 0.0129, 0.0160, 0.0123, 0.0148, 0.0123], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 07:10:09,715 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=143829.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 07:10:17,584 INFO [finetune.py:976] (5/7) Epoch 26, batch 650, loss[loss=0.1663, simple_loss=0.2378, pruned_loss=0.04743, over 4904.00 frames. ], tot_loss[loss=0.1691, simple_loss=0.2418, pruned_loss=0.04826, over 920252.88 frames. ], batch size: 35, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:10:41,257 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.012e+02 1.600e+02 1.821e+02 2.293e+02 5.159e+02, threshold=3.642e+02, percent-clipped=4.0 +2023-03-27 07:11:12,407 INFO [finetune.py:976] (5/7) Epoch 26, batch 700, loss[loss=0.1428, simple_loss=0.2261, pruned_loss=0.02973, over 4862.00 frames. ], tot_loss[loss=0.1707, simple_loss=0.2434, pruned_loss=0.049, over 927534.92 frames. ], batch size: 34, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:11:50,670 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.52 vs. limit=2.0 +2023-03-27 07:11:52,960 INFO [finetune.py:976] (5/7) Epoch 26, batch 750, loss[loss=0.1689, simple_loss=0.249, pruned_loss=0.04435, over 4920.00 frames. ], tot_loss[loss=0.1707, simple_loss=0.2441, pruned_loss=0.04867, over 935602.26 frames. ], batch size: 38, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:12:09,863 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.082e+02 1.567e+02 1.788e+02 2.169e+02 3.888e+02, threshold=3.576e+02, percent-clipped=1.0 +2023-03-27 07:12:26,436 INFO [finetune.py:976] (5/7) Epoch 26, batch 800, loss[loss=0.1557, simple_loss=0.2255, pruned_loss=0.04299, over 4825.00 frames. ], tot_loss[loss=0.1708, simple_loss=0.2445, pruned_loss=0.04856, over 941105.24 frames. 
], batch size: 30, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:12:26,565 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8176, 1.5224, 1.2348, 1.2977, 1.9823, 2.0696, 1.7195, 1.4702], + device='cuda:5'), covar=tensor([0.0318, 0.0437, 0.0849, 0.0484, 0.0250, 0.0437, 0.0349, 0.0500], + device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0107, 0.0147, 0.0111, 0.0102, 0.0116, 0.0103, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.8885e-05, 8.1927e-05, 1.1450e-04, 8.5344e-05, 7.9154e-05, 8.5640e-05, + 7.6408e-05, 8.6028e-05], device='cuda:5') +2023-03-27 07:12:56,495 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1736, 2.0383, 2.1097, 1.4602, 2.0613, 2.1922, 2.2355, 1.6847], + device='cuda:5'), covar=tensor([0.0571, 0.0613, 0.0666, 0.0850, 0.0770, 0.0583, 0.0499, 0.1140], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0138, 0.0142, 0.0120, 0.0129, 0.0139, 0.0141, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:13:00,657 INFO [finetune.py:976] (5/7) Epoch 26, batch 850, loss[loss=0.1641, simple_loss=0.2368, pruned_loss=0.04568, over 4819.00 frames. ], tot_loss[loss=0.1692, simple_loss=0.2425, pruned_loss=0.04796, over 943865.74 frames. ], batch size: 40, lr: 2.98e-03, grad_scale: 16.0 +2023-03-27 07:13:09,698 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=144056.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 07:13:16,918 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.067e+02 1.460e+02 1.746e+02 2.115e+02 7.519e+02, threshold=3.492e+02, percent-clipped=2.0 +2023-03-27 07:13:43,957 INFO [finetune.py:976] (5/7) Epoch 26, batch 900, loss[loss=0.1764, simple_loss=0.2478, pruned_loss=0.05252, over 4862.00 frames. ], tot_loss[loss=0.1668, simple_loss=0.2393, pruned_loss=0.04714, over 945101.25 frames. ], batch size: 31, lr: 2.97e-03, grad_scale: 16.0 +2023-03-27 07:13:47,590 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-27 07:13:51,330 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=144104.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 07:13:57,155 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. 
limit=2.0 +2023-03-27 07:14:00,963 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0367, 1.0082, 0.9344, 1.1588, 1.2063, 1.1571, 1.0187, 0.9469], + device='cuda:5'), covar=tensor([0.0447, 0.0320, 0.0667, 0.0328, 0.0314, 0.0517, 0.0347, 0.0453], + device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0107, 0.0147, 0.0112, 0.0102, 0.0116, 0.0104, 0.0114], + device='cuda:5'), out_proj_covar=tensor([7.9036e-05, 8.2097e-05, 1.1483e-04, 8.5675e-05, 7.9470e-05, 8.6025e-05, + 7.6873e-05, 8.6522e-05], device='cuda:5') +2023-03-27 07:14:07,459 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=144129.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 07:14:09,744 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1848, 1.7544, 2.3657, 1.6947, 2.1597, 2.4893, 1.6860, 2.5986], + device='cuda:5'), covar=tensor([0.1290, 0.2120, 0.1401, 0.1833, 0.0939, 0.1271, 0.2978, 0.0747], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0204, 0.0191, 0.0189, 0.0173, 0.0211, 0.0215, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:14:16,861 INFO [finetune.py:976] (5/7) Epoch 26, batch 950, loss[loss=0.1895, simple_loss=0.2527, pruned_loss=0.06319, over 4811.00 frames. ], tot_loss[loss=0.1672, simple_loss=0.2389, pruned_loss=0.04776, over 948233.27 frames. ], batch size: 38, lr: 2.97e-03, grad_scale: 16.0 +2023-03-27 07:14:33,141 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.128e+02 1.468e+02 1.742e+02 2.065e+02 3.876e+02, threshold=3.485e+02, percent-clipped=2.0 +2023-03-27 07:14:34,474 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3617, 1.3329, 1.7397, 1.6180, 1.4802, 3.2257, 1.4111, 1.4592], + device='cuda:5'), covar=tensor([0.1016, 0.1747, 0.1143, 0.0953, 0.1595, 0.0262, 0.1408, 0.1814], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0073, 0.0076, 0.0091, 0.0080, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 07:14:37,446 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.9974, 1.8287, 1.8003, 0.8293, 2.1827, 2.3092, 2.0262, 1.6900], + device='cuda:5'), covar=tensor([0.0925, 0.0703, 0.0534, 0.0691, 0.0505, 0.0641, 0.0533, 0.0681], + device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0146, 0.0127, 0.0121, 0.0129, 0.0128, 0.0141, 0.0147], + device='cuda:5'), out_proj_covar=tensor([8.8755e-05, 1.0528e-04, 9.0243e-05, 8.5161e-05, 9.0300e-05, 9.0721e-05, + 1.0042e-04, 1.0523e-04], device='cuda:5') +2023-03-27 07:14:39,230 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=144177.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 07:14:50,352 INFO [finetune.py:976] (5/7) Epoch 26, batch 1000, loss[loss=0.1876, simple_loss=0.2495, pruned_loss=0.06283, over 4795.00 frames. ], tot_loss[loss=0.1685, simple_loss=0.2403, pruned_loss=0.04834, over 950178.84 frames. ], batch size: 25, lr: 2.97e-03, grad_scale: 16.0 +2023-03-27 07:15:00,545 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-27 07:15:22,316 INFO [finetune.py:976] (5/7) Epoch 26, batch 1050, loss[loss=0.166, simple_loss=0.242, pruned_loss=0.04496, over 4905.00 frames. ], tot_loss[loss=0.1701, simple_loss=0.2426, pruned_loss=0.04885, over 952341.83 frames. 
], batch size: 37, lr: 2.97e-03, grad_scale: 16.0 +2023-03-27 07:15:35,400 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6509, 1.6964, 1.4434, 1.8006, 1.9413, 1.7594, 1.2882, 1.3953], + device='cuda:5'), covar=tensor([0.2222, 0.1889, 0.1967, 0.1660, 0.1701, 0.1237, 0.2479, 0.1857], + device='cuda:5'), in_proj_covar=tensor([0.0245, 0.0209, 0.0215, 0.0198, 0.0244, 0.0191, 0.0216, 0.0205], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:15:40,006 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.069e+02 1.483e+02 1.785e+02 2.219e+02 5.161e+02, threshold=3.570e+02, percent-clipped=2.0 +2023-03-27 07:16:01,566 INFO [finetune.py:976] (5/7) Epoch 26, batch 1100, loss[loss=0.186, simple_loss=0.2433, pruned_loss=0.06429, over 4715.00 frames. ], tot_loss[loss=0.1721, simple_loss=0.2448, pruned_loss=0.0497, over 953107.42 frames. ], batch size: 23, lr: 2.97e-03, grad_scale: 16.0 +2023-03-27 07:16:56,475 INFO [finetune.py:976] (5/7) Epoch 26, batch 1150, loss[loss=0.1649, simple_loss=0.2405, pruned_loss=0.04469, over 4775.00 frames. ], tot_loss[loss=0.1726, simple_loss=0.246, pruned_loss=0.04959, over 955253.73 frames. ], batch size: 51, lr: 2.97e-03, grad_scale: 16.0 +2023-03-27 07:17:04,214 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7074, 1.2754, 0.9324, 1.5358, 2.0060, 1.5045, 1.4834, 1.6604], + device='cuda:5'), covar=tensor([0.1344, 0.1843, 0.1804, 0.1162, 0.1955, 0.1905, 0.1366, 0.1794], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0094, 0.0110, 0.0092, 0.0120, 0.0093, 0.0098, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 07:17:13,864 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.499e+02 1.760e+02 2.197e+02 4.327e+02, threshold=3.521e+02, percent-clipped=2.0 +2023-03-27 07:17:14,557 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4733, 1.3801, 2.0969, 3.2835, 2.0915, 2.3841, 1.1608, 2.7444], + device='cuda:5'), covar=tensor([0.1776, 0.1479, 0.1270, 0.0581, 0.0841, 0.1496, 0.1742, 0.0447], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0116, 0.0134, 0.0165, 0.0101, 0.0136, 0.0125, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 07:17:14,578 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5480, 1.4710, 1.4350, 1.5437, 1.1430, 3.3683, 1.2536, 1.7943], + device='cuda:5'), covar=tensor([0.3287, 0.2498, 0.2204, 0.2440, 0.1782, 0.0218, 0.2836, 0.1232], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0121, 0.0124, 0.0113, 0.0096, 0.0095, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 07:17:28,569 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5544, 3.7748, 3.6154, 1.7683, 3.9098, 2.8939, 0.8643, 2.7305], + device='cuda:5'), covar=tensor([0.2639, 0.1863, 0.1606, 0.3440, 0.0928, 0.1051, 0.4357, 0.1489], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0179, 0.0161, 0.0132, 0.0162, 0.0125, 0.0150, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 07:17:30,186 INFO [finetune.py:976] (5/7) Epoch 26, batch 1200, loss[loss=0.1398, simple_loss=0.2132, 
pruned_loss=0.03322, over 4924.00 frames. ], tot_loss[loss=0.1709, simple_loss=0.2442, pruned_loss=0.04875, over 955017.38 frames. ], batch size: 42, lr: 2.97e-03, grad_scale: 16.0 +2023-03-27 07:17:43,720 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=144411.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:18:03,373 INFO [finetune.py:976] (5/7) Epoch 26, batch 1250, loss[loss=0.1548, simple_loss=0.2231, pruned_loss=0.04323, over 4940.00 frames. ], tot_loss[loss=0.1693, simple_loss=0.2418, pruned_loss=0.04839, over 954844.96 frames. ], batch size: 33, lr: 2.97e-03, grad_scale: 16.0 +2023-03-27 07:18:21,718 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.894e+01 1.606e+02 1.805e+02 2.274e+02 3.881e+02, threshold=3.611e+02, percent-clipped=1.0 +2023-03-27 07:18:24,300 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=144472.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:18:37,276 INFO [finetune.py:976] (5/7) Epoch 26, batch 1300, loss[loss=0.1648, simple_loss=0.2424, pruned_loss=0.04355, over 4942.00 frames. ], tot_loss[loss=0.1674, simple_loss=0.2392, pruned_loss=0.04776, over 954668.08 frames. ], batch size: 38, lr: 2.97e-03, grad_scale: 16.0 +2023-03-27 07:19:21,355 INFO [finetune.py:976] (5/7) Epoch 26, batch 1350, loss[loss=0.1418, simple_loss=0.225, pruned_loss=0.02934, over 4898.00 frames. ], tot_loss[loss=0.1659, simple_loss=0.2378, pruned_loss=0.04701, over 955526.04 frames. ], batch size: 35, lr: 2.97e-03, grad_scale: 16.0 +2023-03-27 07:19:24,830 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9739, 1.5670, 2.2054, 1.5150, 2.0127, 2.2531, 1.5977, 2.3290], + device='cuda:5'), covar=tensor([0.1273, 0.2438, 0.1219, 0.1590, 0.0981, 0.1410, 0.2989, 0.0779], + device='cuda:5'), in_proj_covar=tensor([0.0189, 0.0205, 0.0190, 0.0189, 0.0173, 0.0211, 0.0214, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:19:39,469 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.034e+02 1.499e+02 1.803e+02 2.073e+02 4.281e+02, threshold=3.607e+02, percent-clipped=1.0 +2023-03-27 07:19:54,499 INFO [finetune.py:976] (5/7) Epoch 26, batch 1400, loss[loss=0.1853, simple_loss=0.2685, pruned_loss=0.05104, over 4873.00 frames. ], tot_loss[loss=0.1697, simple_loss=0.2424, pruned_loss=0.04849, over 957057.88 frames. 
], batch size: 34, lr: 2.97e-03, grad_scale: 16.0 +2023-03-27 07:20:12,924 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4560, 2.3689, 1.9478, 2.4613, 2.4318, 2.1073, 2.6595, 2.4327], + device='cuda:5'), covar=tensor([0.1334, 0.1843, 0.2878, 0.2197, 0.2440, 0.1684, 0.2437, 0.1700], + device='cuda:5'), in_proj_covar=tensor([0.0188, 0.0189, 0.0235, 0.0253, 0.0249, 0.0206, 0.0214, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:20:14,054 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3954, 1.3316, 1.6433, 1.5405, 1.4490, 3.2591, 1.3165, 1.4507], + device='cuda:5'), covar=tensor([0.1033, 0.1890, 0.1178, 0.1040, 0.1758, 0.0252, 0.1500, 0.1885], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0073, 0.0076, 0.0092, 0.0081, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 07:20:27,739 INFO [finetune.py:976] (5/7) Epoch 26, batch 1450, loss[loss=0.1908, simple_loss=0.253, pruned_loss=0.06433, over 4890.00 frames. ], tot_loss[loss=0.1713, simple_loss=0.2443, pruned_loss=0.04914, over 956349.03 frames. ], batch size: 43, lr: 2.97e-03, grad_scale: 16.0 +2023-03-27 07:20:39,870 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-03-27 07:20:45,825 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.092e+01 1.571e+02 1.855e+02 2.334e+02 4.645e+02, threshold=3.710e+02, percent-clipped=2.0 +2023-03-27 07:21:01,363 INFO [finetune.py:976] (5/7) Epoch 26, batch 1500, loss[loss=0.1537, simple_loss=0.2267, pruned_loss=0.04031, over 4828.00 frames. ], tot_loss[loss=0.172, simple_loss=0.2451, pruned_loss=0.04946, over 957922.67 frames. ], batch size: 30, lr: 2.97e-03, grad_scale: 16.0 +2023-03-27 07:21:06,996 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2283, 2.0757, 1.7430, 1.8527, 2.1864, 1.9102, 2.2675, 2.2197], + device='cuda:5'), covar=tensor([0.1313, 0.2033, 0.2847, 0.2469, 0.2474, 0.1649, 0.2868, 0.1677], + device='cuda:5'), in_proj_covar=tensor([0.0188, 0.0190, 0.0235, 0.0253, 0.0249, 0.0206, 0.0214, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:21:29,498 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0918, 0.9604, 0.9653, 0.4173, 0.9599, 1.1498, 1.1495, 0.9534], + device='cuda:5'), covar=tensor([0.0894, 0.0600, 0.0586, 0.0510, 0.0533, 0.0593, 0.0381, 0.0638], + device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0146, 0.0127, 0.0121, 0.0129, 0.0128, 0.0141, 0.0147], + device='cuda:5'), out_proj_covar=tensor([8.8674e-05, 1.0523e-04, 9.0194e-05, 8.5367e-05, 9.0396e-05, 9.0581e-05, + 1.0028e-04, 1.0532e-04], device='cuda:5') +2023-03-27 07:21:50,335 INFO [finetune.py:976] (5/7) Epoch 26, batch 1550, loss[loss=0.1535, simple_loss=0.2256, pruned_loss=0.04077, over 4800.00 frames. ], tot_loss[loss=0.1713, simple_loss=0.2444, pruned_loss=0.04909, over 957757.16 frames. 
], batch size: 25, lr: 2.97e-03, grad_scale: 16.0 +2023-03-27 07:21:56,272 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.2039, 2.7972, 2.5808, 1.3154, 2.7376, 2.3489, 2.2781, 2.5243], + device='cuda:5'), covar=tensor([0.1175, 0.0870, 0.2057, 0.2473, 0.1946, 0.2389, 0.2006, 0.1329], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0192, 0.0200, 0.0182, 0.0210, 0.0211, 0.0224, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:22:18,425 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=144767.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:22:18,966 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.202e+02 1.547e+02 1.850e+02 2.044e+02 4.068e+02, threshold=3.700e+02, percent-clipped=1.0 +2023-03-27 07:22:35,463 INFO [finetune.py:976] (5/7) Epoch 26, batch 1600, loss[loss=0.1866, simple_loss=0.252, pruned_loss=0.06056, over 4748.00 frames. ], tot_loss[loss=0.1703, simple_loss=0.2427, pruned_loss=0.04897, over 958132.77 frames. ], batch size: 59, lr: 2.97e-03, grad_scale: 16.0 +2023-03-27 07:23:09,214 INFO [finetune.py:976] (5/7) Epoch 26, batch 1650, loss[loss=0.1308, simple_loss=0.2135, pruned_loss=0.02405, over 4762.00 frames. ], tot_loss[loss=0.1683, simple_loss=0.2404, pruned_loss=0.04809, over 959336.62 frames. ], batch size: 28, lr: 2.97e-03, grad_scale: 16.0 +2023-03-27 07:23:18,174 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([5.2208, 4.5162, 4.7941, 5.0516, 4.9843, 4.7113, 5.3317, 1.7032], + device='cuda:5'), covar=tensor([0.0754, 0.0884, 0.0797, 0.0906, 0.1075, 0.1789, 0.0530, 0.6049], + device='cuda:5'), in_proj_covar=tensor([0.0352, 0.0250, 0.0280, 0.0297, 0.0336, 0.0287, 0.0305, 0.0302], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:23:26,320 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.503e+01 1.460e+02 1.738e+02 2.010e+02 3.428e+02, threshold=3.475e+02, percent-clipped=0.0 +2023-03-27 07:23:42,420 INFO [finetune.py:976] (5/7) Epoch 26, batch 1700, loss[loss=0.1415, simple_loss=0.2271, pruned_loss=0.02791, over 4913.00 frames. ], tot_loss[loss=0.1658, simple_loss=0.2378, pruned_loss=0.04686, over 959979.23 frames. ], batch size: 37, lr: 2.97e-03, grad_scale: 16.0 +2023-03-27 07:24:08,190 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-03-27 07:24:17,866 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.8637, 1.5268, 1.3850, 0.9332, 1.6448, 1.6709, 1.7311, 1.4018], + device='cuda:5'), covar=tensor([0.0738, 0.0550, 0.0558, 0.0467, 0.0425, 0.0626, 0.0313, 0.0567], + device='cuda:5'), in_proj_covar=tensor([0.0121, 0.0147, 0.0127, 0.0122, 0.0129, 0.0128, 0.0141, 0.0147], + device='cuda:5'), out_proj_covar=tensor([8.8456e-05, 1.0551e-04, 9.0641e-05, 8.5453e-05, 9.0413e-05, 9.0856e-05, + 1.0042e-04, 1.0539e-04], device='cuda:5') +2023-03-27 07:24:25,824 INFO [finetune.py:976] (5/7) Epoch 26, batch 1750, loss[loss=0.1209, simple_loss=0.1841, pruned_loss=0.02882, over 4060.00 frames. ], tot_loss[loss=0.1676, simple_loss=0.2394, pruned_loss=0.04794, over 956866.86 frames. ], batch size: 17, lr: 2.97e-03, grad_scale: 16.0 +2023-03-27 07:24:27,084 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.55 vs. 
limit=2.0 +2023-03-27 07:24:38,387 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5410, 2.3922, 2.0840, 1.0908, 2.2362, 1.9167, 1.8510, 2.3242], + device='cuda:5'), covar=tensor([0.0816, 0.0811, 0.1627, 0.2022, 0.1431, 0.2238, 0.2099, 0.0946], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0192, 0.0200, 0.0181, 0.0210, 0.0210, 0.0223, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:24:42,911 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.967e+01 1.592e+02 1.823e+02 2.389e+02 4.337e+02, threshold=3.645e+02, percent-clipped=3.0 +2023-03-27 07:24:50,721 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0559, 0.9969, 0.9995, 0.4518, 0.9834, 1.1695, 1.1525, 0.9948], + device='cuda:5'), covar=tensor([0.0876, 0.0575, 0.0557, 0.0532, 0.0568, 0.0644, 0.0432, 0.0658], + device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0147, 0.0127, 0.0122, 0.0129, 0.0128, 0.0141, 0.0148], + device='cuda:5'), out_proj_covar=tensor([8.8570e-05, 1.0562e-04, 9.0678e-05, 8.5589e-05, 9.0323e-05, 9.1001e-05, + 1.0051e-04, 1.0567e-04], device='cuda:5') +2023-03-27 07:24:59,584 INFO [finetune.py:976] (5/7) Epoch 26, batch 1800, loss[loss=0.185, simple_loss=0.2557, pruned_loss=0.05716, over 4157.00 frames. ], tot_loss[loss=0.1707, simple_loss=0.2433, pruned_loss=0.04905, over 956378.48 frames. ], batch size: 65, lr: 2.97e-03, grad_scale: 32.0 +2023-03-27 07:25:02,623 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=144996.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:25:06,075 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.35 vs. limit=5.0 +2023-03-27 07:25:23,043 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145027.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:25:33,497 INFO [finetune.py:976] (5/7) Epoch 26, batch 1850, loss[loss=0.1817, simple_loss=0.2537, pruned_loss=0.05489, over 4837.00 frames. ], tot_loss[loss=0.1733, simple_loss=0.2461, pruned_loss=0.05027, over 957842.73 frames. ], batch size: 47, lr: 2.97e-03, grad_scale: 32.0 +2023-03-27 07:25:43,146 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145057.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 07:25:48,495 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145066.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:25:49,084 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145067.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:25:49,578 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.082e+02 1.533e+02 1.829e+02 2.183e+02 4.392e+02, threshold=3.659e+02, percent-clipped=3.0 +2023-03-27 07:26:04,139 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145088.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:26:06,521 INFO [finetune.py:976] (5/7) Epoch 26, batch 1900, loss[loss=0.1439, simple_loss=0.2178, pruned_loss=0.03503, over 4746.00 frames. ], tot_loss[loss=0.1749, simple_loss=0.2478, pruned_loss=0.05104, over 958678.55 frames. 
], batch size: 26, lr: 2.97e-03, grad_scale: 32.0 +2023-03-27 07:26:21,297 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=145115.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:26:24,430 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.84 vs. limit=5.0 +2023-03-27 07:26:29,556 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145127.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:26:46,687 INFO [finetune.py:976] (5/7) Epoch 26, batch 1950, loss[loss=0.1645, simple_loss=0.2321, pruned_loss=0.04842, over 4756.00 frames. ], tot_loss[loss=0.1739, simple_loss=0.2465, pruned_loss=0.05066, over 957477.26 frames. ], batch size: 27, lr: 2.97e-03, grad_scale: 32.0 +2023-03-27 07:27:15,914 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.125e+02 1.577e+02 1.831e+02 2.188e+02 4.363e+02, threshold=3.662e+02, percent-clipped=3.0 +2023-03-27 07:27:40,100 INFO [finetune.py:976] (5/7) Epoch 26, batch 2000, loss[loss=0.1339, simple_loss=0.2127, pruned_loss=0.02752, over 4763.00 frames. ], tot_loss[loss=0.1721, simple_loss=0.244, pruned_loss=0.0501, over 956717.37 frames. ], batch size: 26, lr: 2.97e-03, grad_scale: 32.0 +2023-03-27 07:28:04,633 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2616, 2.1253, 2.1583, 0.9487, 2.5219, 2.7128, 2.3460, 1.9900], + device='cuda:5'), covar=tensor([0.1187, 0.0873, 0.0606, 0.0817, 0.0612, 0.0678, 0.0510, 0.0712], + device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0147, 0.0127, 0.0122, 0.0129, 0.0129, 0.0141, 0.0148], + device='cuda:5'), out_proj_covar=tensor([8.8667e-05, 1.0577e-04, 9.0772e-05, 8.5941e-05, 9.0467e-05, 9.1235e-05, + 1.0054e-04, 1.0592e-04], device='cuda:5') +2023-03-27 07:28:09,876 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2318, 3.6882, 3.9660, 3.8806, 3.8439, 3.6046, 4.2994, 1.5062], + device='cuda:5'), covar=tensor([0.1267, 0.1902, 0.1708, 0.2098, 0.1939, 0.2664, 0.1314, 0.7737], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0249, 0.0279, 0.0294, 0.0336, 0.0287, 0.0304, 0.0302], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:28:13,278 INFO [finetune.py:976] (5/7) Epoch 26, batch 2050, loss[loss=0.1456, simple_loss=0.2247, pruned_loss=0.03322, over 4756.00 frames. ], tot_loss[loss=0.1702, simple_loss=0.2417, pruned_loss=0.04936, over 957344.51 frames. ], batch size: 23, lr: 2.97e-03, grad_scale: 32.0 +2023-03-27 07:28:22,526 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-03-27 07:28:30,376 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.187e+01 1.436e+02 1.792e+02 2.264e+02 4.038e+02, threshold=3.583e+02, percent-clipped=1.0 +2023-03-27 07:28:45,862 INFO [finetune.py:976] (5/7) Epoch 26, batch 2100, loss[loss=0.1605, simple_loss=0.2382, pruned_loss=0.04146, over 4818.00 frames. ], tot_loss[loss=0.1697, simple_loss=0.2412, pruned_loss=0.04913, over 958276.17 frames. ], batch size: 39, lr: 2.97e-03, grad_scale: 32.0 +2023-03-27 07:29:14,588 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-03-27 07:29:19,664 INFO [finetune.py:976] (5/7) Epoch 26, batch 2150, loss[loss=0.1237, simple_loss=0.2026, pruned_loss=0.02238, over 4760.00 frames. ], tot_loss[loss=0.1714, simple_loss=0.243, pruned_loss=0.04989, over 955664.21 frames. 
], batch size: 26, lr: 2.97e-03, grad_scale: 32.0 +2023-03-27 07:29:28,921 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145352.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 07:29:47,544 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.022e+02 1.494e+02 1.709e+02 2.298e+02 6.165e+02, threshold=3.419e+02, percent-clipped=3.0 +2023-03-27 07:29:48,886 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7926, 1.6535, 1.5465, 1.5840, 2.0523, 2.0066, 1.7532, 1.4977], + device='cuda:5'), covar=tensor([0.0390, 0.0371, 0.0593, 0.0366, 0.0246, 0.0450, 0.0398, 0.0452], + device='cuda:5'), in_proj_covar=tensor([0.0104, 0.0109, 0.0150, 0.0113, 0.0103, 0.0118, 0.0105, 0.0115], + device='cuda:5'), out_proj_covar=tensor([8.0434e-05, 8.3185e-05, 1.1677e-04, 8.6760e-05, 8.0094e-05, 8.7030e-05, + 7.7789e-05, 8.7475e-05], device='cuda:5') +2023-03-27 07:29:49,508 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145371.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:29:51,950 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4155, 1.4093, 1.6820, 1.6896, 1.5746, 3.2907, 1.4207, 1.5241], + device='cuda:5'), covar=tensor([0.1026, 0.1855, 0.1153, 0.0959, 0.1607, 0.0248, 0.1450, 0.1730], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0072, 0.0076, 0.0091, 0.0080, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 07:29:53,233 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.69 vs. limit=2.0 +2023-03-27 07:29:56,779 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145383.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:30:02,627 INFO [finetune.py:976] (5/7) Epoch 26, batch 2200, loss[loss=0.1814, simple_loss=0.2601, pruned_loss=0.05135, over 4815.00 frames. ], tot_loss[loss=0.1733, simple_loss=0.2455, pruned_loss=0.05056, over 953752.38 frames. ], batch size: 33, lr: 2.97e-03, grad_scale: 32.0 +2023-03-27 07:30:23,253 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145422.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:30:23,293 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5568, 1.4840, 1.4276, 1.5614, 1.0655, 3.1771, 1.2178, 1.5695], + device='cuda:5'), covar=tensor([0.3155, 0.2383, 0.2063, 0.2309, 0.1782, 0.0225, 0.2660, 0.1264], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0120, 0.0124, 0.0112, 0.0095, 0.0094, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 07:30:29,355 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145432.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:30:36,246 INFO [finetune.py:976] (5/7) Epoch 26, batch 2250, loss[loss=0.2221, simple_loss=0.2906, pruned_loss=0.07685, over 4745.00 frames. ], tot_loss[loss=0.1747, simple_loss=0.2472, pruned_loss=0.05116, over 954062.18 frames. 
], batch size: 54, lr: 2.97e-03, grad_scale: 32.0 +2023-03-27 07:30:53,945 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.189e+01 1.488e+02 1.760e+02 2.143e+02 3.776e+02, threshold=3.521e+02, percent-clipped=2.0 +2023-03-27 07:30:56,934 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4883, 1.0656, 0.7467, 1.2895, 1.9444, 0.7855, 1.2344, 1.2730], + device='cuda:5'), covar=tensor([0.1703, 0.2314, 0.1849, 0.1420, 0.2049, 0.1983, 0.1622, 0.2198], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0094, 0.0110, 0.0093, 0.0120, 0.0094, 0.0099, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 07:31:08,986 INFO [finetune.py:976] (5/7) Epoch 26, batch 2300, loss[loss=0.1478, simple_loss=0.226, pruned_loss=0.03476, over 4752.00 frames. ], tot_loss[loss=0.1735, simple_loss=0.2464, pruned_loss=0.05037, over 953984.78 frames. ], batch size: 27, lr: 2.97e-03, grad_scale: 32.0 +2023-03-27 07:31:11,768 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4005, 1.3255, 1.9432, 1.7047, 1.5384, 3.2730, 1.2571, 1.4165], + device='cuda:5'), covar=tensor([0.1168, 0.2136, 0.1410, 0.1038, 0.1710, 0.0305, 0.1750, 0.2135], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0072, 0.0076, 0.0090, 0.0080, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 07:31:15,884 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5611, 2.5829, 2.5158, 1.7285, 2.4341, 2.7181, 2.6438, 2.1531], + device='cuda:5'), covar=tensor([0.0544, 0.0537, 0.0702, 0.0867, 0.0951, 0.0694, 0.0654, 0.1053], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0137, 0.0141, 0.0119, 0.0128, 0.0139, 0.0140, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:31:41,358 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8581, 1.5977, 2.3291, 3.6711, 2.4452, 2.7776, 1.0551, 3.0642], + device='cuda:5'), covar=tensor([0.1600, 0.1324, 0.1227, 0.0454, 0.0794, 0.1822, 0.1760, 0.0375], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0133, 0.0164, 0.0101, 0.0135, 0.0125, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 07:31:42,487 INFO [finetune.py:976] (5/7) Epoch 26, batch 2350, loss[loss=0.1462, simple_loss=0.2163, pruned_loss=0.03802, over 4743.00 frames. ], tot_loss[loss=0.1718, simple_loss=0.2437, pruned_loss=0.04991, over 952744.82 frames. ], batch size: 54, lr: 2.96e-03, grad_scale: 32.0 +2023-03-27 07:32:06,624 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.008e+02 1.544e+02 1.840e+02 2.226e+02 4.643e+02, threshold=3.680e+02, percent-clipped=1.0 +2023-03-27 07:32:34,352 INFO [finetune.py:976] (5/7) Epoch 26, batch 2400, loss[loss=0.1739, simple_loss=0.2433, pruned_loss=0.05226, over 4890.00 frames. ], tot_loss[loss=0.1683, simple_loss=0.2397, pruned_loss=0.04841, over 952603.04 frames. ], batch size: 35, lr: 2.96e-03, grad_scale: 32.0 +2023-03-27 07:32:35,680 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145594.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:32:56,325 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.26 vs. 
limit=5.0 +2023-03-27 07:33:17,104 INFO [finetune.py:976] (5/7) Epoch 26, batch 2450, loss[loss=0.1566, simple_loss=0.2249, pruned_loss=0.04419, over 4836.00 frames. ], tot_loss[loss=0.167, simple_loss=0.238, pruned_loss=0.04797, over 954757.86 frames. ], batch size: 49, lr: 2.96e-03, grad_scale: 32.0 +2023-03-27 07:33:17,840 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145643.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:33:23,854 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145652.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 07:33:26,194 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145655.0, num_to_drop=1, layers_to_drop={3} +2023-03-27 07:33:34,920 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.521e+01 1.449e+02 1.824e+02 2.163e+02 4.630e+02, threshold=3.648e+02, percent-clipped=2.0 +2023-03-27 07:33:45,642 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145683.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:33:51,042 INFO [finetune.py:976] (5/7) Epoch 26, batch 2500, loss[loss=0.2161, simple_loss=0.2929, pruned_loss=0.06969, over 4742.00 frames. ], tot_loss[loss=0.1673, simple_loss=0.2389, pruned_loss=0.04786, over 954750.42 frames. ], batch size: 54, lr: 2.96e-03, grad_scale: 32.0 +2023-03-27 07:33:55,944 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=145700.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:33:58,927 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145704.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:34:11,655 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145722.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:34:15,144 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145727.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:34:17,566 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=145731.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:34:22,655 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.82 vs. limit=5.0 +2023-03-27 07:34:24,601 INFO [finetune.py:976] (5/7) Epoch 26, batch 2550, loss[loss=0.1752, simple_loss=0.2479, pruned_loss=0.0513, over 4864.00 frames. ], tot_loss[loss=0.1703, simple_loss=0.2427, pruned_loss=0.04892, over 953487.13 frames. ], batch size: 49, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:34:42,324 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.531e+01 1.551e+02 1.807e+02 2.106e+02 4.459e+02, threshold=3.615e+02, percent-clipped=2.0 +2023-03-27 07:34:43,552 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=145770.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:35:08,872 INFO [finetune.py:976] (5/7) Epoch 26, batch 2600, loss[loss=0.1784, simple_loss=0.2574, pruned_loss=0.04965, over 4810.00 frames. ], tot_loss[loss=0.1723, simple_loss=0.245, pruned_loss=0.04979, over 950277.68 frames. ], batch size: 38, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:35:21,812 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.01 vs. limit=5.0 +2023-03-27 07:35:26,699 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. 
limit=2.0 +2023-03-27 07:35:28,219 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145821.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 07:35:34,036 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1738, 3.6941, 3.9397, 3.8393, 3.7290, 3.5612, 4.3054, 1.4484], + device='cuda:5'), covar=tensor([0.1255, 0.1708, 0.1454, 0.1977, 0.2127, 0.2404, 0.1154, 0.7812], + device='cuda:5'), in_proj_covar=tensor([0.0349, 0.0247, 0.0278, 0.0294, 0.0335, 0.0286, 0.0303, 0.0300], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:35:42,707 INFO [finetune.py:976] (5/7) Epoch 26, batch 2650, loss[loss=0.1533, simple_loss=0.2325, pruned_loss=0.03703, over 4824.00 frames. ], tot_loss[loss=0.1737, simple_loss=0.2466, pruned_loss=0.0504, over 952177.31 frames. ], batch size: 39, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:35:45,828 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7846, 1.6568, 1.3906, 1.4384, 1.8231, 1.5442, 1.8113, 1.7551], + device='cuda:5'), covar=tensor([0.1418, 0.1954, 0.3095, 0.2508, 0.2640, 0.1800, 0.2659, 0.1863], + device='cuda:5'), in_proj_covar=tensor([0.0187, 0.0189, 0.0235, 0.0252, 0.0249, 0.0206, 0.0214, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:36:00,021 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.986e+01 1.512e+02 1.783e+02 2.110e+02 4.476e+02, threshold=3.566e+02, percent-clipped=1.0 +2023-03-27 07:36:09,563 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145882.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 07:36:16,424 INFO [finetune.py:976] (5/7) Epoch 26, batch 2700, loss[loss=0.2074, simple_loss=0.2664, pruned_loss=0.07421, over 4197.00 frames. ], tot_loss[loss=0.1731, simple_loss=0.246, pruned_loss=0.05008, over 951801.83 frames. ], batch size: 65, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:36:25,835 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-03-27 07:36:49,653 INFO [finetune.py:976] (5/7) Epoch 26, batch 2750, loss[loss=0.1532, simple_loss=0.2206, pruned_loss=0.04284, over 4852.00 frames. ], tot_loss[loss=0.1705, simple_loss=0.2426, pruned_loss=0.04925, over 950583.56 frames. ], batch size: 25, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:36:55,125 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145950.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 07:37:07,589 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.011e+02 1.569e+02 1.804e+02 2.200e+02 3.850e+02, threshold=3.609e+02, percent-clipped=2.0 +2023-03-27 07:37:29,450 INFO [finetune.py:976] (5/7) Epoch 26, batch 2800, loss[loss=0.1497, simple_loss=0.2245, pruned_loss=0.03746, over 4775.00 frames. ], tot_loss[loss=0.1684, simple_loss=0.24, pruned_loss=0.04839, over 952486.02 frames. 
], batch size: 26, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:37:39,469 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145999.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:38:14,451 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146027.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:38:24,986 INFO [finetune.py:976] (5/7) Epoch 26, batch 2850, loss[loss=0.1926, simple_loss=0.2619, pruned_loss=0.06163, over 4812.00 frames. ], tot_loss[loss=0.1691, simple_loss=0.2401, pruned_loss=0.04904, over 951477.71 frames. ], batch size: 45, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:38:37,499 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0820, 1.0027, 0.9936, 0.5379, 1.0424, 1.1865, 1.2041, 1.0157], + device='cuda:5'), covar=tensor([0.0835, 0.0689, 0.0638, 0.0472, 0.0599, 0.0719, 0.0428, 0.0776], + device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0147, 0.0127, 0.0122, 0.0130, 0.0129, 0.0141, 0.0148], + device='cuda:5'), out_proj_covar=tensor([8.8830e-05, 1.0603e-04, 9.0696e-05, 8.5779e-05, 9.0919e-05, 9.1163e-05, + 1.0073e-04, 1.0608e-04], device='cuda:5') +2023-03-27 07:38:42,244 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.185e+01 1.523e+02 1.819e+02 2.110e+02 4.930e+02, threshold=3.638e+02, percent-clipped=2.0 +2023-03-27 07:38:46,431 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=146075.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:38:58,299 INFO [finetune.py:976] (5/7) Epoch 26, batch 2900, loss[loss=0.2264, simple_loss=0.2978, pruned_loss=0.0775, over 4900.00 frames. ], tot_loss[loss=0.1722, simple_loss=0.2434, pruned_loss=0.05045, over 952801.64 frames. ], batch size: 36, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:39:31,498 INFO [finetune.py:976] (5/7) Epoch 26, batch 2950, loss[loss=0.2092, simple_loss=0.2743, pruned_loss=0.07209, over 4896.00 frames. ], tot_loss[loss=0.1736, simple_loss=0.2456, pruned_loss=0.0508, over 953615.27 frames. 
], batch size: 37, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:39:36,361 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146149.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:39:48,816 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9992, 1.8193, 2.3582, 1.5178, 2.1147, 2.3643, 1.8239, 2.4545], + device='cuda:5'), covar=tensor([0.1405, 0.1960, 0.1348, 0.1874, 0.1018, 0.1220, 0.2477, 0.0954], + device='cuda:5'), in_proj_covar=tensor([0.0191, 0.0207, 0.0193, 0.0189, 0.0174, 0.0213, 0.0217, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:39:49,300 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.099e+02 1.550e+02 1.859e+02 2.106e+02 3.478e+02, threshold=3.719e+02, percent-clipped=0.0 +2023-03-27 07:39:53,018 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7866, 1.5789, 2.1077, 1.3247, 2.0119, 2.1188, 1.4873, 2.2599], + device='cuda:5'), covar=tensor([0.1280, 0.2161, 0.1269, 0.1906, 0.0821, 0.1229, 0.2820, 0.0792], + device='cuda:5'), in_proj_covar=tensor([0.0191, 0.0207, 0.0193, 0.0189, 0.0174, 0.0213, 0.0217, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:39:54,691 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146177.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 07:40:04,822 INFO [finetune.py:976] (5/7) Epoch 26, batch 3000, loss[loss=0.1826, simple_loss=0.258, pruned_loss=0.05359, over 4847.00 frames. ], tot_loss[loss=0.174, simple_loss=0.2466, pruned_loss=0.05069, over 954509.13 frames. ], batch size: 44, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:40:04,822 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-27 07:40:19,935 INFO [finetune.py:1010] (5/7) Epoch 26, validation: loss=0.1577, simple_loss=0.2252, pruned_loss=0.04507, over 2265189.00 frames. +2023-03-27 07:40:19,936 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-27 07:40:35,421 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146210.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:40:38,422 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.71 vs. limit=2.0 +2023-03-27 07:40:56,714 INFO [finetune.py:976] (5/7) Epoch 26, batch 3050, loss[loss=0.1608, simple_loss=0.2425, pruned_loss=0.03954, over 4927.00 frames. ], tot_loss[loss=0.1737, simple_loss=0.2468, pruned_loss=0.05035, over 955086.97 frames. 
], batch size: 41, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:41:02,122 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146250.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:41:12,746 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2588, 1.3797, 1.6068, 1.0698, 1.3314, 1.5234, 1.3222, 1.6993], + device='cuda:5'), covar=tensor([0.1108, 0.1992, 0.1094, 0.1417, 0.0924, 0.1157, 0.2809, 0.0823], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0207, 0.0193, 0.0190, 0.0175, 0.0213, 0.0217, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:41:14,891 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.646e+01 1.408e+02 1.795e+02 2.163e+02 4.679e+02, threshold=3.589e+02, percent-clipped=3.0 +2023-03-27 07:41:29,847 INFO [finetune.py:976] (5/7) Epoch 26, batch 3100, loss[loss=0.1545, simple_loss=0.2248, pruned_loss=0.04211, over 4822.00 frames. ], tot_loss[loss=0.1724, simple_loss=0.245, pruned_loss=0.04994, over 956220.45 frames. ], batch size: 25, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:41:34,031 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=146298.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:41:34,685 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146299.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:42:02,610 INFO [finetune.py:976] (5/7) Epoch 26, batch 3150, loss[loss=0.1844, simple_loss=0.2557, pruned_loss=0.05649, over 4895.00 frames. ], tot_loss[loss=0.171, simple_loss=0.2428, pruned_loss=0.04959, over 955035.51 frames. ], batch size: 35, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:42:06,019 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9157, 1.9280, 1.9671, 1.2798, 1.9770, 2.0852, 1.9694, 1.6665], + device='cuda:5'), covar=tensor([0.0615, 0.0714, 0.0751, 0.0927, 0.0752, 0.0685, 0.0655, 0.1176], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0137, 0.0141, 0.0120, 0.0128, 0.0138, 0.0140, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:42:06,560 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=146347.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:42:10,686 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4206, 1.3783, 1.9658, 1.7458, 1.5582, 3.2200, 1.2332, 1.5186], + device='cuda:5'), covar=tensor([0.1106, 0.2054, 0.1397, 0.0947, 0.1670, 0.0333, 0.1791, 0.2158], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0073, 0.0076, 0.0091, 0.0081, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0005], + device='cuda:5') +2023-03-27 07:42:21,140 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.044e+02 1.464e+02 1.851e+02 2.163e+02 3.423e+02, threshold=3.701e+02, percent-clipped=0.0 +2023-03-27 07:42:38,056 INFO [finetune.py:976] (5/7) Epoch 26, batch 3200, loss[loss=0.1572, simple_loss=0.2273, pruned_loss=0.04351, over 4899.00 frames. ], tot_loss[loss=0.1683, simple_loss=0.2397, pruned_loss=0.04849, over 955461.84 frames. ], batch size: 35, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:42:38,355 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. 
limit=2.0 +2023-03-27 07:42:50,414 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7528, 3.8374, 3.6409, 1.8055, 3.9794, 2.9292, 0.8459, 2.7935], + device='cuda:5'), covar=tensor([0.2607, 0.2483, 0.1662, 0.3574, 0.1020, 0.1088, 0.4834, 0.1542], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0180, 0.0161, 0.0131, 0.0162, 0.0124, 0.0150, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 07:43:05,748 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0839, 1.4897, 2.0516, 2.1371, 1.8824, 1.8565, 2.0475, 2.0032], + device='cuda:5'), covar=tensor([0.3448, 0.3764, 0.3206, 0.3345, 0.4828, 0.3747, 0.4353, 0.2991], + device='cuda:5'), in_proj_covar=tensor([0.0267, 0.0247, 0.0268, 0.0295, 0.0295, 0.0272, 0.0302, 0.0252], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:43:35,344 INFO [finetune.py:976] (5/7) Epoch 26, batch 3250, loss[loss=0.2251, simple_loss=0.2839, pruned_loss=0.08312, over 4898.00 frames. ], tot_loss[loss=0.1694, simple_loss=0.2405, pruned_loss=0.04913, over 955767.18 frames. ], batch size: 43, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:43:53,730 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.041e+02 1.451e+02 1.808e+02 2.175e+02 4.535e+02, threshold=3.616e+02, percent-clipped=3.0 +2023-03-27 07:43:56,254 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8314, 1.5805, 2.3007, 3.3925, 2.1891, 2.5066, 1.0180, 2.8621], + device='cuda:5'), covar=tensor([0.1773, 0.1566, 0.1370, 0.0915, 0.0963, 0.1515, 0.2221, 0.0600], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0132, 0.0163, 0.0101, 0.0134, 0.0124, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 07:43:58,677 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146477.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 07:44:02,099 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6377, 1.4968, 1.3470, 1.8117, 2.0590, 1.7342, 1.2706, 1.3522], + device='cuda:5'), covar=tensor([0.2163, 0.2117, 0.1991, 0.1601, 0.1624, 0.1248, 0.2648, 0.1879], + device='cuda:5'), in_proj_covar=tensor([0.0246, 0.0211, 0.0216, 0.0199, 0.0247, 0.0192, 0.0219, 0.0205], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:44:02,164 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.87 vs. limit=5.0 +2023-03-27 07:44:08,660 INFO [finetune.py:976] (5/7) Epoch 26, batch 3300, loss[loss=0.2072, simple_loss=0.283, pruned_loss=0.0657, over 4874.00 frames. ], tot_loss[loss=0.1723, simple_loss=0.244, pruned_loss=0.05027, over 955263.90 frames. ], batch size: 44, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:44:17,130 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146505.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:44:30,709 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=146525.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 07:44:41,544 INFO [finetune.py:976] (5/7) Epoch 26, batch 3350, loss[loss=0.1765, simple_loss=0.242, pruned_loss=0.05551, over 4167.00 frames. ], tot_loss[loss=0.174, simple_loss=0.2465, pruned_loss=0.05079, over 955609.27 frames. 
], batch size: 65, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:44:53,455 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5498, 2.4666, 2.1615, 1.0972, 2.2634, 1.9326, 1.8522, 2.2923], + device='cuda:5'), covar=tensor([0.1010, 0.0762, 0.1593, 0.2135, 0.1433, 0.1945, 0.2030, 0.0961], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0191, 0.0199, 0.0181, 0.0208, 0.0209, 0.0223, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:45:00,358 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.852e+01 1.562e+02 1.841e+02 2.282e+02 4.006e+02, threshold=3.682e+02, percent-clipped=1.0 +2023-03-27 07:45:14,934 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146591.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:45:15,435 INFO [finetune.py:976] (5/7) Epoch 26, batch 3400, loss[loss=0.1746, simple_loss=0.2467, pruned_loss=0.05121, over 4916.00 frames. ], tot_loss[loss=0.1742, simple_loss=0.247, pruned_loss=0.05074, over 955964.88 frames. ], batch size: 37, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:45:46,300 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9189, 1.6306, 2.2108, 1.3951, 1.9909, 2.1254, 1.5474, 2.1957], + device='cuda:5'), covar=tensor([0.1249, 0.2053, 0.1265, 0.1876, 0.0938, 0.1314, 0.2696, 0.0849], + device='cuda:5'), in_proj_covar=tensor([0.0191, 0.0206, 0.0193, 0.0190, 0.0175, 0.0213, 0.0217, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:45:48,166 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146626.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:45:58,659 INFO [finetune.py:976] (5/7) Epoch 26, batch 3450, loss[loss=0.1879, simple_loss=0.2591, pruned_loss=0.05831, over 4892.00 frames. ], tot_loss[loss=0.1728, simple_loss=0.2456, pruned_loss=0.04998, over 954667.65 frames. 
], batch size: 32, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:46:03,013 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5828, 3.4684, 3.2830, 1.5709, 3.5623, 2.6210, 0.9528, 2.4375], + device='cuda:5'), covar=tensor([0.2357, 0.2145, 0.1778, 0.3438, 0.1194, 0.1093, 0.4388, 0.1558], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0181, 0.0162, 0.0131, 0.0163, 0.0125, 0.0151, 0.0125], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 07:46:04,885 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146652.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:46:17,062 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.040e+02 1.450e+02 1.708e+02 2.017e+02 4.995e+02, threshold=3.417e+02, percent-clipped=1.0 +2023-03-27 07:46:27,901 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8236, 3.8354, 3.6000, 1.8582, 3.8829, 2.8003, 1.0263, 2.7549], + device='cuda:5'), covar=tensor([0.2303, 0.1866, 0.1486, 0.3139, 0.1035, 0.1008, 0.4214, 0.1439], + device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0180, 0.0161, 0.0131, 0.0162, 0.0124, 0.0150, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 07:46:28,566 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146687.0, num_to_drop=1, layers_to_drop={3} +2023-03-27 07:46:32,389 INFO [finetune.py:976] (5/7) Epoch 26, batch 3500, loss[loss=0.2078, simple_loss=0.2685, pruned_loss=0.07356, over 4830.00 frames. ], tot_loss[loss=0.172, simple_loss=0.2442, pruned_loss=0.04993, over 954442.27 frames. ], batch size: 38, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:47:05,283 INFO [finetune.py:976] (5/7) Epoch 26, batch 3550, loss[loss=0.1885, simple_loss=0.249, pruned_loss=0.064, over 4909.00 frames. ], tot_loss[loss=0.1715, simple_loss=0.2427, pruned_loss=0.05009, over 954447.89 frames. ], batch size: 36, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:47:15,012 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6494, 1.6657, 1.3775, 1.6502, 1.9724, 1.9223, 1.5604, 1.4377], + device='cuda:5'), covar=tensor([0.0432, 0.0330, 0.0709, 0.0301, 0.0217, 0.0451, 0.0404, 0.0438], + device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0107, 0.0147, 0.0112, 0.0102, 0.0117, 0.0104, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.9298e-05, 8.2150e-05, 1.1478e-04, 8.5592e-05, 7.8677e-05, 8.6100e-05, + 7.7227e-05, 8.6056e-05], device='cuda:5') +2023-03-27 07:47:22,721 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.051e+02 1.430e+02 1.748e+02 2.315e+02 5.079e+02, threshold=3.497e+02, percent-clipped=6.0 +2023-03-27 07:47:28,057 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3909, 2.3329, 2.0640, 2.4572, 2.9427, 2.4106, 2.4332, 1.8217], + device='cuda:5'), covar=tensor([0.2162, 0.1836, 0.1867, 0.1680, 0.1678, 0.1106, 0.1878, 0.1888], + device='cuda:5'), in_proj_covar=tensor([0.0247, 0.0212, 0.0216, 0.0200, 0.0247, 0.0192, 0.0219, 0.0206], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:47:38,115 INFO [finetune.py:976] (5/7) Epoch 26, batch 3600, loss[loss=0.1533, simple_loss=0.2323, pruned_loss=0.0371, over 4920.00 frames. ], tot_loss[loss=0.1696, simple_loss=0.2407, pruned_loss=0.04927, over 953998.57 frames. 
], batch size: 37, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:47:47,325 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146805.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:48:17,837 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-03-27 07:48:26,742 INFO [finetune.py:976] (5/7) Epoch 26, batch 3650, loss[loss=0.182, simple_loss=0.255, pruned_loss=0.05447, over 4718.00 frames. ], tot_loss[loss=0.1718, simple_loss=0.243, pruned_loss=0.05024, over 953859.91 frames. ], batch size: 59, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:48:28,127 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146844.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:48:39,243 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=146853.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:48:53,568 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.488e+01 1.511e+02 1.813e+02 2.229e+02 3.524e+02, threshold=3.627e+02, percent-clipped=1.0 +2023-03-27 07:49:12,956 INFO [finetune.py:976] (5/7) Epoch 26, batch 3700, loss[loss=0.1946, simple_loss=0.255, pruned_loss=0.06712, over 4238.00 frames. ], tot_loss[loss=0.1729, simple_loss=0.2452, pruned_loss=0.05025, over 951985.41 frames. ], batch size: 65, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:49:21,436 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146905.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:49:46,520 INFO [finetune.py:976] (5/7) Epoch 26, batch 3750, loss[loss=0.1544, simple_loss=0.2356, pruned_loss=0.0366, over 4816.00 frames. ], tot_loss[loss=0.1727, simple_loss=0.2452, pruned_loss=0.05007, over 950585.56 frames. ], batch size: 47, lr: 2.96e-03, grad_scale: 16.0 +2023-03-27 07:49:49,621 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146947.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:50:03,845 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.820e+01 1.503e+02 1.791e+02 2.461e+02 5.017e+02, threshold=3.581e+02, percent-clipped=5.0 +2023-03-27 07:50:12,654 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146982.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 07:50:17,294 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8212, 1.2699, 1.6689, 1.8364, 1.5833, 1.5788, 1.7654, 1.7016], + device='cuda:5'), covar=tensor([0.4947, 0.4392, 0.4304, 0.4212, 0.5839, 0.4567, 0.5264, 0.4000], + device='cuda:5'), in_proj_covar=tensor([0.0266, 0.0247, 0.0267, 0.0295, 0.0295, 0.0271, 0.0301, 0.0251], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:50:19,583 INFO [finetune.py:976] (5/7) Epoch 26, batch 3800, loss[loss=0.1725, simple_loss=0.2565, pruned_loss=0.04426, over 4920.00 frames. ], tot_loss[loss=0.1735, simple_loss=0.2467, pruned_loss=0.05018, over 950888.34 frames. 
], batch size: 33, lr: 2.95e-03, grad_scale: 16.0 +2023-03-27 07:50:21,418 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9222, 1.8284, 1.6848, 2.0896, 2.5018, 2.0407, 1.7996, 1.5324], + device='cuda:5'), covar=tensor([0.2070, 0.1887, 0.1819, 0.1556, 0.1434, 0.1139, 0.2158, 0.1889], + device='cuda:5'), in_proj_covar=tensor([0.0245, 0.0211, 0.0214, 0.0198, 0.0245, 0.0191, 0.0217, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:50:25,729 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5387, 1.4742, 1.4415, 1.4721, 0.9308, 2.9229, 1.1385, 1.4622], + device='cuda:5'), covar=tensor([0.3303, 0.2563, 0.2122, 0.2275, 0.1877, 0.0263, 0.2548, 0.1284], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0121, 0.0124, 0.0113, 0.0095, 0.0094, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 07:50:55,311 INFO [finetune.py:976] (5/7) Epoch 26, batch 3850, loss[loss=0.1477, simple_loss=0.2282, pruned_loss=0.03366, over 4789.00 frames. ], tot_loss[loss=0.1715, simple_loss=0.2446, pruned_loss=0.04919, over 951751.80 frames. ], batch size: 29, lr: 2.95e-03, grad_scale: 16.0 +2023-03-27 07:51:21,148 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.070e+02 1.517e+02 1.854e+02 2.266e+02 5.483e+02, threshold=3.707e+02, percent-clipped=2.0 +2023-03-27 07:51:37,020 INFO [finetune.py:976] (5/7) Epoch 26, batch 3900, loss[loss=0.1497, simple_loss=0.2291, pruned_loss=0.03511, over 4927.00 frames. ], tot_loss[loss=0.1685, simple_loss=0.241, pruned_loss=0.048, over 951314.74 frames. ], batch size: 33, lr: 2.95e-03, grad_scale: 16.0 +2023-03-27 07:52:09,603 INFO [finetune.py:976] (5/7) Epoch 26, batch 3950, loss[loss=0.1613, simple_loss=0.2209, pruned_loss=0.05088, over 3870.00 frames. ], tot_loss[loss=0.1653, simple_loss=0.2374, pruned_loss=0.04663, over 950087.39 frames. ], batch size: 17, lr: 2.95e-03, grad_scale: 16.0 +2023-03-27 07:52:27,908 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.014e+02 1.492e+02 1.682e+02 1.976e+02 2.814e+02, threshold=3.365e+02, percent-clipped=0.0 +2023-03-27 07:52:42,787 INFO [finetune.py:976] (5/7) Epoch 26, batch 4000, loss[loss=0.1713, simple_loss=0.2625, pruned_loss=0.04002, over 4911.00 frames. ], tot_loss[loss=0.1663, simple_loss=0.2382, pruned_loss=0.04721, over 951827.64 frames. ], batch size: 36, lr: 2.95e-03, grad_scale: 16.0 +2023-03-27 07:52:48,865 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147200.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:53:15,811 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147241.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:53:16,323 INFO [finetune.py:976] (5/7) Epoch 26, batch 4050, loss[loss=0.2222, simple_loss=0.2958, pruned_loss=0.07434, over 4807.00 frames. ], tot_loss[loss=0.1696, simple_loss=0.2419, pruned_loss=0.04868, over 950388.21 frames. 
], batch size: 51, lr: 2.95e-03, grad_scale: 16.0 +2023-03-27 07:53:21,642 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147247.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:53:22,263 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1417, 1.7718, 2.1647, 1.6861, 2.1737, 2.3966, 2.3088, 1.4054], + device='cuda:5'), covar=tensor([0.0776, 0.1055, 0.0866, 0.0980, 0.0796, 0.0739, 0.0791, 0.1887], + device='cuda:5'), in_proj_covar=tensor([0.0129, 0.0134, 0.0139, 0.0117, 0.0126, 0.0135, 0.0137, 0.0158], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:53:48,586 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.158e+02 1.660e+02 1.920e+02 2.375e+02 4.575e+02, threshold=3.840e+02, percent-clipped=6.0 +2023-03-27 07:53:58,892 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147282.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:54:08,320 INFO [finetune.py:976] (5/7) Epoch 26, batch 4100, loss[loss=0.1469, simple_loss=0.2114, pruned_loss=0.04124, over 4718.00 frames. ], tot_loss[loss=0.1723, simple_loss=0.245, pruned_loss=0.04976, over 949645.38 frames. ], batch size: 23, lr: 2.95e-03, grad_scale: 16.0 +2023-03-27 07:54:13,960 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=147295.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:54:14,928 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-03-27 07:54:18,795 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147302.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:54:24,626 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147310.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:54:37,224 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=147330.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:54:44,911 INFO [finetune.py:976] (5/7) Epoch 26, batch 4150, loss[loss=0.1711, simple_loss=0.2541, pruned_loss=0.0441, over 4815.00 frames. ], tot_loss[loss=0.172, simple_loss=0.2452, pruned_loss=0.04936, over 949685.55 frames. ], batch size: 38, lr: 2.95e-03, grad_scale: 16.0 +2023-03-27 07:55:03,459 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.008e+02 1.520e+02 1.873e+02 2.208e+02 5.004e+02, threshold=3.746e+02, percent-clipped=1.0 +2023-03-27 07:55:05,278 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147371.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:55:18,278 INFO [finetune.py:976] (5/7) Epoch 26, batch 4200, loss[loss=0.1612, simple_loss=0.2406, pruned_loss=0.04093, over 4883.00 frames. ], tot_loss[loss=0.1719, simple_loss=0.2454, pruned_loss=0.04923, over 950589.51 frames. ], batch size: 43, lr: 2.95e-03, grad_scale: 16.0 +2023-03-27 07:55:28,556 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147407.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:55:46,438 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147434.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:55:51,223 INFO [finetune.py:976] (5/7) Epoch 26, batch 4250, loss[loss=0.1397, simple_loss=0.2131, pruned_loss=0.03313, over 4895.00 frames. ], tot_loss[loss=0.1705, simple_loss=0.2431, pruned_loss=0.04896, over 950060.29 frames. 
], batch size: 35, lr: 2.95e-03, grad_scale: 16.0 +2023-03-27 07:56:16,336 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147468.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:56:16,793 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.028e+02 1.473e+02 1.815e+02 2.255e+02 8.587e+02, threshold=3.630e+02, percent-clipped=2.0 +2023-03-27 07:56:24,083 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1262, 1.8717, 2.0510, 1.5194, 2.1354, 2.2987, 2.2029, 1.4858], + device='cuda:5'), covar=tensor([0.0561, 0.0872, 0.0792, 0.0963, 0.0858, 0.0668, 0.0668, 0.1757], + device='cuda:5'), in_proj_covar=tensor([0.0129, 0.0134, 0.0139, 0.0118, 0.0126, 0.0136, 0.0137, 0.0159], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:56:34,761 INFO [finetune.py:976] (5/7) Epoch 26, batch 4300, loss[loss=0.1286, simple_loss=0.2149, pruned_loss=0.02109, over 4864.00 frames. ], tot_loss[loss=0.169, simple_loss=0.2411, pruned_loss=0.04846, over 951009.85 frames. ], batch size: 44, lr: 2.95e-03, grad_scale: 16.0 +2023-03-27 07:56:36,710 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147495.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:56:40,202 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147500.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:56:47,239 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0531, 1.8525, 1.5999, 1.6047, 1.7366, 1.7368, 1.8144, 2.4889], + device='cuda:5'), covar=tensor([0.3413, 0.3612, 0.3078, 0.3434, 0.3777, 0.2303, 0.3371, 0.1479], + device='cuda:5'), in_proj_covar=tensor([0.0291, 0.0267, 0.0239, 0.0279, 0.0261, 0.0232, 0.0260, 0.0241], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:57:05,974 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.55 vs. limit=5.0 +2023-03-27 07:57:08,538 INFO [finetune.py:976] (5/7) Epoch 26, batch 4350, loss[loss=0.1386, simple_loss=0.2095, pruned_loss=0.03383, over 4870.00 frames. ], tot_loss[loss=0.1653, simple_loss=0.237, pruned_loss=0.04675, over 951692.97 frames. ], batch size: 34, lr: 2.95e-03, grad_scale: 16.0 +2023-03-27 07:57:10,167 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-03-27 07:57:12,266 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=147548.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:57:19,357 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1449, 1.3799, 1.5773, 1.3424, 1.4613, 2.4845, 1.2672, 1.4904], + device='cuda:5'), covar=tensor([0.1016, 0.1780, 0.0916, 0.0926, 0.1644, 0.0396, 0.1598, 0.1795], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0073, 0.0076, 0.0091, 0.0080, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 07:57:20,541 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.35 vs. 
limit=5.0 +2023-03-27 07:57:26,970 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.870e+01 1.462e+02 1.667e+02 1.917e+02 5.708e+02, threshold=3.333e+02, percent-clipped=2.0 +2023-03-27 07:57:42,396 INFO [finetune.py:976] (5/7) Epoch 26, batch 4400, loss[loss=0.2105, simple_loss=0.2829, pruned_loss=0.06906, over 4164.00 frames. ], tot_loss[loss=0.166, simple_loss=0.2377, pruned_loss=0.0472, over 949586.37 frames. ], batch size: 65, lr: 2.95e-03, grad_scale: 16.0 +2023-03-27 07:57:45,519 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147597.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:58:16,297 INFO [finetune.py:976] (5/7) Epoch 26, batch 4450, loss[loss=0.2419, simple_loss=0.3058, pruned_loss=0.08895, over 4802.00 frames. ], tot_loss[loss=0.17, simple_loss=0.2426, pruned_loss=0.04868, over 952102.97 frames. ], batch size: 41, lr: 2.95e-03, grad_scale: 16.0 +2023-03-27 07:58:19,461 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5495, 2.3613, 2.1182, 1.2086, 2.2219, 1.9402, 1.7450, 2.2254], + device='cuda:5'), covar=tensor([0.0923, 0.0764, 0.1443, 0.1994, 0.1451, 0.2354, 0.2271, 0.0925], + device='cuda:5'), in_proj_covar=tensor([0.0173, 0.0193, 0.0202, 0.0184, 0.0211, 0.0213, 0.0226, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 07:58:27,312 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147659.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:58:33,183 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.7165, 1.6515, 1.8298, 0.9759, 1.9401, 2.1725, 1.9533, 1.6194], + device='cuda:5'), covar=tensor([0.1036, 0.0951, 0.0654, 0.0658, 0.0545, 0.0568, 0.0554, 0.0743], + device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0148, 0.0128, 0.0122, 0.0130, 0.0129, 0.0141, 0.0149], + device='cuda:5'), out_proj_covar=tensor([8.8715e-05, 1.0667e-04, 9.1142e-05, 8.5855e-05, 9.0903e-05, 9.1306e-05, + 1.0062e-04, 1.0652e-04], device='cuda:5') +2023-03-27 07:58:34,815 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147666.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:58:36,565 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.042e+02 1.542e+02 1.864e+02 2.192e+02 3.736e+02, threshold=3.727e+02, percent-clipped=4.0 +2023-03-27 07:59:06,170 INFO [finetune.py:976] (5/7) Epoch 26, batch 4500, loss[loss=0.1577, simple_loss=0.2313, pruned_loss=0.0421, over 4398.00 frames. ], tot_loss[loss=0.1727, simple_loss=0.2451, pruned_loss=0.05016, over 952552.88 frames. ], batch size: 19, lr: 2.95e-03, grad_scale: 16.0 +2023-03-27 07:59:37,006 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147720.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 07:59:52,141 INFO [finetune.py:976] (5/7) Epoch 26, batch 4550, loss[loss=0.1815, simple_loss=0.2547, pruned_loss=0.05416, over 4234.00 frames. ], tot_loss[loss=0.1735, simple_loss=0.2463, pruned_loss=0.05037, over 951432.96 frames. 
], batch size: 65, lr: 2.95e-03, grad_scale: 32.0 +2023-03-27 08:00:04,824 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147763.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:00:09,335 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.722e+01 1.488e+02 1.756e+02 2.293e+02 4.562e+02, threshold=3.512e+02, percent-clipped=2.0 +2023-03-27 08:00:21,660 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2629, 2.0919, 1.8300, 1.8658, 2.2221, 1.9518, 2.3229, 2.2244], + device='cuda:5'), covar=tensor([0.1382, 0.2055, 0.2822, 0.2409, 0.2485, 0.1716, 0.2670, 0.1717], + device='cuda:5'), in_proj_covar=tensor([0.0188, 0.0189, 0.0235, 0.0252, 0.0248, 0.0206, 0.0214, 0.0201], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:00:24,540 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147790.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:00:25,691 INFO [finetune.py:976] (5/7) Epoch 26, batch 4600, loss[loss=0.1804, simple_loss=0.2477, pruned_loss=0.05655, over 4881.00 frames. ], tot_loss[loss=0.172, simple_loss=0.245, pruned_loss=0.04953, over 953017.91 frames. ], batch size: 32, lr: 2.95e-03, grad_scale: 32.0 +2023-03-27 08:00:39,927 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147815.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:00:55,097 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0994, 1.8982, 2.5751, 4.2317, 2.8291, 2.7426, 0.9227, 3.6529], + device='cuda:5'), covar=tensor([0.1734, 0.1397, 0.1348, 0.0564, 0.0803, 0.1662, 0.2078, 0.0396], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0116, 0.0133, 0.0163, 0.0101, 0.0135, 0.0125, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 08:00:59,127 INFO [finetune.py:976] (5/7) Epoch 26, batch 4650, loss[loss=0.1563, simple_loss=0.2245, pruned_loss=0.04401, over 4926.00 frames. ], tot_loss[loss=0.1707, simple_loss=0.2428, pruned_loss=0.04925, over 954754.75 frames. ], batch size: 38, lr: 2.95e-03, grad_scale: 32.0 +2023-03-27 08:01:08,860 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147857.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:01:15,990 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 7.955e+01 1.458e+02 1.712e+02 2.175e+02 4.467e+02, threshold=3.424e+02, percent-clipped=3.0 +2023-03-27 08:01:21,830 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147876.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:01:39,632 INFO [finetune.py:976] (5/7) Epoch 26, batch 4700, loss[loss=0.1443, simple_loss=0.2151, pruned_loss=0.03673, over 4771.00 frames. ], tot_loss[loss=0.1672, simple_loss=0.2389, pruned_loss=0.04777, over 956255.00 frames. 
], batch size: 28, lr: 2.95e-03, grad_scale: 32.0 +2023-03-27 08:01:46,520 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147897.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:01:48,215 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5361, 1.4425, 1.4405, 1.4457, 0.9131, 2.3284, 0.7764, 1.2811], + device='cuda:5'), covar=tensor([0.3250, 0.2456, 0.2105, 0.2354, 0.1836, 0.0338, 0.2607, 0.1255], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0120, 0.0124, 0.0113, 0.0095, 0.0094, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 08:01:55,405 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147911.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:01:59,698 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147918.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:02:16,234 INFO [finetune.py:976] (5/7) Epoch 26, batch 4750, loss[loss=0.2112, simple_loss=0.2783, pruned_loss=0.07208, over 4692.00 frames. ], tot_loss[loss=0.1668, simple_loss=0.2379, pruned_loss=0.04788, over 954431.41 frames. ], batch size: 59, lr: 2.95e-03, grad_scale: 32.0 +2023-03-27 08:02:19,085 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=147945.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:02:32,361 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147966.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:02:34,091 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.372e+01 1.470e+02 1.654e+02 2.049e+02 2.990e+02, threshold=3.309e+02, percent-clipped=0.0 +2023-03-27 08:02:36,002 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147972.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:02:50,080 INFO [finetune.py:976] (5/7) Epoch 26, batch 4800, loss[loss=0.1937, simple_loss=0.2707, pruned_loss=0.05838, over 4820.00 frames. ], tot_loss[loss=0.1694, simple_loss=0.2411, pruned_loss=0.0489, over 955071.87 frames. ], batch size: 39, lr: 2.95e-03, grad_scale: 32.0 +2023-03-27 08:03:06,225 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=148014.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:03:06,845 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148015.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:03:24,536 INFO [finetune.py:976] (5/7) Epoch 26, batch 4850, loss[loss=0.1652, simple_loss=0.246, pruned_loss=0.04214, over 4807.00 frames. ], tot_loss[loss=0.1716, simple_loss=0.2445, pruned_loss=0.04937, over 955263.21 frames. 
], batch size: 45, lr: 2.95e-03, grad_scale: 32.0 +2023-03-27 08:03:26,333 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.8712, 3.3994, 3.5683, 3.7415, 3.6486, 3.4138, 3.9322, 1.1929], + device='cuda:5'), covar=tensor([0.1112, 0.1009, 0.1040, 0.1147, 0.1619, 0.1837, 0.1057, 0.5968], + device='cuda:5'), in_proj_covar=tensor([0.0353, 0.0248, 0.0283, 0.0296, 0.0338, 0.0287, 0.0306, 0.0301], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:03:29,803 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([5.3329, 4.5986, 4.8983, 5.1725, 5.0312, 4.7678, 5.4186, 1.6509], + device='cuda:5'), covar=tensor([0.0731, 0.0917, 0.0793, 0.0859, 0.1200, 0.1652, 0.0606, 0.6238], + device='cuda:5'), in_proj_covar=tensor([0.0353, 0.0248, 0.0283, 0.0296, 0.0338, 0.0287, 0.0306, 0.0301], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:03:39,235 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148063.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:03:42,780 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.092e+02 1.530e+02 1.908e+02 2.333e+02 3.886e+02, threshold=3.817e+02, percent-clipped=4.0 +2023-03-27 08:04:04,132 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148090.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:04:05,252 INFO [finetune.py:976] (5/7) Epoch 26, batch 4900, loss[loss=0.2368, simple_loss=0.2935, pruned_loss=0.09006, over 4130.00 frames. ], tot_loss[loss=0.1733, simple_loss=0.2464, pruned_loss=0.0501, over 953922.06 frames. ], batch size: 65, lr: 2.95e-03, grad_scale: 32.0 +2023-03-27 08:04:30,819 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=148111.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:04:40,776 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.11 vs. limit=2.0 +2023-03-27 08:04:57,744 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=148138.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:05:00,564 INFO [finetune.py:976] (5/7) Epoch 26, batch 4950, loss[loss=0.1765, simple_loss=0.2593, pruned_loss=0.04687, over 4887.00 frames. ], tot_loss[loss=0.1747, simple_loss=0.2483, pruned_loss=0.05059, over 954919.06 frames. ], batch size: 43, lr: 2.95e-03, grad_scale: 32.0 +2023-03-27 08:05:18,900 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.030e+01 1.572e+02 1.871e+02 2.257e+02 5.603e+02, threshold=3.742e+02, percent-clipped=1.0 +2023-03-27 08:05:20,236 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148171.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:05:21,480 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([5.2181, 4.5087, 4.7797, 5.0569, 4.9353, 4.6140, 5.2949, 1.6861], + device='cuda:5'), covar=tensor([0.0696, 0.0855, 0.0815, 0.0801, 0.1295, 0.1723, 0.0678, 0.6005], + device='cuda:5'), in_proj_covar=tensor([0.0355, 0.0250, 0.0285, 0.0299, 0.0341, 0.0289, 0.0308, 0.0304], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:05:33,994 INFO [finetune.py:976] (5/7) Epoch 26, batch 5000, loss[loss=0.1449, simple_loss=0.2096, pruned_loss=0.04015, over 4708.00 frames. 
], tot_loss[loss=0.1723, simple_loss=0.2456, pruned_loss=0.04945, over 956335.41 frames. ], batch size: 23, lr: 2.95e-03, grad_scale: 32.0 +2023-03-27 08:05:48,838 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148213.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:06:07,386 INFO [finetune.py:976] (5/7) Epoch 26, batch 5050, loss[loss=0.1498, simple_loss=0.2227, pruned_loss=0.03848, over 4768.00 frames. ], tot_loss[loss=0.1693, simple_loss=0.2424, pruned_loss=0.0481, over 956658.97 frames. ], batch size: 28, lr: 2.95e-03, grad_scale: 32.0 +2023-03-27 08:06:25,054 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148267.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:06:26,159 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.004e+02 1.490e+02 1.796e+02 2.082e+02 4.496e+02, threshold=3.592e+02, percent-clipped=3.0 +2023-03-27 08:06:37,576 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148287.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:06:40,479 INFO [finetune.py:976] (5/7) Epoch 26, batch 5100, loss[loss=0.1459, simple_loss=0.2229, pruned_loss=0.03442, over 4755.00 frames. ], tot_loss[loss=0.1671, simple_loss=0.2389, pruned_loss=0.04765, over 956330.25 frames. ], batch size: 28, lr: 2.95e-03, grad_scale: 32.0 +2023-03-27 08:07:00,785 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0286, 1.9348, 1.6407, 1.8717, 1.8306, 1.8321, 1.9149, 2.5398], + device='cuda:5'), covar=tensor([0.3574, 0.3789, 0.3258, 0.3441, 0.3706, 0.2292, 0.3425, 0.1623], + device='cuda:5'), in_proj_covar=tensor([0.0289, 0.0265, 0.0237, 0.0276, 0.0260, 0.0230, 0.0258, 0.0239], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:07:02,510 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148315.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:07:22,780 INFO [finetune.py:976] (5/7) Epoch 26, batch 5150, loss[loss=0.1738, simple_loss=0.2461, pruned_loss=0.05079, over 4851.00 frames. ], tot_loss[loss=0.1686, simple_loss=0.24, pruned_loss=0.04866, over 957458.17 frames. ], batch size: 44, lr: 2.95e-03, grad_scale: 32.0 +2023-03-27 08:07:27,032 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148348.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:07:36,972 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=148363.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:07:41,446 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.157e+02 1.549e+02 1.828e+02 2.233e+02 5.689e+02, threshold=3.657e+02, percent-clipped=4.0 +2023-03-27 08:07:55,905 INFO [finetune.py:976] (5/7) Epoch 26, batch 5200, loss[loss=0.2015, simple_loss=0.2792, pruned_loss=0.06191, over 4873.00 frames. ], tot_loss[loss=0.1723, simple_loss=0.2444, pruned_loss=0.05012, over 954030.03 frames. ], batch size: 44, lr: 2.95e-03, grad_scale: 16.0 +2023-03-27 08:08:15,171 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.72 vs. 
limit=5.0 +2023-03-27 08:08:21,969 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148429.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:08:21,999 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8502, 1.6631, 1.4600, 1.3231, 1.5673, 1.5468, 1.6254, 2.1885], + device='cuda:5'), covar=tensor([0.3347, 0.3360, 0.2950, 0.3185, 0.3504, 0.2284, 0.3014, 0.1701], + device='cuda:5'), in_proj_covar=tensor([0.0289, 0.0265, 0.0237, 0.0276, 0.0260, 0.0229, 0.0258, 0.0239], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:08:29,696 INFO [finetune.py:976] (5/7) Epoch 26, batch 5250, loss[loss=0.1506, simple_loss=0.2279, pruned_loss=0.03665, over 4754.00 frames. ], tot_loss[loss=0.1732, simple_loss=0.2456, pruned_loss=0.05039, over 953392.61 frames. ], batch size: 28, lr: 2.94e-03, grad_scale: 16.0 +2023-03-27 08:08:48,648 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.102e+02 1.514e+02 1.742e+02 2.193e+02 4.299e+02, threshold=3.484e+02, percent-clipped=1.0 +2023-03-27 08:08:49,336 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148471.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:09:02,323 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148490.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:09:03,426 INFO [finetune.py:976] (5/7) Epoch 26, batch 5300, loss[loss=0.1513, simple_loss=0.223, pruned_loss=0.03976, over 4816.00 frames. ], tot_loss[loss=0.1742, simple_loss=0.2466, pruned_loss=0.0509, over 953654.46 frames. ], batch size: 25, lr: 2.94e-03, grad_scale: 16.0 +2023-03-27 08:09:07,097 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148498.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:09:20,035 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148513.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:09:28,244 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.07 vs. limit=2.0 +2023-03-27 08:09:28,389 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=148519.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:09:54,003 INFO [finetune.py:976] (5/7) Epoch 26, batch 5350, loss[loss=0.15, simple_loss=0.2211, pruned_loss=0.03943, over 4728.00 frames. ], tot_loss[loss=0.1719, simple_loss=0.2451, pruned_loss=0.04936, over 952519.96 frames. ], batch size: 59, lr: 2.94e-03, grad_scale: 16.0 +2023-03-27 08:10:12,428 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148559.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:10:13,591 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=148561.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:10:17,257 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-03-27 08:10:17,757 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148567.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:10:19,465 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.412e+02 1.653e+02 1.941e+02 3.220e+02, threshold=3.306e+02, percent-clipped=0.0 +2023-03-27 08:10:34,701 INFO [finetune.py:976] (5/7) Epoch 26, batch 5400, loss[loss=0.1702, simple_loss=0.2327, pruned_loss=0.05383, over 4761.00 frames. 
], tot_loss[loss=0.171, simple_loss=0.2434, pruned_loss=0.04927, over 952005.58 frames. ], batch size: 27, lr: 2.94e-03, grad_scale: 16.0 +2023-03-27 08:10:46,628 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-03-27 08:10:49,745 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=148615.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:10:59,482 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-03-27 08:11:07,908 INFO [finetune.py:976] (5/7) Epoch 26, batch 5450, loss[loss=0.1274, simple_loss=0.2039, pruned_loss=0.02546, over 4751.00 frames. ], tot_loss[loss=0.1702, simple_loss=0.2414, pruned_loss=0.04951, over 952412.41 frames. ], batch size: 27, lr: 2.94e-03, grad_scale: 16.0 +2023-03-27 08:11:08,562 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148643.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:11:13,486 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148651.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:11:25,816 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.053e+02 1.571e+02 1.855e+02 2.174e+02 4.125e+02, threshold=3.711e+02, percent-clipped=2.0 +2023-03-27 08:11:41,099 INFO [finetune.py:976] (5/7) Epoch 26, batch 5500, loss[loss=0.1413, simple_loss=0.2301, pruned_loss=0.02627, over 4934.00 frames. ], tot_loss[loss=0.1672, simple_loss=0.238, pruned_loss=0.04821, over 953469.22 frames. ], batch size: 33, lr: 2.94e-03, grad_scale: 16.0 +2023-03-27 08:11:53,977 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148712.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:11:56,410 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148716.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:12:22,375 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0974, 2.7571, 2.8855, 3.0167, 2.9096, 2.7243, 3.1431, 1.1492], + device='cuda:5'), covar=tensor([0.1105, 0.1180, 0.1120, 0.1219, 0.1521, 0.1872, 0.1297, 0.5392], + device='cuda:5'), in_proj_covar=tensor([0.0354, 0.0250, 0.0284, 0.0298, 0.0338, 0.0290, 0.0307, 0.0302], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:12:24,627 INFO [finetune.py:976] (5/7) Epoch 26, batch 5550, loss[loss=0.1886, simple_loss=0.2717, pruned_loss=0.05277, over 4813.00 frames. ], tot_loss[loss=0.1679, simple_loss=0.2392, pruned_loss=0.04828, over 953449.11 frames. ], batch size: 38, lr: 2.94e-03, grad_scale: 16.0 +2023-03-27 08:12:42,333 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.664e+01 1.578e+02 1.917e+02 2.288e+02 4.413e+02, threshold=3.834e+02, percent-clipped=2.0 +2023-03-27 08:12:47,515 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148777.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:12:52,458 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148785.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:12:56,501 INFO [finetune.py:976] (5/7) Epoch 26, batch 5600, loss[loss=0.1672, simple_loss=0.2486, pruned_loss=0.04289, over 4781.00 frames. ], tot_loss[loss=0.1698, simple_loss=0.242, pruned_loss=0.04879, over 952811.91 frames. 
], batch size: 26, lr: 2.94e-03, grad_scale: 16.0 +2023-03-27 08:13:00,681 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7526, 1.7926, 1.6982, 1.7834, 1.3408, 4.3211, 1.7214, 2.0109], + device='cuda:5'), covar=tensor([0.3419, 0.2623, 0.2120, 0.2390, 0.1702, 0.0127, 0.2352, 0.1233], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0120, 0.0123, 0.0112, 0.0095, 0.0094, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0005, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 08:13:03,755 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3216, 1.3244, 1.3642, 0.8438, 1.3805, 1.6882, 1.6761, 1.2950], + device='cuda:5'), covar=tensor([0.0876, 0.0722, 0.0522, 0.0521, 0.0469, 0.0570, 0.0323, 0.0721], + device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0149, 0.0129, 0.0123, 0.0131, 0.0130, 0.0143, 0.0150], + device='cuda:5'), out_proj_covar=tensor([8.9030e-05, 1.0686e-04, 9.1665e-05, 8.6486e-05, 9.1921e-05, 9.2044e-05, + 1.0147e-04, 1.0741e-04], device='cuda:5') +2023-03-27 08:13:19,446 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5027, 2.1440, 2.5811, 2.5681, 2.2698, 2.2753, 2.5336, 2.4602], + device='cuda:5'), covar=tensor([0.4015, 0.4034, 0.3074, 0.3592, 0.4958, 0.3630, 0.4440, 0.2913], + device='cuda:5'), in_proj_covar=tensor([0.0264, 0.0246, 0.0266, 0.0294, 0.0294, 0.0270, 0.0299, 0.0251], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:13:25,691 INFO [finetune.py:976] (5/7) Epoch 26, batch 5650, loss[loss=0.173, simple_loss=0.2542, pruned_loss=0.04586, over 4834.00 frames. ], tot_loss[loss=0.1722, simple_loss=0.2452, pruned_loss=0.0496, over 953276.89 frames. ], batch size: 47, lr: 2.94e-03, grad_scale: 16.0 +2023-03-27 08:13:32,822 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148854.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:13:42,328 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.032e+02 1.500e+02 1.770e+02 2.150e+02 4.859e+02, threshold=3.539e+02, percent-clipped=1.0 +2023-03-27 08:13:55,309 INFO [finetune.py:976] (5/7) Epoch 26, batch 5700, loss[loss=0.1667, simple_loss=0.2286, pruned_loss=0.0524, over 4116.00 frames. ], tot_loss[loss=0.1697, simple_loss=0.241, pruned_loss=0.04922, over 930373.72 frames. ], batch size: 18, lr: 2.94e-03, grad_scale: 8.0 +2023-03-27 08:14:00,723 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6656, 2.4740, 2.7628, 1.4071, 3.0321, 3.2790, 2.7186, 2.3406], + device='cuda:5'), covar=tensor([0.0766, 0.0697, 0.0407, 0.0662, 0.0453, 0.0538, 0.0448, 0.0768], + device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0149, 0.0129, 0.0123, 0.0131, 0.0130, 0.0143, 0.0150], + device='cuda:5'), out_proj_covar=tensor([8.9138e-05, 1.0682e-04, 9.1777e-05, 8.6636e-05, 9.1837e-05, 9.2042e-05, + 1.0150e-04, 1.0745e-04], device='cuda:5') +2023-03-27 08:14:24,193 INFO [finetune.py:976] (5/7) Epoch 27, batch 0, loss[loss=0.2047, simple_loss=0.2826, pruned_loss=0.06347, over 4909.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2826, pruned_loss=0.06347, over 4909.00 frames. 
], batch size: 38, lr: 2.94e-03, grad_scale: 8.0 +2023-03-27 08:14:24,193 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-27 08:14:30,265 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6865, 1.1989, 0.9100, 1.6559, 2.1835, 1.1529, 1.5406, 1.5443], + device='cuda:5'), covar=tensor([0.1624, 0.2110, 0.1868, 0.1211, 0.1865, 0.2140, 0.1416, 0.2191], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0094, 0.0110, 0.0092, 0.0120, 0.0094, 0.0099, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 08:14:30,734 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5069, 1.3941, 1.3553, 1.4537, 1.7076, 1.6995, 1.4493, 1.2987], + device='cuda:5'), covar=tensor([0.0544, 0.0308, 0.0615, 0.0336, 0.0358, 0.0315, 0.0351, 0.0433], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0105, 0.0146, 0.0110, 0.0100, 0.0114, 0.0103, 0.0112], + device='cuda:5'), out_proj_covar=tensor([7.7685e-05, 8.0337e-05, 1.1349e-04, 8.4398e-05, 7.7821e-05, 8.4339e-05, + 7.6199e-05, 8.4954e-05], device='cuda:5') +2023-03-27 08:14:40,695 INFO [finetune.py:1010] (5/7) Epoch 27, validation: loss=0.1593, simple_loss=0.2269, pruned_loss=0.04586, over 2265189.00 frames. +2023-03-27 08:14:40,695 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-27 08:14:57,040 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148943.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:15:27,447 INFO [finetune.py:976] (5/7) Epoch 27, batch 50, loss[loss=0.2407, simple_loss=0.3052, pruned_loss=0.08815, over 4783.00 frames. ], tot_loss[loss=0.1735, simple_loss=0.2459, pruned_loss=0.05054, over 215814.92 frames. ], batch size: 51, lr: 2.94e-03, grad_scale: 8.0 +2023-03-27 08:15:28,081 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.253e+01 1.427e+02 1.731e+02 2.058e+02 3.661e+02, threshold=3.462e+02, percent-clipped=4.0 +2023-03-27 08:15:37,744 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-03-27 08:15:41,368 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-03-27 08:15:44,090 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=148991.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:15:49,817 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5879, 2.5415, 2.1281, 1.1349, 2.2024, 1.9814, 1.8354, 2.2991], + device='cuda:5'), covar=tensor([0.0798, 0.0683, 0.1563, 0.1878, 0.1360, 0.2224, 0.2021, 0.0866], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0190, 0.0199, 0.0181, 0.0208, 0.0210, 0.0222, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:15:54,004 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=149007.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:16:03,651 INFO [finetune.py:976] (5/7) Epoch 27, batch 100, loss[loss=0.1641, simple_loss=0.2416, pruned_loss=0.0433, over 4707.00 frames. ], tot_loss[loss=0.1696, simple_loss=0.2408, pruned_loss=0.04924, over 381380.79 frames. 
], batch size: 59, lr: 2.94e-03, grad_scale: 8.0 +2023-03-27 08:16:10,121 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5749, 1.5025, 1.4656, 1.5028, 1.0904, 2.9160, 1.0799, 1.5522], + device='cuda:5'), covar=tensor([0.3233, 0.2456, 0.2148, 0.2429, 0.1816, 0.0259, 0.2619, 0.1206], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0121, 0.0124, 0.0113, 0.0095, 0.0094, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 08:16:13,184 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9604, 2.6681, 2.5329, 1.3555, 2.7023, 2.2480, 2.1094, 2.4763], + device='cuda:5'), covar=tensor([0.1493, 0.0896, 0.2051, 0.2123, 0.1725, 0.2008, 0.2266, 0.1223], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0190, 0.0198, 0.0181, 0.0208, 0.0209, 0.0222, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:16:36,428 INFO [finetune.py:976] (5/7) Epoch 27, batch 150, loss[loss=0.1719, simple_loss=0.2361, pruned_loss=0.05384, over 4902.00 frames. ], tot_loss[loss=0.1662, simple_loss=0.2368, pruned_loss=0.04773, over 508691.32 frames. ], batch size: 36, lr: 2.94e-03, grad_scale: 8.0 +2023-03-27 08:16:37,490 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.570e+01 1.451e+02 1.770e+02 2.054e+02 3.397e+02, threshold=3.539e+02, percent-clipped=0.0 +2023-03-27 08:16:39,140 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=149072.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:16:47,475 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=149085.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:16:52,076 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-03-27 08:17:09,499 INFO [finetune.py:976] (5/7) Epoch 27, batch 200, loss[loss=0.1476, simple_loss=0.2321, pruned_loss=0.03157, over 4808.00 frames. ], tot_loss[loss=0.1663, simple_loss=0.2368, pruned_loss=0.04791, over 609468.65 frames. 
], batch size: 25, lr: 2.94e-03, grad_scale: 8.0 +2023-03-27 08:17:11,266 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7665, 1.5000, 2.3002, 3.5079, 2.3412, 2.5213, 1.2195, 2.9745], + device='cuda:5'), covar=tensor([0.1714, 0.1443, 0.1278, 0.0618, 0.0843, 0.1400, 0.1760, 0.0480], + device='cuda:5'), in_proj_covar=tensor([0.0098, 0.0114, 0.0131, 0.0162, 0.0099, 0.0134, 0.0124, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 08:17:19,406 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=149133.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:17:22,558 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3952, 1.9971, 2.3786, 2.4137, 2.0927, 2.1088, 2.3205, 2.1971], + device='cuda:5'), covar=tensor([0.3939, 0.4086, 0.3281, 0.3718, 0.4985, 0.3973, 0.4789, 0.3073], + device='cuda:5'), in_proj_covar=tensor([0.0266, 0.0247, 0.0267, 0.0295, 0.0295, 0.0271, 0.0301, 0.0252], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:17:39,165 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=149154.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:17:52,878 INFO [finetune.py:976] (5/7) Epoch 27, batch 250, loss[loss=0.2166, simple_loss=0.2883, pruned_loss=0.07244, over 4856.00 frames. ], tot_loss[loss=0.1658, simple_loss=0.2366, pruned_loss=0.0475, over 686309.99 frames. ], batch size: 44, lr: 2.94e-03, grad_scale: 8.0 +2023-03-27 08:17:53,482 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.507e+01 1.547e+02 1.763e+02 2.073e+02 3.560e+02, threshold=3.526e+02, percent-clipped=1.0 +2023-03-27 08:18:14,784 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=149202.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:18:25,586 INFO [finetune.py:976] (5/7) Epoch 27, batch 300, loss[loss=0.1951, simple_loss=0.2765, pruned_loss=0.05682, over 4814.00 frames. ], tot_loss[loss=0.1703, simple_loss=0.2426, pruned_loss=0.04904, over 747864.02 frames. ], batch size: 38, lr: 2.94e-03, grad_scale: 8.0 +2023-03-27 08:18:58,795 INFO [finetune.py:976] (5/7) Epoch 27, batch 350, loss[loss=0.1698, simple_loss=0.2488, pruned_loss=0.04537, over 4800.00 frames. ], tot_loss[loss=0.1705, simple_loss=0.2434, pruned_loss=0.04885, over 794883.05 frames. ], batch size: 25, lr: 2.94e-03, grad_scale: 8.0 +2023-03-27 08:18:59,399 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.129e+02 1.601e+02 1.876e+02 2.140e+02 5.128e+02, threshold=3.753e+02, percent-clipped=1.0 +2023-03-27 08:19:24,866 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=149307.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:19:25,470 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([5.3906, 4.7183, 4.9473, 5.2176, 5.0449, 4.7613, 5.5446, 1.6663], + device='cuda:5'), covar=tensor([0.0790, 0.0811, 0.0722, 0.0888, 0.1373, 0.1614, 0.0526, 0.6078], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0248, 0.0284, 0.0296, 0.0336, 0.0288, 0.0304, 0.0299], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:19:32,720 INFO [finetune.py:976] (5/7) Epoch 27, batch 400, loss[loss=0.1783, simple_loss=0.2517, pruned_loss=0.05245, over 4698.00 frames. 
], tot_loss[loss=0.1718, simple_loss=0.2448, pruned_loss=0.04937, over 827878.19 frames. ], batch size: 59, lr: 2.94e-03, grad_scale: 8.0 +2023-03-27 08:19:53,717 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5024, 1.4246, 1.9654, 2.9357, 1.9694, 2.1565, 1.1610, 2.5470], + device='cuda:5'), covar=tensor([0.1728, 0.1422, 0.1127, 0.0576, 0.0865, 0.1455, 0.1556, 0.0552], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0132, 0.0163, 0.0100, 0.0135, 0.0124, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 08:20:03,791 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5453, 1.5983, 1.3572, 1.5486, 1.9099, 1.8670, 1.5463, 1.4282], + device='cuda:5'), covar=tensor([0.0376, 0.0337, 0.0612, 0.0319, 0.0215, 0.0478, 0.0357, 0.0413], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0105, 0.0146, 0.0111, 0.0101, 0.0115, 0.0103, 0.0112], + device='cuda:5'), out_proj_covar=tensor([7.7670e-05, 8.0649e-05, 1.1386e-04, 8.4714e-05, 7.8015e-05, 8.4485e-05, + 7.6825e-05, 8.4839e-05], device='cuda:5') +2023-03-27 08:20:07,341 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=149355.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:20:08,590 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7972, 1.6922, 2.1432, 3.4862, 2.3981, 2.4183, 1.1914, 2.9856], + device='cuda:5'), covar=tensor([0.1581, 0.1295, 0.1265, 0.0491, 0.0731, 0.1432, 0.1745, 0.0380], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0132, 0.0163, 0.0100, 0.0135, 0.0124, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 08:20:16,469 INFO [finetune.py:976] (5/7) Epoch 27, batch 450, loss[loss=0.1817, simple_loss=0.2509, pruned_loss=0.05626, over 4747.00 frames. ], tot_loss[loss=0.1704, simple_loss=0.2429, pruned_loss=0.04894, over 855225.03 frames. ], batch size: 26, lr: 2.94e-03, grad_scale: 8.0 +2023-03-27 08:20:16,817 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-03-27 08:20:17,066 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.956e+01 1.496e+02 1.736e+02 2.126e+02 4.914e+02, threshold=3.471e+02, percent-clipped=1.0 +2023-03-27 08:20:17,793 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=149372.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:20:32,774 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.66 vs. limit=5.0 +2023-03-27 08:20:51,768 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.4266, 3.8214, 4.0494, 4.2555, 4.1348, 3.9121, 4.4985, 1.5515], + device='cuda:5'), covar=tensor([0.0852, 0.0979, 0.0928, 0.1026, 0.1372, 0.1733, 0.0745, 0.5837], + device='cuda:5'), in_proj_covar=tensor([0.0352, 0.0249, 0.0285, 0.0298, 0.0338, 0.0289, 0.0306, 0.0301], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:21:04,695 INFO [finetune.py:976] (5/7) Epoch 27, batch 500, loss[loss=0.2208, simple_loss=0.2739, pruned_loss=0.0838, over 4813.00 frames. ], tot_loss[loss=0.1694, simple_loss=0.2411, pruned_loss=0.04886, over 879501.73 frames. 
], batch size: 39, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:21:04,764 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=149420.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:21:17,766 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-03-27 08:21:30,146 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.21 vs. limit=5.0 +2023-03-27 08:21:33,305 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.86 vs. limit=5.0 +2023-03-27 08:21:38,465 INFO [finetune.py:976] (5/7) Epoch 27, batch 550, loss[loss=0.1854, simple_loss=0.2544, pruned_loss=0.05819, over 4898.00 frames. ], tot_loss[loss=0.1683, simple_loss=0.2393, pruned_loss=0.04867, over 897139.71 frames. ], batch size: 36, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:21:39,002 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-03-27 08:21:39,061 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.918e+01 1.466e+02 1.717e+02 2.125e+02 3.295e+02, threshold=3.435e+02, percent-clipped=0.0 +2023-03-27 08:22:08,674 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149514.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:22:12,134 INFO [finetune.py:976] (5/7) Epoch 27, batch 600, loss[loss=0.1363, simple_loss=0.2152, pruned_loss=0.02869, over 4799.00 frames. ], tot_loss[loss=0.1701, simple_loss=0.241, pruned_loss=0.04964, over 906715.17 frames. ], batch size: 29, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:22:40,015 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8348, 1.7732, 2.1152, 3.1395, 2.2548, 2.3856, 1.3864, 2.7298], + device='cuda:5'), covar=tensor([0.1417, 0.1057, 0.1095, 0.0522, 0.0685, 0.1442, 0.1423, 0.0442], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0131, 0.0163, 0.0100, 0.0135, 0.0124, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 08:22:45,172 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149565.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:22:48,088 INFO [finetune.py:976] (5/7) Epoch 27, batch 650, loss[loss=0.1984, simple_loss=0.2674, pruned_loss=0.06468, over 4903.00 frames. ], tot_loss[loss=0.1707, simple_loss=0.2429, pruned_loss=0.04931, over 916958.64 frames. ], batch size: 36, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:22:53,184 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.097e+02 1.592e+02 1.975e+02 2.434e+02 4.045e+02, threshold=3.949e+02, percent-clipped=4.0 +2023-03-27 08:22:55,795 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149575.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:23:29,820 INFO [finetune.py:976] (5/7) Epoch 27, batch 700, loss[loss=0.2368, simple_loss=0.3034, pruned_loss=0.08505, over 4886.00 frames. ], tot_loss[loss=0.1712, simple_loss=0.2439, pruned_loss=0.04922, over 924896.34 frames. 
], batch size: 35, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:23:33,656 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149626.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 08:23:34,257 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.5863, 1.6193, 1.7360, 0.8793, 1.8702, 2.0990, 2.0061, 1.5182], + device='cuda:5'), covar=tensor([0.0878, 0.0694, 0.0506, 0.0608, 0.0491, 0.0623, 0.0360, 0.0762], + device='cuda:5'), in_proj_covar=tensor([0.0121, 0.0147, 0.0128, 0.0122, 0.0130, 0.0129, 0.0142, 0.0149], + device='cuda:5'), out_proj_covar=tensor([8.8205e-05, 1.0592e-04, 9.1076e-05, 8.6032e-05, 9.1016e-05, 9.1457e-05, + 1.0076e-04, 1.0694e-04], device='cuda:5') +2023-03-27 08:23:49,512 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7965, 1.9800, 1.5940, 1.6068, 2.3199, 2.1837, 1.9011, 1.7993], + device='cuda:5'), covar=tensor([0.0381, 0.0320, 0.0639, 0.0358, 0.0249, 0.0769, 0.0362, 0.0399], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0106, 0.0146, 0.0111, 0.0101, 0.0115, 0.0103, 0.0112], + device='cuda:5'), out_proj_covar=tensor([7.7944e-05, 8.0678e-05, 1.1404e-04, 8.4701e-05, 7.8103e-05, 8.4929e-05, + 7.6760e-05, 8.5247e-05], device='cuda:5') +2023-03-27 08:23:55,091 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-03-27 08:24:03,075 INFO [finetune.py:976] (5/7) Epoch 27, batch 750, loss[loss=0.1564, simple_loss=0.232, pruned_loss=0.04042, over 4897.00 frames. ], tot_loss[loss=0.1711, simple_loss=0.2442, pruned_loss=0.04899, over 930612.92 frames. ], batch size: 43, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:24:03,700 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.028e+02 1.520e+02 1.783e+02 2.094e+02 3.998e+02, threshold=3.567e+02, percent-clipped=1.0 +2023-03-27 08:24:36,878 INFO [finetune.py:976] (5/7) Epoch 27, batch 800, loss[loss=0.15, simple_loss=0.228, pruned_loss=0.03604, over 4849.00 frames. ], tot_loss[loss=0.1708, simple_loss=0.2438, pruned_loss=0.04891, over 935832.86 frames. 
], batch size: 44, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:24:47,266 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7917, 1.7348, 1.9140, 1.2697, 1.8680, 1.9431, 1.8874, 1.5071], + device='cuda:5'), covar=tensor([0.0528, 0.0618, 0.0579, 0.0764, 0.0736, 0.0567, 0.0567, 0.1133], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0137, 0.0141, 0.0119, 0.0129, 0.0139, 0.0141, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:24:54,913 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149746.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 08:25:15,262 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2165, 2.2577, 1.9245, 2.4128, 2.7793, 2.3364, 2.6341, 1.6944], + device='cuda:5'), covar=tensor([0.2431, 0.1919, 0.2073, 0.1665, 0.1863, 0.1163, 0.1838, 0.2142], + device='cuda:5'), in_proj_covar=tensor([0.0244, 0.0210, 0.0214, 0.0197, 0.0244, 0.0191, 0.0216, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:25:15,880 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3133, 2.1585, 2.0159, 2.3323, 2.7925, 2.2661, 2.4397, 1.8085], + device='cuda:5'), covar=tensor([0.2079, 0.1870, 0.1774, 0.1516, 0.1556, 0.1174, 0.1817, 0.1810], + device='cuda:5'), in_proj_covar=tensor([0.0244, 0.0210, 0.0214, 0.0197, 0.0244, 0.0191, 0.0216, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:25:20,610 INFO [finetune.py:976] (5/7) Epoch 27, batch 850, loss[loss=0.1627, simple_loss=0.2423, pruned_loss=0.04152, over 4753.00 frames. ], tot_loss[loss=0.1689, simple_loss=0.2416, pruned_loss=0.04804, over 939445.06 frames. ], batch size: 54, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:25:21,213 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.021e+02 1.421e+02 1.714e+02 1.950e+02 4.580e+02, threshold=3.429e+02, percent-clipped=2.0 +2023-03-27 08:25:47,370 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.54 vs. limit=5.0 +2023-03-27 08:25:56,770 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149807.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 08:26:07,326 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6617, 1.4965, 2.0218, 3.4223, 2.3594, 2.3968, 1.0952, 2.9751], + device='cuda:5'), covar=tensor([0.1644, 0.1339, 0.1316, 0.0548, 0.0751, 0.1563, 0.1682, 0.0444], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0132, 0.0163, 0.0101, 0.0136, 0.0124, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 08:26:09,035 INFO [finetune.py:976] (5/7) Epoch 27, batch 900, loss[loss=0.1767, simple_loss=0.2405, pruned_loss=0.05645, over 4633.00 frames. ], tot_loss[loss=0.1665, simple_loss=0.2383, pruned_loss=0.04732, over 939441.13 frames. 
], batch size: 23, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:26:09,137 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5192, 1.5027, 1.9531, 1.6964, 1.5392, 3.4401, 1.3339, 1.6139], + device='cuda:5'), covar=tensor([0.0986, 0.1758, 0.1240, 0.0924, 0.1591, 0.0217, 0.1496, 0.1758], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0081, 0.0073, 0.0076, 0.0090, 0.0080, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 08:26:42,227 INFO [finetune.py:976] (5/7) Epoch 27, batch 950, loss[loss=0.1727, simple_loss=0.2408, pruned_loss=0.05235, over 4852.00 frames. ], tot_loss[loss=0.1673, simple_loss=0.2383, pruned_loss=0.04812, over 942279.43 frames. ], batch size: 49, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:26:42,298 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=149870.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:26:42,815 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.619e+01 1.503e+02 1.866e+02 2.296e+02 3.689e+02, threshold=3.732e+02, percent-clipped=3.0 +2023-03-27 08:27:15,535 INFO [finetune.py:976] (5/7) Epoch 27, batch 1000, loss[loss=0.1652, simple_loss=0.2487, pruned_loss=0.04083, over 4819.00 frames. ], tot_loss[loss=0.1681, simple_loss=0.24, pruned_loss=0.04813, over 945271.56 frames. ], batch size: 30, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:27:16,181 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=149921.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 08:27:48,833 INFO [finetune.py:976] (5/7) Epoch 27, batch 1050, loss[loss=0.2163, simple_loss=0.2842, pruned_loss=0.07418, over 4850.00 frames. ], tot_loss[loss=0.1708, simple_loss=0.2428, pruned_loss=0.04938, over 945548.41 frames. ], batch size: 44, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:27:49,417 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.130e+02 1.561e+02 1.767e+02 2.240e+02 3.870e+02, threshold=3.534e+02, percent-clipped=1.0 +2023-03-27 08:28:33,251 INFO [finetune.py:976] (5/7) Epoch 27, batch 1100, loss[loss=0.14, simple_loss=0.2268, pruned_loss=0.02662, over 4912.00 frames. ], tot_loss[loss=0.1716, simple_loss=0.2441, pruned_loss=0.04954, over 947889.77 frames. ], batch size: 36, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:28:54,448 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3791, 2.3111, 1.9270, 2.4971, 2.3894, 2.0535, 2.7174, 2.3811], + device='cuda:5'), covar=tensor([0.1316, 0.2074, 0.2945, 0.2365, 0.2469, 0.1728, 0.3017, 0.1734], + device='cuda:5'), in_proj_covar=tensor([0.0189, 0.0189, 0.0237, 0.0253, 0.0248, 0.0207, 0.0214, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:28:56,796 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2697, 3.6925, 3.9363, 4.1376, 4.0440, 3.7272, 4.3484, 1.3120], + device='cuda:5'), covar=tensor([0.0730, 0.0831, 0.0827, 0.0875, 0.1162, 0.1598, 0.0650, 0.5963], + device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0247, 0.0282, 0.0295, 0.0334, 0.0286, 0.0304, 0.0301], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:29:06,474 INFO [finetune.py:976] (5/7) Epoch 27, batch 1150, loss[loss=0.1683, simple_loss=0.2335, pruned_loss=0.05153, over 4886.00 frames. 
], tot_loss[loss=0.1738, simple_loss=0.2464, pruned_loss=0.05056, over 950095.54 frames. ], batch size: 35, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:29:07,081 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.913e+01 1.470e+02 1.766e+02 2.217e+02 3.439e+02, threshold=3.531e+02, percent-clipped=0.0 +2023-03-27 08:29:13,024 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150079.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:29:26,980 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150102.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 08:29:33,059 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6525, 3.9525, 3.7288, 2.0507, 4.0658, 2.9407, 1.2506, 2.8409], + device='cuda:5'), covar=tensor([0.2387, 0.1724, 0.1688, 0.2998, 0.0901, 0.1016, 0.3743, 0.1311], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0179, 0.0160, 0.0129, 0.0161, 0.0124, 0.0148, 0.0125], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 08:29:39,271 INFO [finetune.py:976] (5/7) Epoch 27, batch 1200, loss[loss=0.1622, simple_loss=0.2359, pruned_loss=0.04431, over 4897.00 frames. ], tot_loss[loss=0.1721, simple_loss=0.2444, pruned_loss=0.04995, over 951461.31 frames. ], batch size: 36, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:29:52,968 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150140.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:30:14,459 INFO [finetune.py:976] (5/7) Epoch 27, batch 1250, loss[loss=0.2079, simple_loss=0.236, pruned_loss=0.0899, over 4049.00 frames. ], tot_loss[loss=0.1705, simple_loss=0.2423, pruned_loss=0.04939, over 952532.40 frames. ], batch size: 17, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:30:15,060 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150170.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:30:15,534 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.012e+02 1.554e+02 1.886e+02 2.235e+02 6.588e+02, threshold=3.772e+02, percent-clipped=2.0 +2023-03-27 08:30:26,186 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-03-27 08:30:35,253 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7119, 2.4154, 2.0368, 1.0145, 2.1953, 2.1121, 1.9017, 2.2567], + device='cuda:5'), covar=tensor([0.0938, 0.0905, 0.1773, 0.2169, 0.1613, 0.2231, 0.2324, 0.1063], + device='cuda:5'), in_proj_covar=tensor([0.0172, 0.0192, 0.0201, 0.0182, 0.0209, 0.0211, 0.0224, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:30:56,392 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=150218.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:30:57,558 INFO [finetune.py:976] (5/7) Epoch 27, batch 1300, loss[loss=0.1177, simple_loss=0.1973, pruned_loss=0.01899, over 4832.00 frames. ], tot_loss[loss=0.1669, simple_loss=0.2384, pruned_loss=0.04771, over 951295.18 frames. 
], batch size: 30, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:31:03,326 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150221.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:31:37,441 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.5164, 1.4852, 1.4771, 0.9163, 1.6034, 1.8692, 1.8155, 1.4136], + device='cuda:5'), covar=tensor([0.0937, 0.0708, 0.0577, 0.0583, 0.0474, 0.0575, 0.0335, 0.0795], + device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0147, 0.0128, 0.0123, 0.0130, 0.0130, 0.0142, 0.0150], + device='cuda:5'), out_proj_covar=tensor([8.8647e-05, 1.0591e-04, 9.1289e-05, 8.6387e-05, 9.1325e-05, 9.1664e-05, + 1.0084e-04, 1.0736e-04], device='cuda:5') +2023-03-27 08:31:42,216 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=150269.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:31:42,764 INFO [finetune.py:976] (5/7) Epoch 27, batch 1350, loss[loss=0.1624, simple_loss=0.219, pruned_loss=0.05294, over 4698.00 frames. ], tot_loss[loss=0.167, simple_loss=0.2382, pruned_loss=0.04789, over 952421.64 frames. ], batch size: 23, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:31:43,346 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.009e+02 1.453e+02 1.768e+02 2.125e+02 3.830e+02, threshold=3.537e+02, percent-clipped=1.0 +2023-03-27 08:31:46,301 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150274.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:32:04,659 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.41 vs. limit=5.0 +2023-03-27 08:32:16,595 INFO [finetune.py:976] (5/7) Epoch 27, batch 1400, loss[loss=0.1608, simple_loss=0.2438, pruned_loss=0.03887, over 4873.00 frames. ], tot_loss[loss=0.1685, simple_loss=0.2407, pruned_loss=0.04812, over 955137.77 frames. ], batch size: 34, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:32:28,258 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150335.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:32:36,629 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0398, 2.6787, 2.5745, 1.3380, 2.6842, 2.2234, 2.1732, 2.5173], + device='cuda:5'), covar=tensor([0.1063, 0.0799, 0.1783, 0.2307, 0.1708, 0.2336, 0.2057, 0.1255], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0191, 0.0200, 0.0181, 0.0208, 0.0211, 0.0223, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:32:40,234 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3998, 1.2764, 1.2445, 1.2942, 1.6249, 1.5698, 1.3738, 1.2150], + device='cuda:5'), covar=tensor([0.0355, 0.0316, 0.0610, 0.0316, 0.0221, 0.0433, 0.0336, 0.0430], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0105, 0.0146, 0.0111, 0.0100, 0.0115, 0.0103, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.7772e-05, 8.0639e-05, 1.1357e-04, 8.4556e-05, 7.7810e-05, 8.4787e-05, + 7.6725e-05, 8.5407e-05], device='cuda:5') +2023-03-27 08:32:49,840 INFO [finetune.py:976] (5/7) Epoch 27, batch 1450, loss[loss=0.1579, simple_loss=0.233, pruned_loss=0.04142, over 4795.00 frames. ], tot_loss[loss=0.1693, simple_loss=0.2424, pruned_loss=0.04806, over 956024.56 frames. 
], batch size: 26, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:32:50,438 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.045e+02 1.587e+02 1.925e+02 2.309e+02 4.827e+02, threshold=3.851e+02, percent-clipped=3.0 +2023-03-27 08:33:02,111 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5821, 1.5595, 1.4719, 1.6446, 1.2317, 3.5635, 1.2905, 1.7900], + device='cuda:5'), covar=tensor([0.3370, 0.2502, 0.2196, 0.2421, 0.1675, 0.0186, 0.2729, 0.1299], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0121, 0.0124, 0.0113, 0.0095, 0.0094, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 08:33:13,555 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150402.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 08:33:29,004 INFO [finetune.py:976] (5/7) Epoch 27, batch 1500, loss[loss=0.2077, simple_loss=0.2725, pruned_loss=0.07146, over 4911.00 frames. ], tot_loss[loss=0.1724, simple_loss=0.2451, pruned_loss=0.04984, over 955541.27 frames. ], batch size: 37, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:33:42,992 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150435.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:33:49,512 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6462, 1.2461, 0.8643, 1.5212, 2.0507, 1.5438, 1.5535, 1.6070], + device='cuda:5'), covar=tensor([0.1632, 0.2154, 0.1966, 0.1288, 0.2021, 0.1975, 0.1471, 0.2034], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0093, 0.0109, 0.0091, 0.0119, 0.0093, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 08:33:53,600 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=150450.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 08:34:05,548 INFO [finetune.py:976] (5/7) Epoch 27, batch 1550, loss[loss=0.1526, simple_loss=0.2263, pruned_loss=0.03939, over 4770.00 frames. ], tot_loss[loss=0.1727, simple_loss=0.2455, pruned_loss=0.04999, over 957072.60 frames. ], batch size: 26, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:34:06,130 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.290e+01 1.580e+02 1.863e+02 2.206e+02 4.598e+02, threshold=3.727e+02, percent-clipped=2.0 +2023-03-27 08:34:14,094 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1077, 2.0583, 2.1714, 1.3974, 2.0880, 2.1570, 2.1686, 1.7614], + device='cuda:5'), covar=tensor([0.0556, 0.0667, 0.0640, 0.0862, 0.0696, 0.0636, 0.0624, 0.1170], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0137, 0.0142, 0.0120, 0.0128, 0.0139, 0.0141, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:34:38,731 INFO [finetune.py:976] (5/7) Epoch 27, batch 1600, loss[loss=0.1756, simple_loss=0.246, pruned_loss=0.05259, over 4913.00 frames. ], tot_loss[loss=0.1708, simple_loss=0.2436, pruned_loss=0.04906, over 957713.21 frames. ], batch size: 43, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:34:50,082 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150537.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:35:11,519 INFO [finetune.py:976] (5/7) Epoch 27, batch 1650, loss[loss=0.1249, simple_loss=0.1968, pruned_loss=0.02652, over 4782.00 frames. 
], tot_loss[loss=0.1688, simple_loss=0.241, pruned_loss=0.04829, over 958762.82 frames. ], batch size: 29, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:35:11,656 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0844, 0.9453, 0.9188, 1.1764, 1.2392, 1.2080, 1.0063, 0.9446], + device='cuda:5'), covar=tensor([0.0470, 0.0360, 0.0698, 0.0327, 0.0351, 0.0408, 0.0399, 0.0450], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0106, 0.0146, 0.0111, 0.0101, 0.0115, 0.0104, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.7689e-05, 8.0848e-05, 1.1382e-04, 8.4640e-05, 7.7959e-05, 8.4976e-05, + 7.6980e-05, 8.5676e-05], device='cuda:5') +2023-03-27 08:35:12,138 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.130e+02 1.535e+02 1.741e+02 2.182e+02 5.670e+02, threshold=3.482e+02, percent-clipped=1.0 +2023-03-27 08:35:37,666 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150598.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:35:45,405 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5743, 2.8967, 2.6709, 1.8526, 2.6675, 2.9111, 2.8507, 2.3807], + device='cuda:5'), covar=tensor([0.0591, 0.0521, 0.0656, 0.0833, 0.0714, 0.0638, 0.0585, 0.0961], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0137, 0.0141, 0.0119, 0.0128, 0.0138, 0.0140, 0.0161], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:35:54,938 INFO [finetune.py:976] (5/7) Epoch 27, batch 1700, loss[loss=0.1613, simple_loss=0.2362, pruned_loss=0.04316, over 4915.00 frames. ], tot_loss[loss=0.1681, simple_loss=0.2395, pruned_loss=0.04836, over 958196.32 frames. ], batch size: 36, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:35:55,690 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4320, 2.4101, 2.0544, 2.3930, 2.3789, 2.3039, 2.2964, 3.2355], + device='cuda:5'), covar=tensor([0.3539, 0.4740, 0.3368, 0.4152, 0.3984, 0.2578, 0.4367, 0.1586], + device='cuda:5'), in_proj_covar=tensor([0.0290, 0.0265, 0.0237, 0.0277, 0.0260, 0.0230, 0.0260, 0.0239], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:36:01,036 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150630.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:36:41,949 INFO [finetune.py:976] (5/7) Epoch 27, batch 1750, loss[loss=0.1399, simple_loss=0.2092, pruned_loss=0.03533, over 4761.00 frames. ], tot_loss[loss=0.169, simple_loss=0.241, pruned_loss=0.04852, over 958120.36 frames. 
], batch size: 26, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:36:42,539 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.512e+01 1.530e+02 1.821e+02 2.198e+02 3.521e+02, threshold=3.642e+02, percent-clipped=1.0 +2023-03-27 08:36:50,599 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3055, 1.3520, 1.3300, 0.9296, 1.3904, 1.5851, 1.6647, 1.2500], + device='cuda:5'), covar=tensor([0.0996, 0.0669, 0.0614, 0.0441, 0.0507, 0.0618, 0.0366, 0.0781], + device='cuda:5'), in_proj_covar=tensor([0.0121, 0.0147, 0.0128, 0.0122, 0.0130, 0.0129, 0.0141, 0.0149], + device='cuda:5'), out_proj_covar=tensor([8.8337e-05, 1.0537e-04, 9.1037e-05, 8.5955e-05, 9.1176e-05, 9.1219e-05, + 1.0039e-04, 1.0701e-04], device='cuda:5') +2023-03-27 08:36:54,029 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4254, 1.3790, 1.8322, 1.7630, 1.5919, 3.3509, 1.3553, 1.5158], + device='cuda:5'), covar=tensor([0.1064, 0.1855, 0.1163, 0.0948, 0.1608, 0.0233, 0.1593, 0.1908], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0073, 0.0076, 0.0091, 0.0080, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 08:37:05,023 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-03-27 08:37:13,925 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.64 vs. limit=2.0 +2023-03-27 08:37:15,431 INFO [finetune.py:976] (5/7) Epoch 27, batch 1800, loss[loss=0.1727, simple_loss=0.2448, pruned_loss=0.05031, over 4923.00 frames. ], tot_loss[loss=0.1704, simple_loss=0.2432, pruned_loss=0.04878, over 958190.22 frames. ], batch size: 33, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:37:23,562 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0759, 1.7474, 2.1936, 1.4003, 2.0481, 2.1471, 1.6060, 2.3505], + device='cuda:5'), covar=tensor([0.1200, 0.2052, 0.1431, 0.2011, 0.0952, 0.1446, 0.2784, 0.0793], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0207, 0.0194, 0.0192, 0.0175, 0.0214, 0.0219, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:37:27,761 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150732.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:37:33,306 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150735.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:37:38,145 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2563, 1.2543, 1.4447, 1.0040, 1.1856, 1.3106, 1.2297, 1.4551], + device='cuda:5'), covar=tensor([0.0881, 0.1604, 0.1085, 0.1194, 0.0728, 0.0940, 0.2629, 0.0723], + device='cuda:5'), in_proj_covar=tensor([0.0193, 0.0208, 0.0195, 0.0192, 0.0176, 0.0215, 0.0219, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:37:51,388 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2790, 1.8415, 1.9492, 0.9328, 2.2237, 2.4473, 2.0598, 1.7876], + device='cuda:5'), covar=tensor([0.0919, 0.0815, 0.0550, 0.0778, 0.0582, 0.0597, 0.0476, 0.0790], + device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0147, 0.0128, 0.0123, 0.0131, 0.0129, 0.0142, 0.0150], + device='cuda:5'), out_proj_covar=tensor([8.8628e-05, 1.0569e-04, 9.1365e-05, 8.6301e-05, 9.1764e-05, 9.1537e-05, + 1.0113e-04, 1.0732e-04], 
device='cuda:5') +2023-03-27 08:37:57,191 INFO [finetune.py:976] (5/7) Epoch 27, batch 1850, loss[loss=0.2473, simple_loss=0.3102, pruned_loss=0.09218, over 4831.00 frames. ], tot_loss[loss=0.1725, simple_loss=0.2455, pruned_loss=0.04973, over 957361.91 frames. ], batch size: 47, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:37:57,787 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.063e+02 1.537e+02 1.800e+02 2.248e+02 4.542e+02, threshold=3.600e+02, percent-clipped=6.0 +2023-03-27 08:38:05,039 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=150783.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:38:11,140 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150793.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 08:38:30,230 INFO [finetune.py:976] (5/7) Epoch 27, batch 1900, loss[loss=0.1584, simple_loss=0.2325, pruned_loss=0.0422, over 4913.00 frames. ], tot_loss[loss=0.1724, simple_loss=0.2456, pruned_loss=0.0496, over 954912.97 frames. ], batch size: 42, lr: 2.93e-03, grad_scale: 8.0 +2023-03-27 08:38:30,939 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2635, 1.4881, 0.7917, 1.9911, 2.4837, 1.8816, 1.8397, 1.8880], + device='cuda:5'), covar=tensor([0.1356, 0.2029, 0.1997, 0.1118, 0.1784, 0.1817, 0.1346, 0.1919], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0093, 0.0109, 0.0091, 0.0119, 0.0093, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 08:38:37,355 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-03-27 08:39:14,069 INFO [finetune.py:976] (5/7) Epoch 27, batch 1950, loss[loss=0.2057, simple_loss=0.2658, pruned_loss=0.07281, over 4780.00 frames. ], tot_loss[loss=0.1713, simple_loss=0.2446, pruned_loss=0.04896, over 956955.97 frames. ], batch size: 51, lr: 2.92e-03, grad_scale: 8.0 +2023-03-27 08:39:14,651 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.033e+02 1.460e+02 1.651e+02 1.933e+02 3.642e+02, threshold=3.302e+02, percent-clipped=1.0 +2023-03-27 08:39:28,776 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150893.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:39:28,835 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3999, 2.2022, 1.7916, 0.7970, 1.9038, 1.9074, 1.7916, 2.0639], + device='cuda:5'), covar=tensor([0.0821, 0.0805, 0.1458, 0.2072, 0.1320, 0.2139, 0.2050, 0.0909], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0191, 0.0201, 0.0182, 0.0209, 0.0211, 0.0224, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:39:33,113 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150900.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:39:47,874 INFO [finetune.py:976] (5/7) Epoch 27, batch 2000, loss[loss=0.2365, simple_loss=0.2782, pruned_loss=0.09742, over 4147.00 frames. ], tot_loss[loss=0.1701, simple_loss=0.2424, pruned_loss=0.04891, over 957403.87 frames. 
], batch size: 65, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:39:54,524 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150930.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:40:03,198 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9405, 1.7460, 1.6067, 1.3763, 1.7316, 1.7280, 1.7523, 2.2839], + device='cuda:5'), covar=tensor([0.3895, 0.3918, 0.3428, 0.3731, 0.3618, 0.2526, 0.3424, 0.1889], + device='cuda:5'), in_proj_covar=tensor([0.0290, 0.0264, 0.0237, 0.0277, 0.0261, 0.0230, 0.0259, 0.0239], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:40:15,359 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150961.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:40:18,284 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.4667, 1.5551, 1.5475, 0.8776, 1.6179, 1.8856, 1.8547, 1.4355], + device='cuda:5'), covar=tensor([0.0958, 0.0590, 0.0557, 0.0555, 0.0481, 0.0508, 0.0333, 0.0661], + device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0147, 0.0128, 0.0123, 0.0131, 0.0130, 0.0142, 0.0150], + device='cuda:5'), out_proj_covar=tensor([8.8670e-05, 1.0558e-04, 9.1136e-05, 8.6289e-05, 9.1982e-05, 9.1704e-05, + 1.0080e-04, 1.0736e-04], device='cuda:5') +2023-03-27 08:40:21,603 INFO [finetune.py:976] (5/7) Epoch 27, batch 2050, loss[loss=0.1989, simple_loss=0.2597, pruned_loss=0.06908, over 4857.00 frames. ], tot_loss[loss=0.1676, simple_loss=0.2392, pruned_loss=0.04797, over 955160.29 frames. ], batch size: 49, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:40:22,191 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.304e+01 1.432e+02 1.658e+02 2.071e+02 3.830e+02, threshold=3.317e+02, percent-clipped=1.0 +2023-03-27 08:40:27,052 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=150978.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:40:36,700 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6725, 1.7393, 1.4430, 1.7320, 2.0443, 2.0885, 1.7151, 1.5343], + device='cuda:5'), covar=tensor([0.0349, 0.0321, 0.0647, 0.0304, 0.0208, 0.0424, 0.0343, 0.0437], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0106, 0.0147, 0.0112, 0.0102, 0.0116, 0.0104, 0.0114], + device='cuda:5'), out_proj_covar=tensor([7.8164e-05, 8.1364e-05, 1.1474e-04, 8.5394e-05, 7.8712e-05, 8.5236e-05, + 7.7060e-05, 8.6400e-05], device='cuda:5') +2023-03-27 08:40:37,108 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-03-27 08:40:56,344 INFO [finetune.py:976] (5/7) Epoch 27, batch 2100, loss[loss=0.1649, simple_loss=0.2366, pruned_loss=0.04664, over 4872.00 frames. ], tot_loss[loss=0.168, simple_loss=0.2397, pruned_loss=0.04819, over 954956.60 frames. ], batch size: 31, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:41:47,144 INFO [finetune.py:976] (5/7) Epoch 27, batch 2150, loss[loss=0.1714, simple_loss=0.2481, pruned_loss=0.04731, over 4821.00 frames. ], tot_loss[loss=0.1701, simple_loss=0.2423, pruned_loss=0.04889, over 953237.72 frames. 
], batch size: 38, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:41:48,300 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.039e+02 1.525e+02 1.813e+02 2.166e+02 3.448e+02, threshold=3.626e+02, percent-clipped=1.0 +2023-03-27 08:42:02,497 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151088.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 08:42:23,617 INFO [finetune.py:976] (5/7) Epoch 27, batch 2200, loss[loss=0.1876, simple_loss=0.2727, pruned_loss=0.0513, over 4726.00 frames. ], tot_loss[loss=0.1711, simple_loss=0.2437, pruned_loss=0.04923, over 953748.34 frames. ], batch size: 54, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:42:41,312 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6459, 1.8841, 1.5312, 1.5996, 2.1316, 2.1184, 1.8544, 1.7852], + device='cuda:5'), covar=tensor([0.0502, 0.0374, 0.0615, 0.0409, 0.0375, 0.0655, 0.0389, 0.0460], + device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0107, 0.0148, 0.0113, 0.0103, 0.0117, 0.0104, 0.0115], + device='cuda:5'), out_proj_covar=tensor([7.8810e-05, 8.1916e-05, 1.1545e-04, 8.6028e-05, 7.9441e-05, 8.6094e-05, + 7.7458e-05, 8.6921e-05], device='cuda:5') +2023-03-27 08:43:04,290 INFO [finetune.py:976] (5/7) Epoch 27, batch 2250, loss[loss=0.1586, simple_loss=0.2359, pruned_loss=0.04068, over 4908.00 frames. ], tot_loss[loss=0.1713, simple_loss=0.2443, pruned_loss=0.04914, over 953137.54 frames. ], batch size: 43, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:43:04,889 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.120e+01 1.457e+02 1.754e+02 2.221e+02 3.820e+02, threshold=3.509e+02, percent-clipped=1.0 +2023-03-27 08:43:14,794 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151184.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:43:20,787 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151193.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:43:37,565 INFO [finetune.py:976] (5/7) Epoch 27, batch 2300, loss[loss=0.1763, simple_loss=0.2432, pruned_loss=0.05469, over 4829.00 frames. ], tot_loss[loss=0.1706, simple_loss=0.2438, pruned_loss=0.04867, over 952599.07 frames. ], batch size: 30, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:43:51,734 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=151241.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:43:56,885 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151245.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:44:06,984 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151256.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:44:08,929 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.52 vs. limit=5.0 +2023-03-27 08:44:18,929 INFO [finetune.py:976] (5/7) Epoch 27, batch 2350, loss[loss=0.1445, simple_loss=0.2158, pruned_loss=0.0366, over 4754.00 frames. ], tot_loss[loss=0.1698, simple_loss=0.2427, pruned_loss=0.04846, over 954400.28 frames. 
], batch size: 28, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:44:19,965 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 7.208e+01 1.503e+02 1.827e+02 2.189e+02 3.264e+02, threshold=3.653e+02, percent-clipped=0.0 +2023-03-27 08:44:28,775 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151282.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:44:39,437 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151298.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:44:52,609 INFO [finetune.py:976] (5/7) Epoch 27, batch 2400, loss[loss=0.1485, simple_loss=0.2203, pruned_loss=0.03834, over 4744.00 frames. ], tot_loss[loss=0.1686, simple_loss=0.2408, pruned_loss=0.04825, over 954954.66 frames. ], batch size: 27, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:45:06,815 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1500, 1.7812, 2.0625, 2.1280, 1.8504, 1.8925, 2.0968, 1.9542], + device='cuda:5'), covar=tensor([0.5048, 0.4950, 0.4224, 0.5017, 0.6299, 0.5337, 0.6242, 0.3965], + device='cuda:5'), in_proj_covar=tensor([0.0266, 0.0248, 0.0269, 0.0297, 0.0296, 0.0273, 0.0302, 0.0254], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:45:09,237 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151343.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:45:19,404 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151359.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:45:22,473 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8577, 1.6758, 1.5235, 1.9535, 2.1686, 1.9121, 1.4478, 1.5340], + device='cuda:5'), covar=tensor([0.2109, 0.1844, 0.1924, 0.1635, 0.1447, 0.1147, 0.2345, 0.1902], + device='cuda:5'), in_proj_covar=tensor([0.0246, 0.0211, 0.0214, 0.0200, 0.0247, 0.0191, 0.0218, 0.0206], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:45:26,016 INFO [finetune.py:976] (5/7) Epoch 27, batch 2450, loss[loss=0.1782, simple_loss=0.251, pruned_loss=0.05271, over 4910.00 frames. ], tot_loss[loss=0.1679, simple_loss=0.2392, pruned_loss=0.04823, over 955657.96 frames. ], batch size: 36, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:45:26,602 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.046e+02 1.415e+02 1.689e+02 1.968e+02 4.441e+02, threshold=3.378e+02, percent-clipped=1.0 +2023-03-27 08:45:38,491 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151388.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:45:55,925 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7149, 3.6638, 3.5238, 1.9893, 3.8770, 2.9277, 0.9886, 2.6434], + device='cuda:5'), covar=tensor([0.2631, 0.1828, 0.1449, 0.3151, 0.0882, 0.0976, 0.4331, 0.1555], + device='cuda:5'), in_proj_covar=tensor([0.0148, 0.0178, 0.0159, 0.0129, 0.0160, 0.0123, 0.0148, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 08:45:58,930 INFO [finetune.py:976] (5/7) Epoch 27, batch 2500, loss[loss=0.1569, simple_loss=0.2333, pruned_loss=0.0403, over 4766.00 frames. ], tot_loss[loss=0.1692, simple_loss=0.2407, pruned_loss=0.0489, over 955291.58 frames. 
], batch size: 26, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:46:12,821 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=151436.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:46:48,697 INFO [finetune.py:976] (5/7) Epoch 27, batch 2550, loss[loss=0.2067, simple_loss=0.28, pruned_loss=0.06667, over 4920.00 frames. ], tot_loss[loss=0.1714, simple_loss=0.2435, pruned_loss=0.04963, over 954613.38 frames. ], batch size: 42, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:46:49,280 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.209e+01 1.472e+02 1.881e+02 2.470e+02 3.912e+02, threshold=3.762e+02, percent-clipped=2.0 +2023-03-27 08:46:56,412 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-03-27 08:47:24,842 INFO [finetune.py:976] (5/7) Epoch 27, batch 2600, loss[loss=0.2088, simple_loss=0.2769, pruned_loss=0.07034, over 4825.00 frames. ], tot_loss[loss=0.1738, simple_loss=0.246, pruned_loss=0.05077, over 956189.28 frames. ], batch size: 39, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:47:42,187 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151540.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:48:03,484 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151556.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:48:10,392 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-27 08:48:16,181 INFO [finetune.py:976] (5/7) Epoch 27, batch 2650, loss[loss=0.1932, simple_loss=0.26, pruned_loss=0.0632, over 4888.00 frames. ], tot_loss[loss=0.1737, simple_loss=0.2464, pruned_loss=0.05053, over 955225.15 frames. ], batch size: 35, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:48:16,784 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.199e+02 1.649e+02 1.887e+02 2.270e+02 4.456e+02, threshold=3.774e+02, percent-clipped=3.0 +2023-03-27 08:48:39,922 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=151604.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:48:49,946 INFO [finetune.py:976] (5/7) Epoch 27, batch 2700, loss[loss=0.1806, simple_loss=0.2485, pruned_loss=0.05636, over 4861.00 frames. ], tot_loss[loss=0.1725, simple_loss=0.2452, pruned_loss=0.04996, over 954520.39 frames. ], batch size: 34, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:49:01,310 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151638.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:49:03,204 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0179, 2.1202, 1.6788, 1.8061, 2.3952, 2.5277, 2.1772, 1.9647], + device='cuda:5'), covar=tensor([0.0408, 0.0352, 0.0618, 0.0378, 0.0273, 0.0598, 0.0313, 0.0419], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0106, 0.0146, 0.0111, 0.0101, 0.0115, 0.0103, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.7605e-05, 8.0999e-05, 1.1402e-04, 8.4815e-05, 7.8258e-05, 8.5017e-05, + 7.6351e-05, 8.5796e-05], device='cuda:5') +2023-03-27 08:49:12,824 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151654.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:49:29,780 INFO [finetune.py:976] (5/7) Epoch 27, batch 2750, loss[loss=0.1816, simple_loss=0.2397, pruned_loss=0.0618, over 4831.00 frames. ], tot_loss[loss=0.1705, simple_loss=0.2424, pruned_loss=0.04928, over 952114.54 frames. 
], batch size: 33, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:49:30,373 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.940e+01 1.418e+02 1.693e+02 2.178e+02 3.976e+02, threshold=3.385e+02, percent-clipped=1.0 +2023-03-27 08:49:44,698 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151688.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:49:45,295 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4464, 1.5952, 2.0062, 1.6804, 1.7260, 3.5330, 1.5155, 1.6942], + device='cuda:5'), covar=tensor([0.1144, 0.2014, 0.1208, 0.1105, 0.1789, 0.0286, 0.1644, 0.2112], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0083, 0.0074, 0.0076, 0.0092, 0.0080, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0005], + device='cuda:5') +2023-03-27 08:50:06,331 INFO [finetune.py:976] (5/7) Epoch 27, batch 2800, loss[loss=0.1568, simple_loss=0.2306, pruned_loss=0.04147, over 4852.00 frames. ], tot_loss[loss=0.1686, simple_loss=0.2403, pruned_loss=0.04848, over 954036.62 frames. ], batch size: 44, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:50:12,929 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1492, 3.5900, 3.8122, 3.9597, 3.9081, 3.6541, 4.2029, 1.3848], + device='cuda:5'), covar=tensor([0.0906, 0.0966, 0.0996, 0.1115, 0.1198, 0.1701, 0.0858, 0.5696], + device='cuda:5'), in_proj_covar=tensor([0.0353, 0.0249, 0.0282, 0.0297, 0.0335, 0.0288, 0.0307, 0.0302], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:50:24,980 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151749.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:50:39,488 INFO [finetune.py:976] (5/7) Epoch 27, batch 2850, loss[loss=0.1491, simple_loss=0.2253, pruned_loss=0.03646, over 4821.00 frames. ], tot_loss[loss=0.1679, simple_loss=0.2393, pruned_loss=0.04821, over 955899.39 frames. ], batch size: 39, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:50:40,098 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.092e+02 1.485e+02 1.795e+02 2.169e+02 3.375e+02, threshold=3.589e+02, percent-clipped=0.0 +2023-03-27 08:50:51,028 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7027, 1.4853, 1.8730, 1.1882, 1.6552, 1.7882, 1.4072, 2.0290], + device='cuda:5'), covar=tensor([0.1234, 0.2047, 0.1362, 0.1786, 0.0866, 0.1210, 0.2909, 0.0828], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0207, 0.0193, 0.0190, 0.0175, 0.0213, 0.0218, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:50:59,490 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151801.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:51:12,921 INFO [finetune.py:976] (5/7) Epoch 27, batch 2900, loss[loss=0.1729, simple_loss=0.233, pruned_loss=0.05636, over 4746.00 frames. ], tot_loss[loss=0.1702, simple_loss=0.2428, pruned_loss=0.04885, over 955687.38 frames. 
], batch size: 23, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:51:25,537 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151840.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:51:34,461 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2764, 2.8789, 2.7588, 1.1273, 3.0081, 2.1883, 0.8030, 1.8479], + device='cuda:5'), covar=tensor([0.2486, 0.2444, 0.1939, 0.4026, 0.1481, 0.1251, 0.4192, 0.1795], + device='cuda:5'), in_proj_covar=tensor([0.0149, 0.0179, 0.0160, 0.0130, 0.0160, 0.0124, 0.0148, 0.0125], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 08:51:54,450 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151862.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:51:56,255 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0411, 1.3319, 0.7330, 1.8212, 2.3685, 1.8092, 1.5832, 1.8001], + device='cuda:5'), covar=tensor([0.1445, 0.1963, 0.2016, 0.1126, 0.1789, 0.1827, 0.1381, 0.1831], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0092, 0.0107, 0.0090, 0.0117, 0.0091, 0.0097, 0.0087], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 08:52:04,136 INFO [finetune.py:976] (5/7) Epoch 27, batch 2950, loss[loss=0.1754, simple_loss=0.2562, pruned_loss=0.0473, over 4904.00 frames. ], tot_loss[loss=0.1712, simple_loss=0.2441, pruned_loss=0.04915, over 954481.89 frames. ], batch size: 37, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:52:04,751 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.132e+02 1.531e+02 1.876e+02 2.281e+02 4.815e+02, threshold=3.752e+02, percent-clipped=2.0 +2023-03-27 08:52:15,643 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=151888.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:52:26,517 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5809, 1.5225, 2.2491, 1.8220, 1.6885, 3.7463, 1.4220, 1.6508], + device='cuda:5'), covar=tensor([0.0931, 0.1707, 0.1224, 0.0865, 0.1525, 0.0214, 0.1418, 0.1700], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0073, 0.0076, 0.0091, 0.0080, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 08:52:37,408 INFO [finetune.py:976] (5/7) Epoch 27, batch 3000, loss[loss=0.1183, simple_loss=0.1939, pruned_loss=0.02133, over 4777.00 frames. ], tot_loss[loss=0.1728, simple_loss=0.2461, pruned_loss=0.04976, over 955795.64 frames. ], batch size: 26, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:52:37,408 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-27 08:52:50,760 INFO [finetune.py:1010] (5/7) Epoch 27, validation: loss=0.1572, simple_loss=0.2248, pruned_loss=0.04486, over 2265189.00 frames. 
+2023-03-27 08:52:50,760 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-27 08:53:01,271 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1984, 2.3413, 1.9233, 2.4306, 2.1889, 2.1481, 2.1798, 3.0165], + device='cuda:5'), covar=tensor([0.3868, 0.4832, 0.3321, 0.3962, 0.4303, 0.2507, 0.4199, 0.1718], + device='cuda:5'), in_proj_covar=tensor([0.0289, 0.0263, 0.0236, 0.0274, 0.0260, 0.0229, 0.0258, 0.0238], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:53:01,836 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151938.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:53:14,183 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151954.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:53:32,162 INFO [finetune.py:976] (5/7) Epoch 27, batch 3050, loss[loss=0.1977, simple_loss=0.2641, pruned_loss=0.06569, over 4811.00 frames. ], tot_loss[loss=0.1722, simple_loss=0.2457, pruned_loss=0.04932, over 957748.12 frames. ], batch size: 33, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:53:32,747 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.022e+02 1.546e+02 1.837e+02 2.199e+02 4.500e+02, threshold=3.674e+02, percent-clipped=2.0 +2023-03-27 08:53:44,360 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=151986.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:53:55,280 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0198, 4.7514, 4.5546, 2.8148, 4.8647, 3.8186, 0.8845, 3.5111], + device='cuda:5'), covar=tensor([0.2234, 0.1883, 0.1273, 0.2839, 0.0707, 0.0734, 0.4595, 0.1413], + device='cuda:5'), in_proj_covar=tensor([0.0149, 0.0178, 0.0159, 0.0129, 0.0160, 0.0123, 0.0148, 0.0124], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 08:53:55,893 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=152002.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:54:06,689 INFO [finetune.py:976] (5/7) Epoch 27, batch 3100, loss[loss=0.1226, simple_loss=0.2007, pruned_loss=0.02231, over 4768.00 frames. ], tot_loss[loss=0.1708, simple_loss=0.2443, pruned_loss=0.04865, over 957995.80 frames. ], batch size: 28, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:54:23,682 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152044.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:54:29,205 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-03-27 08:54:32,328 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-03-27 08:54:41,699 INFO [finetune.py:976] (5/7) Epoch 27, batch 3150, loss[loss=0.138, simple_loss=0.2198, pruned_loss=0.02806, over 4754.00 frames. ], tot_loss[loss=0.1682, simple_loss=0.241, pruned_loss=0.04766, over 957528.15 frames. 
], batch size: 28, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:54:42,282 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.945e+01 1.491e+02 1.827e+02 2.202e+02 3.039e+02, threshold=3.654e+02, percent-clipped=0.0 +2023-03-27 08:55:06,928 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1920, 2.0779, 2.0115, 2.3939, 2.6508, 2.3453, 2.1841, 1.9185], + device='cuda:5'), covar=tensor([0.2110, 0.1882, 0.1782, 0.1568, 0.1493, 0.1045, 0.1973, 0.1778], + device='cuda:5'), in_proj_covar=tensor([0.0249, 0.0214, 0.0217, 0.0202, 0.0250, 0.0193, 0.0221, 0.0208], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:55:21,806 INFO [finetune.py:976] (5/7) Epoch 27, batch 3200, loss[loss=0.1478, simple_loss=0.2153, pruned_loss=0.04021, over 4718.00 frames. ], tot_loss[loss=0.1651, simple_loss=0.237, pruned_loss=0.04656, over 957098.63 frames. ], batch size: 59, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:55:46,799 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152157.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:55:54,667 INFO [finetune.py:976] (5/7) Epoch 27, batch 3250, loss[loss=0.1328, simple_loss=0.2122, pruned_loss=0.0267, over 4892.00 frames. ], tot_loss[loss=0.1652, simple_loss=0.2368, pruned_loss=0.04682, over 956702.60 frames. ], batch size: 32, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:55:55,262 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.795e+01 1.453e+02 1.756e+02 2.073e+02 3.538e+02, threshold=3.512e+02, percent-clipped=0.0 +2023-03-27 08:56:32,281 INFO [finetune.py:976] (5/7) Epoch 27, batch 3300, loss[loss=0.1476, simple_loss=0.2039, pruned_loss=0.04563, over 4279.00 frames. ], tot_loss[loss=0.1674, simple_loss=0.2398, pruned_loss=0.04752, over 955657.96 frames. ], batch size: 18, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:56:35,420 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152225.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:56:45,509 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6369, 1.4349, 0.9762, 0.2716, 1.1704, 1.4693, 1.3853, 1.3113], + device='cuda:5'), covar=tensor([0.0905, 0.0950, 0.1409, 0.1957, 0.1541, 0.2714, 0.2582, 0.0895], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0190, 0.0200, 0.0180, 0.0208, 0.0210, 0.0224, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 08:57:13,838 INFO [finetune.py:976] (5/7) Epoch 27, batch 3350, loss[loss=0.1675, simple_loss=0.236, pruned_loss=0.04954, over 4853.00 frames. ], tot_loss[loss=0.1698, simple_loss=0.2427, pruned_loss=0.0484, over 955361.26 frames. ], batch size: 31, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:57:14,395 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.581e+01 1.606e+02 1.884e+02 2.337e+02 3.345e+02, threshold=3.768e+02, percent-clipped=0.0 +2023-03-27 08:57:20,881 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152273.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:57:33,587 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152286.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:58:01,024 INFO [finetune.py:976] (5/7) Epoch 27, batch 3400, loss[loss=0.1878, simple_loss=0.2631, pruned_loss=0.05619, over 4757.00 frames. 
], tot_loss[loss=0.1709, simple_loss=0.244, pruned_loss=0.04887, over 956303.68 frames. ], batch size: 54, lr: 2.92e-03, grad_scale: 16.0 +2023-03-27 08:58:09,662 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152334.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:58:16,672 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152344.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:58:36,174 INFO [finetune.py:976] (5/7) Epoch 27, batch 3450, loss[loss=0.1818, simple_loss=0.2359, pruned_loss=0.06378, over 4889.00 frames. ], tot_loss[loss=0.171, simple_loss=0.244, pruned_loss=0.04902, over 955040.54 frames. ], batch size: 35, lr: 2.91e-03, grad_scale: 16.0 +2023-03-27 08:58:36,744 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.916e+01 1.467e+02 1.787e+02 2.252e+02 4.149e+02, threshold=3.573e+02, percent-clipped=3.0 +2023-03-27 08:58:58,955 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=152392.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:59:17,725 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5841, 1.6452, 2.2745, 1.9226, 1.9680, 4.3919, 1.6951, 1.8394], + device='cuda:5'), covar=tensor([0.0953, 0.1863, 0.1072, 0.0950, 0.1503, 0.0156, 0.1409, 0.1712], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0083, 0.0074, 0.0077, 0.0092, 0.0080, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0005], + device='cuda:5') +2023-03-27 08:59:18,828 INFO [finetune.py:976] (5/7) Epoch 27, batch 3500, loss[loss=0.14, simple_loss=0.2059, pruned_loss=0.03703, over 4947.00 frames. ], tot_loss[loss=0.1702, simple_loss=0.2428, pruned_loss=0.04883, over 955881.12 frames. ], batch size: 33, lr: 2.91e-03, grad_scale: 16.0 +2023-03-27 08:59:34,581 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152445.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:59:43,787 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152457.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 08:59:52,115 INFO [finetune.py:976] (5/7) Epoch 27, batch 3550, loss[loss=0.138, simple_loss=0.2079, pruned_loss=0.03405, over 4908.00 frames. ], tot_loss[loss=0.1683, simple_loss=0.2402, pruned_loss=0.0482, over 954950.18 frames. ], batch size: 32, lr: 2.91e-03, grad_scale: 16.0 +2023-03-27 08:59:52,707 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.528e+01 1.381e+02 1.664e+02 2.040e+02 3.997e+02, threshold=3.328e+02, percent-clipped=1.0 +2023-03-27 09:00:25,617 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=152505.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:00:26,805 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152506.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:00:36,269 INFO [finetune.py:976] (5/7) Epoch 27, batch 3600, loss[loss=0.1669, simple_loss=0.2515, pruned_loss=0.04114, over 4806.00 frames. ], tot_loss[loss=0.1661, simple_loss=0.2376, pruned_loss=0.04728, over 955720.68 frames. ], batch size: 45, lr: 2.91e-03, grad_scale: 16.0 +2023-03-27 09:01:10,218 INFO [finetune.py:976] (5/7) Epoch 27, batch 3650, loss[loss=0.1464, simple_loss=0.2172, pruned_loss=0.0378, over 4761.00 frames. ], tot_loss[loss=0.1681, simple_loss=0.2399, pruned_loss=0.04814, over 954138.76 frames. 
], batch size: 28, lr: 2.91e-03, grad_scale: 16.0 +2023-03-27 09:01:10,829 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.982e+01 1.594e+02 1.907e+02 2.265e+02 4.160e+02, threshold=3.814e+02, percent-clipped=3.0 +2023-03-27 09:01:17,562 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152581.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:01:18,190 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152582.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:01:18,823 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3467, 2.2757, 2.0440, 1.1504, 2.1952, 1.8779, 1.7476, 2.2198], + device='cuda:5'), covar=tensor([0.0925, 0.0753, 0.1637, 0.1941, 0.1330, 0.1969, 0.2102, 0.0866], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0191, 0.0201, 0.0182, 0.0209, 0.0211, 0.0225, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:01:22,487 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152589.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:01:29,814 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1549, 1.6207, 1.8037, 0.8404, 2.0379, 2.3405, 1.8994, 1.7065], + device='cuda:5'), covar=tensor([0.1206, 0.1264, 0.0678, 0.0860, 0.0766, 0.0813, 0.0699, 0.1000], + device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0147, 0.0130, 0.0123, 0.0132, 0.0131, 0.0142, 0.0151], + device='cuda:5'), out_proj_covar=tensor([8.8967e-05, 1.0578e-04, 9.2803e-05, 8.6171e-05, 9.2185e-05, 9.2475e-05, + 1.0091e-04, 1.0774e-04], device='cuda:5') +2023-03-27 09:01:46,436 INFO [finetune.py:976] (5/7) Epoch 27, batch 3700, loss[loss=0.1634, simple_loss=0.2512, pruned_loss=0.03781, over 4854.00 frames. ], tot_loss[loss=0.1691, simple_loss=0.2417, pruned_loss=0.04827, over 954461.32 frames. ], batch size: 49, lr: 2.91e-03, grad_scale: 16.0 +2023-03-27 09:01:52,533 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152629.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:02:01,193 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152643.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:02:02,375 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.3107, 2.9437, 3.0916, 3.2469, 3.0855, 2.8945, 3.3595, 0.9438], + device='cuda:5'), covar=tensor([0.1128, 0.1018, 0.1068, 0.1100, 0.1642, 0.1919, 0.1118, 0.5672], + device='cuda:5'), in_proj_covar=tensor([0.0351, 0.0246, 0.0281, 0.0294, 0.0332, 0.0287, 0.0303, 0.0300], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:02:05,483 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152650.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:02:22,199 INFO [finetune.py:976] (5/7) Epoch 27, batch 3750, loss[loss=0.1904, simple_loss=0.2572, pruned_loss=0.06179, over 4809.00 frames. ], tot_loss[loss=0.1692, simple_loss=0.2423, pruned_loss=0.04804, over 955044.81 frames. 
], batch size: 40, lr: 2.91e-03, grad_scale: 16.0 +2023-03-27 09:02:22,800 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.041e+02 1.491e+02 1.751e+02 2.166e+02 4.226e+02, threshold=3.502e+02, percent-clipped=3.0 +2023-03-27 09:03:12,606 INFO [finetune.py:976] (5/7) Epoch 27, batch 3800, loss[loss=0.1666, simple_loss=0.2561, pruned_loss=0.03855, over 4844.00 frames. ], tot_loss[loss=0.1705, simple_loss=0.2441, pruned_loss=0.04848, over 955422.37 frames. ], batch size: 44, lr: 2.91e-03, grad_scale: 16.0 +2023-03-27 09:03:16,895 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152726.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:03:45,579 INFO [finetune.py:976] (5/7) Epoch 27, batch 3850, loss[loss=0.2439, simple_loss=0.3005, pruned_loss=0.09364, over 4814.00 frames. ], tot_loss[loss=0.1701, simple_loss=0.2434, pruned_loss=0.04843, over 956052.95 frames. ], batch size: 38, lr: 2.91e-03, grad_scale: 16.0 +2023-03-27 09:03:46,654 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.206e+01 1.335e+02 1.631e+02 2.144e+02 3.589e+02, threshold=3.262e+02, percent-clipped=1.0 +2023-03-27 09:03:59,986 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152787.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:04:12,777 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152801.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:04:22,911 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6461, 1.6255, 1.5599, 1.6346, 1.1034, 3.2972, 1.2735, 1.5806], + device='cuda:5'), covar=tensor([0.3224, 0.2543, 0.2026, 0.2293, 0.1740, 0.0243, 0.2584, 0.1255], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0121, 0.0124, 0.0113, 0.0095, 0.0094, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 09:04:28,203 INFO [finetune.py:976] (5/7) Epoch 27, batch 3900, loss[loss=0.18, simple_loss=0.2361, pruned_loss=0.06189, over 4217.00 frames. ], tot_loss[loss=0.1678, simple_loss=0.2403, pruned_loss=0.0476, over 955106.30 frames. ], batch size: 18, lr: 2.91e-03, grad_scale: 16.0 +2023-03-27 09:04:50,173 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.4686, 1.5351, 1.5645, 0.8956, 1.6574, 1.9058, 1.8392, 1.4601], + device='cuda:5'), covar=tensor([0.0849, 0.0605, 0.0492, 0.0524, 0.0467, 0.0489, 0.0285, 0.0598], + device='cuda:5'), in_proj_covar=tensor([0.0123, 0.0149, 0.0131, 0.0124, 0.0133, 0.0132, 0.0144, 0.0152], + device='cuda:5'), out_proj_covar=tensor([8.9888e-05, 1.0664e-04, 9.3337e-05, 8.6866e-05, 9.3022e-05, 9.3541e-05, + 1.0206e-04, 1.0862e-04], device='cuda:5') +2023-03-27 09:04:53,202 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7020, 1.6569, 1.9159, 1.2872, 1.5979, 1.9233, 1.5307, 2.1287], + device='cuda:5'), covar=tensor([0.1326, 0.2022, 0.1254, 0.1671, 0.1051, 0.1343, 0.2752, 0.0806], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0207, 0.0193, 0.0189, 0.0175, 0.0214, 0.0217, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:05:01,433 INFO [finetune.py:976] (5/7) Epoch 27, batch 3950, loss[loss=0.1587, simple_loss=0.2329, pruned_loss=0.04223, over 4823.00 frames. ], tot_loss[loss=0.1665, simple_loss=0.2383, pruned_loss=0.04738, over 954762.76 frames. 
], batch size: 41, lr: 2.91e-03, grad_scale: 16.0 +2023-03-27 09:05:02,041 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.044e+02 1.440e+02 1.686e+02 2.039e+02 3.105e+02, threshold=3.372e+02, percent-clipped=0.0 +2023-03-27 09:05:09,733 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152881.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:05:10,375 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1726, 1.9868, 1.8842, 2.1383, 1.9034, 4.8376, 1.9342, 2.2986], + device='cuda:5'), covar=tensor([0.2994, 0.2354, 0.1937, 0.2188, 0.1399, 0.0089, 0.2165, 0.1150], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0121, 0.0124, 0.0113, 0.0095, 0.0094, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 09:05:43,212 INFO [finetune.py:976] (5/7) Epoch 27, batch 4000, loss[loss=0.2019, simple_loss=0.2662, pruned_loss=0.0688, over 4754.00 frames. ], tot_loss[loss=0.167, simple_loss=0.2385, pruned_loss=0.04777, over 953856.64 frames. ], batch size: 59, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:05:49,733 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=152929.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:05:49,760 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152929.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:05:53,842 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7671, 1.6246, 1.5478, 1.6463, 1.2731, 4.0692, 1.5470, 1.7762], + device='cuda:5'), covar=tensor([0.3181, 0.2486, 0.2113, 0.2387, 0.1731, 0.0168, 0.2554, 0.1329], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0117, 0.0121, 0.0124, 0.0113, 0.0095, 0.0094, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 09:05:56,139 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152938.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:06:00,343 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152945.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:06:13,718 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4874, 1.4261, 1.3173, 1.6097, 1.5441, 1.6029, 0.9813, 1.3338], + device='cuda:5'), covar=tensor([0.2386, 0.2200, 0.2096, 0.1725, 0.1753, 0.1299, 0.2779, 0.2017], + device='cuda:5'), in_proj_covar=tensor([0.0248, 0.0212, 0.0217, 0.0201, 0.0247, 0.0192, 0.0220, 0.0207], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:06:16,490 INFO [finetune.py:976] (5/7) Epoch 27, batch 4050, loss[loss=0.1638, simple_loss=0.2315, pruned_loss=0.04802, over 4899.00 frames. ], tot_loss[loss=0.1691, simple_loss=0.241, pruned_loss=0.04857, over 953431.69 frames. 
], batch size: 35, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:06:17,094 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.013e+02 1.469e+02 1.767e+02 2.180e+02 3.425e+02, threshold=3.534e+02, percent-clipped=1.0 +2023-03-27 09:06:20,836 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=152977.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:06:21,997 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8860, 1.3817, 0.6949, 1.6903, 2.2233, 1.4862, 1.6947, 1.7121], + device='cuda:5'), covar=tensor([0.1513, 0.2076, 0.2076, 0.1233, 0.2022, 0.1960, 0.1371, 0.1937], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0093, 0.0109, 0.0091, 0.0119, 0.0093, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 09:06:45,421 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-03-27 09:06:49,257 INFO [finetune.py:976] (5/7) Epoch 27, batch 4100, loss[loss=0.205, simple_loss=0.2833, pruned_loss=0.06334, over 4919.00 frames. ], tot_loss[loss=0.1714, simple_loss=0.2438, pruned_loss=0.04954, over 952382.38 frames. ], batch size: 42, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:06:55,372 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-03-27 09:07:21,109 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8900, 1.7717, 1.6275, 1.7238, 2.1844, 2.1913, 1.8459, 1.6110], + device='cuda:5'), covar=tensor([0.0375, 0.0344, 0.0644, 0.0356, 0.0236, 0.0405, 0.0407, 0.0428], + device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0107, 0.0149, 0.0113, 0.0102, 0.0116, 0.0105, 0.0114], + device='cuda:5'), out_proj_covar=tensor([7.8594e-05, 8.1523e-05, 1.1568e-04, 8.6050e-05, 7.9094e-05, 8.5677e-05, + 7.7650e-05, 8.6534e-05], device='cuda:5') +2023-03-27 09:07:22,845 INFO [finetune.py:976] (5/7) Epoch 27, batch 4150, loss[loss=0.1764, simple_loss=0.2598, pruned_loss=0.04643, over 4930.00 frames. ], tot_loss[loss=0.1728, simple_loss=0.2456, pruned_loss=0.05002, over 951286.52 frames. ], batch size: 42, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:07:23,441 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.058e+02 1.644e+02 1.926e+02 2.373e+02 3.999e+02, threshold=3.851e+02, percent-clipped=3.0 +2023-03-27 09:07:31,217 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153082.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:07:51,129 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153101.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:08:02,773 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.7917, 3.3554, 3.5239, 3.6747, 3.5737, 3.3346, 3.8659, 1.1792], + device='cuda:5'), covar=tensor([0.0892, 0.0836, 0.0976, 0.1007, 0.1295, 0.1766, 0.0910, 0.5829], + device='cuda:5'), in_proj_covar=tensor([0.0353, 0.0249, 0.0284, 0.0296, 0.0334, 0.0288, 0.0306, 0.0304], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:08:07,059 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153117.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:08:13,348 INFO [finetune.py:976] (5/7) Epoch 27, batch 4200, loss[loss=0.1522, simple_loss=0.2301, pruned_loss=0.03711, over 4771.00 frames. 
], tot_loss[loss=0.1721, simple_loss=0.2453, pruned_loss=0.04939, over 951856.16 frames. ], batch size: 26, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:08:26,229 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6035, 1.6941, 2.0734, 1.8653, 1.8516, 3.3033, 1.5563, 1.8355], + device='cuda:5'), covar=tensor([0.0961, 0.1671, 0.0977, 0.0889, 0.1463, 0.0352, 0.1420, 0.1539], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0083, 0.0073, 0.0076, 0.0091, 0.0080, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0005], + device='cuda:5') +2023-03-27 09:08:36,712 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=153149.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:08:48,680 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.4502, 3.8679, 4.0965, 4.1297, 4.2515, 4.0384, 4.5140, 1.8424], + device='cuda:5'), covar=tensor([0.0717, 0.0775, 0.0819, 0.0961, 0.1004, 0.1224, 0.0631, 0.4778], + device='cuda:5'), in_proj_covar=tensor([0.0353, 0.0248, 0.0284, 0.0295, 0.0334, 0.0289, 0.0305, 0.0304], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:08:49,826 INFO [finetune.py:976] (5/7) Epoch 27, batch 4250, loss[loss=0.1286, simple_loss=0.2096, pruned_loss=0.02382, over 4788.00 frames. ], tot_loss[loss=0.17, simple_loss=0.2428, pruned_loss=0.04859, over 951333.55 frames. ], batch size: 29, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:08:50,416 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.682e+01 1.570e+02 1.909e+02 2.227e+02 3.978e+02, threshold=3.818e+02, percent-clipped=1.0 +2023-03-27 09:08:54,792 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153178.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:08:57,083 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3873, 1.4055, 1.6069, 1.0854, 1.2907, 1.5310, 1.3434, 1.6729], + device='cuda:5'), covar=tensor([0.1189, 0.2038, 0.1231, 0.1452, 0.0972, 0.1173, 0.2870, 0.0830], + device='cuda:5'), in_proj_covar=tensor([0.0194, 0.0208, 0.0194, 0.0190, 0.0175, 0.0215, 0.0218, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:09:15,758 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.7942, 3.3042, 3.4820, 3.6424, 3.5693, 3.3718, 3.8789, 1.2179], + device='cuda:5'), covar=tensor([0.0920, 0.0903, 0.0889, 0.0977, 0.1327, 0.1580, 0.0859, 0.5601], + device='cuda:5'), in_proj_covar=tensor([0.0353, 0.0248, 0.0283, 0.0295, 0.0333, 0.0288, 0.0305, 0.0303], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:09:26,310 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.92 vs. limit=5.0 +2023-03-27 09:09:33,255 INFO [finetune.py:976] (5/7) Epoch 27, batch 4300, loss[loss=0.1469, simple_loss=0.2202, pruned_loss=0.03677, over 4860.00 frames. ], tot_loss[loss=0.168, simple_loss=0.2401, pruned_loss=0.04797, over 951598.58 frames. 
], batch size: 31, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:09:44,685 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153238.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:09:49,804 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153245.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:10:06,730 INFO [finetune.py:976] (5/7) Epoch 27, batch 4350, loss[loss=0.1803, simple_loss=0.249, pruned_loss=0.05578, over 4910.00 frames. ], tot_loss[loss=0.1655, simple_loss=0.2374, pruned_loss=0.04676, over 953421.83 frames. ], batch size: 37, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:10:07,328 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.429e+01 1.453e+02 1.753e+02 2.112e+02 4.699e+02, threshold=3.507e+02, percent-clipped=1.0 +2023-03-27 09:10:11,678 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153278.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:10:16,464 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=153286.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:10:21,205 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=153293.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:10:41,647 INFO [finetune.py:976] (5/7) Epoch 27, batch 4400, loss[loss=0.1647, simple_loss=0.2475, pruned_loss=0.04097, over 4860.00 frames. ], tot_loss[loss=0.1677, simple_loss=0.2396, pruned_loss=0.04791, over 952595.46 frames. ], batch size: 44, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:11:01,178 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153339.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:11:23,047 INFO [finetune.py:976] (5/7) Epoch 27, batch 4450, loss[loss=0.148, simple_loss=0.2357, pruned_loss=0.03013, over 4761.00 frames. ], tot_loss[loss=0.1687, simple_loss=0.2417, pruned_loss=0.04791, over 951861.59 frames. ], batch size: 28, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:11:23,635 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.005e+02 1.494e+02 1.793e+02 2.132e+02 3.020e+02, threshold=3.586e+02, percent-clipped=0.0 +2023-03-27 09:11:30,907 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153382.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:11:56,785 INFO [finetune.py:976] (5/7) Epoch 27, batch 4500, loss[loss=0.2281, simple_loss=0.2942, pruned_loss=0.08098, over 4893.00 frames. ], tot_loss[loss=0.1709, simple_loss=0.2442, pruned_loss=0.04885, over 952875.25 frames. ], batch size: 43, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:11:57,471 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153421.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:12:03,376 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=153430.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:12:12,346 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.59 vs. limit=5.0 +2023-03-27 09:12:29,936 INFO [finetune.py:976] (5/7) Epoch 27, batch 4550, loss[loss=0.1791, simple_loss=0.2423, pruned_loss=0.05791, over 4727.00 frames. ], tot_loss[loss=0.1717, simple_loss=0.2452, pruned_loss=0.0491, over 952746.90 frames. 
], batch size: 23, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:12:30,053 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.1283, 2.9149, 2.6290, 1.5066, 2.7951, 2.1962, 2.1997, 2.5572], + device='cuda:5'), covar=tensor([0.1075, 0.0709, 0.1819, 0.1950, 0.1630, 0.2381, 0.2050, 0.1119], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0191, 0.0201, 0.0182, 0.0211, 0.0211, 0.0224, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:12:30,508 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.518e+01 1.590e+02 1.867e+02 2.229e+02 3.919e+02, threshold=3.734e+02, percent-clipped=1.0 +2023-03-27 09:12:31,761 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153473.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:12:33,682 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6927, 0.7698, 1.7197, 1.7092, 1.5911, 1.5102, 1.6072, 1.6913], + device='cuda:5'), covar=tensor([0.3404, 0.3545, 0.3317, 0.3175, 0.4375, 0.3506, 0.4110, 0.2913], + device='cuda:5'), in_proj_covar=tensor([0.0265, 0.0246, 0.0268, 0.0297, 0.0294, 0.0272, 0.0301, 0.0252], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:12:37,684 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153482.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:13:14,340 INFO [finetune.py:976] (5/7) Epoch 27, batch 4600, loss[loss=0.193, simple_loss=0.2508, pruned_loss=0.06759, over 4894.00 frames. ], tot_loss[loss=0.1722, simple_loss=0.2456, pruned_loss=0.04938, over 953317.93 frames. ], batch size: 43, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:13:30,985 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8869, 1.6624, 1.5304, 1.5814, 2.0765, 2.0443, 1.6981, 1.5583], + device='cuda:5'), covar=tensor([0.0276, 0.0315, 0.0630, 0.0366, 0.0201, 0.0389, 0.0388, 0.0428], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0106, 0.0148, 0.0112, 0.0101, 0.0115, 0.0104, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.8415e-05, 8.1054e-05, 1.1503e-04, 8.5623e-05, 7.8413e-05, 8.5102e-05, + 7.6897e-05, 8.5929e-05], device='cuda:5') +2023-03-27 09:13:56,974 INFO [finetune.py:976] (5/7) Epoch 27, batch 4650, loss[loss=0.1458, simple_loss=0.2162, pruned_loss=0.03767, over 4846.00 frames. ], tot_loss[loss=0.1696, simple_loss=0.2424, pruned_loss=0.04834, over 954873.71 frames. ], batch size: 44, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:13:57,586 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.034e+02 1.461e+02 1.737e+02 2.165e+02 3.643e+02, threshold=3.475e+02, percent-clipped=0.0 +2023-03-27 09:14:30,261 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4166, 2.3034, 1.7768, 0.8387, 1.8346, 1.8989, 1.8063, 2.0228], + device='cuda:5'), covar=tensor([0.0869, 0.0762, 0.1648, 0.2132, 0.1541, 0.2510, 0.2212, 0.1009], + device='cuda:5'), in_proj_covar=tensor([0.0172, 0.0192, 0.0202, 0.0183, 0.0212, 0.0211, 0.0225, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:14:31,814 INFO [finetune.py:976] (5/7) Epoch 27, batch 4700, loss[loss=0.1982, simple_loss=0.2525, pruned_loss=0.07192, over 4857.00 frames. ], tot_loss[loss=0.1672, simple_loss=0.2393, pruned_loss=0.04754, over 956033.47 frames. 
], batch size: 47, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:14:47,930 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153634.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:14:53,331 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6078, 1.5043, 1.4449, 1.5848, 1.3062, 3.4408, 1.3686, 1.7142], + device='cuda:5'), covar=tensor([0.3340, 0.2662, 0.2199, 0.2429, 0.1598, 0.0223, 0.2515, 0.1281], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0120, 0.0124, 0.0113, 0.0095, 0.0094, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 09:15:12,023 INFO [finetune.py:976] (5/7) Epoch 27, batch 4750, loss[loss=0.1305, simple_loss=0.1895, pruned_loss=0.03577, over 4274.00 frames. ], tot_loss[loss=0.1667, simple_loss=0.2381, pruned_loss=0.04767, over 955035.71 frames. ], batch size: 18, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:15:13,078 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.023e+02 1.473e+02 1.795e+02 2.173e+02 4.465e+02, threshold=3.590e+02, percent-clipped=3.0 +2023-03-27 09:15:28,189 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8381, 1.6205, 1.4040, 1.2910, 1.8610, 1.6055, 2.0768, 1.8404], + device='cuda:5'), covar=tensor([0.1448, 0.1919, 0.3347, 0.2628, 0.2769, 0.1683, 0.2237, 0.1820], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0191, 0.0237, 0.0255, 0.0251, 0.0208, 0.0217, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:15:45,873 INFO [finetune.py:976] (5/7) Epoch 27, batch 4800, loss[loss=0.2287, simple_loss=0.3005, pruned_loss=0.07841, over 4896.00 frames. ], tot_loss[loss=0.1676, simple_loss=0.2395, pruned_loss=0.04788, over 955502.98 frames. ], batch size: 32, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:16:28,411 INFO [finetune.py:976] (5/7) Epoch 27, batch 4850, loss[loss=0.1676, simple_loss=0.2545, pruned_loss=0.04039, over 4789.00 frames. ], tot_loss[loss=0.1696, simple_loss=0.2422, pruned_loss=0.04851, over 955925.38 frames. ], batch size: 29, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:16:28,977 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.544e+02 1.777e+02 2.223e+02 4.381e+02, threshold=3.554e+02, percent-clipped=2.0 +2023-03-27 09:16:30,768 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153773.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:16:33,640 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153777.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:16:37,260 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5585, 2.0516, 2.8361, 1.9305, 2.4322, 2.8102, 1.9100, 2.7875], + device='cuda:5'), covar=tensor([0.1358, 0.2136, 0.1502, 0.1922, 0.1004, 0.1481, 0.2984, 0.0906], + device='cuda:5'), in_proj_covar=tensor([0.0194, 0.0208, 0.0195, 0.0190, 0.0175, 0.0215, 0.0219, 0.0200], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:17:00,917 INFO [finetune.py:976] (5/7) Epoch 27, batch 4900, loss[loss=0.1953, simple_loss=0.2589, pruned_loss=0.06583, over 4687.00 frames. ], tot_loss[loss=0.1715, simple_loss=0.2444, pruned_loss=0.04927, over 958104.68 frames. 
], batch size: 23, lr: 2.91e-03, grad_scale: 32.0 +2023-03-27 09:17:02,076 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=153821.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:17:20,910 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-03-27 09:17:34,599 INFO [finetune.py:976] (5/7) Epoch 27, batch 4950, loss[loss=0.1817, simple_loss=0.259, pruned_loss=0.05219, over 4882.00 frames. ], tot_loss[loss=0.173, simple_loss=0.2459, pruned_loss=0.0501, over 956205.98 frames. ], batch size: 32, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:17:35,194 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.062e+02 1.496e+02 1.750e+02 2.158e+02 3.393e+02, threshold=3.501e+02, percent-clipped=0.0 +2023-03-27 09:18:10,031 INFO [finetune.py:976] (5/7) Epoch 27, batch 5000, loss[loss=0.1738, simple_loss=0.2439, pruned_loss=0.0518, over 4901.00 frames. ], tot_loss[loss=0.1723, simple_loss=0.245, pruned_loss=0.04976, over 957705.92 frames. ], batch size: 36, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:18:17,219 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153923.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:18:27,363 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-03-27 09:18:28,733 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153934.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:19:01,716 INFO [finetune.py:976] (5/7) Epoch 27, batch 5050, loss[loss=0.1849, simple_loss=0.2478, pruned_loss=0.06097, over 4850.00 frames. ], tot_loss[loss=0.1704, simple_loss=0.2427, pruned_loss=0.04903, over 958277.31 frames. ], batch size: 47, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:19:02,311 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.210e+01 1.435e+02 1.808e+02 2.168e+02 4.775e+02, threshold=3.617e+02, percent-clipped=1.0 +2023-03-27 09:19:07,834 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0680, 2.0084, 1.6416, 1.9082, 2.0578, 1.7786, 2.2196, 2.0907], + device='cuda:5'), covar=tensor([0.1278, 0.1877, 0.2722, 0.2246, 0.2235, 0.1610, 0.2670, 0.1524], + device='cuda:5'), in_proj_covar=tensor([0.0191, 0.0192, 0.0238, 0.0256, 0.0252, 0.0208, 0.0217, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:19:10,570 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=153982.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:19:11,849 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153984.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:19:36,572 INFO [finetune.py:976] (5/7) Epoch 27, batch 5100, loss[loss=0.1743, simple_loss=0.2455, pruned_loss=0.05152, over 4828.00 frames. ], tot_loss[loss=0.1678, simple_loss=0.2396, pruned_loss=0.04803, over 958634.89 frames. ], batch size: 30, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:20:06,448 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154049.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 09:20:19,664 INFO [finetune.py:976] (5/7) Epoch 27, batch 5150, loss[loss=0.1155, simple_loss=0.2011, pruned_loss=0.01497, over 4797.00 frames. ], tot_loss[loss=0.1676, simple_loss=0.2393, pruned_loss=0.04799, over 957949.41 frames. 
], batch size: 29, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:20:20,251 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.883e+01 1.500e+02 1.789e+02 2.109e+02 4.792e+02, threshold=3.578e+02, percent-clipped=3.0 +2023-03-27 09:20:23,485 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154076.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:20:24,065 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154077.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:20:28,820 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154084.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:20:46,982 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154110.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 09:20:53,422 INFO [finetune.py:976] (5/7) Epoch 27, batch 5200, loss[loss=0.1644, simple_loss=0.2455, pruned_loss=0.04161, over 4813.00 frames. ], tot_loss[loss=0.1701, simple_loss=0.2427, pruned_loss=0.04873, over 958693.90 frames. ], batch size: 38, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:20:56,442 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=154125.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:21:04,809 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154137.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 09:21:12,326 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154145.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:21:34,731 INFO [finetune.py:976] (5/7) Epoch 27, batch 5250, loss[loss=0.2019, simple_loss=0.2738, pruned_loss=0.06496, over 4812.00 frames. ], tot_loss[loss=0.172, simple_loss=0.2447, pruned_loss=0.04966, over 957931.87 frames. ], batch size: 39, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:21:35,329 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.122e+02 1.556e+02 1.889e+02 2.346e+02 3.556e+02, threshold=3.778e+02, percent-clipped=0.0 +2023-03-27 09:21:38,513 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0776, 2.1137, 1.4013, 1.9648, 2.0237, 1.7305, 2.6572, 2.0509], + device='cuda:5'), covar=tensor([0.1442, 0.1837, 0.3482, 0.2808, 0.2724, 0.1808, 0.2267, 0.1787], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0191, 0.0237, 0.0255, 0.0250, 0.0207, 0.0216, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:22:08,470 INFO [finetune.py:976] (5/7) Epoch 27, batch 5300, loss[loss=0.1404, simple_loss=0.2216, pruned_loss=0.02958, over 4793.00 frames. ], tot_loss[loss=0.1711, simple_loss=0.2441, pruned_loss=0.04908, over 955348.11 frames. ], batch size: 59, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:22:09,174 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154221.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:22:41,917 INFO [finetune.py:976] (5/7) Epoch 27, batch 5350, loss[loss=0.1559, simple_loss=0.2308, pruned_loss=0.04047, over 4892.00 frames. ], tot_loss[loss=0.1703, simple_loss=0.2437, pruned_loss=0.04845, over 955713.70 frames. 
], batch size: 43, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:22:42,509 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.999e+01 1.513e+02 1.798e+02 2.139e+02 3.270e+02, threshold=3.596e+02, percent-clipped=0.0 +2023-03-27 09:22:47,881 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154279.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:22:49,751 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154282.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 09:22:52,186 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154286.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:22:54,041 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-03-27 09:22:55,609 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154291.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:23:01,684 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-03-27 09:23:15,340 INFO [finetune.py:976] (5/7) Epoch 27, batch 5400, loss[loss=0.1393, simple_loss=0.2086, pruned_loss=0.03495, over 4810.00 frames. ], tot_loss[loss=0.1684, simple_loss=0.2413, pruned_loss=0.04779, over 954294.85 frames. ], batch size: 51, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:23:33,019 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154337.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 09:23:43,258 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154347.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:23:49,165 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154352.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:24:08,870 INFO [finetune.py:976] (5/7) Epoch 27, batch 5450, loss[loss=0.1757, simple_loss=0.2305, pruned_loss=0.06047, over 4088.00 frames. ], tot_loss[loss=0.1657, simple_loss=0.2379, pruned_loss=0.04672, over 954920.78 frames. 
], batch size: 18, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:24:09,462 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.752e+01 1.439e+02 1.730e+02 2.063e+02 4.741e+02, threshold=3.460e+02, percent-clipped=1.0 +2023-03-27 09:24:26,451 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154398.0, num_to_drop=1, layers_to_drop={3} +2023-03-27 09:24:28,261 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4012, 1.4994, 1.6959, 1.6083, 1.6186, 2.9974, 1.3853, 1.5596], + device='cuda:5'), covar=tensor([0.0939, 0.1742, 0.0966, 0.0842, 0.1505, 0.0245, 0.1419, 0.1732], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0083, 0.0073, 0.0077, 0.0092, 0.0080, 0.0086, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0005], + device='cuda:5') +2023-03-27 09:24:32,219 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154405.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 09:24:37,963 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4848, 2.4277, 2.0365, 2.4863, 2.2646, 2.2904, 2.3053, 3.2317], + device='cuda:5'), covar=tensor([0.3418, 0.4583, 0.3328, 0.4016, 0.4484, 0.2511, 0.4121, 0.1558], + device='cuda:5'), in_proj_covar=tensor([0.0291, 0.0265, 0.0238, 0.0276, 0.0262, 0.0231, 0.0260, 0.0239], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:24:42,541 INFO [finetune.py:976] (5/7) Epoch 27, batch 5500, loss[loss=0.176, simple_loss=0.2566, pruned_loss=0.04774, over 4903.00 frames. ], tot_loss[loss=0.1639, simple_loss=0.2356, pruned_loss=0.04613, over 955532.11 frames. ], batch size: 43, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:24:49,891 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154432.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 09:24:54,776 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154440.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:25:27,011 INFO [finetune.py:976] (5/7) Epoch 27, batch 5550, loss[loss=0.2021, simple_loss=0.2746, pruned_loss=0.06481, over 4735.00 frames. ], tot_loss[loss=0.1649, simple_loss=0.2367, pruned_loss=0.04659, over 953352.22 frames. ], batch size: 59, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:25:27,598 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.282e+01 1.501e+02 1.802e+02 2.038e+02 5.335e+02, threshold=3.603e+02, percent-clipped=2.0 +2023-03-27 09:25:29,560 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154474.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:25:57,470 INFO [finetune.py:976] (5/7) Epoch 27, batch 5600, loss[loss=0.1956, simple_loss=0.2801, pruned_loss=0.05551, over 4801.00 frames. ], tot_loss[loss=0.1692, simple_loss=0.2417, pruned_loss=0.04834, over 954182.37 frames. 
], batch size: 45, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:26:07,295 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154535.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:26:27,473 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1441, 1.9666, 1.7093, 1.7462, 1.8817, 1.9314, 1.9380, 2.6207], + device='cuda:5'), covar=tensor([0.3654, 0.4245, 0.3216, 0.3561, 0.3731, 0.2341, 0.3398, 0.1710], + device='cuda:5'), in_proj_covar=tensor([0.0289, 0.0264, 0.0236, 0.0275, 0.0261, 0.0229, 0.0259, 0.0238], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:26:30,306 INFO [finetune.py:976] (5/7) Epoch 27, batch 5650, loss[loss=0.1582, simple_loss=0.2344, pruned_loss=0.04099, over 4900.00 frames. ], tot_loss[loss=0.1705, simple_loss=0.2438, pruned_loss=0.04859, over 955659.39 frames. ], batch size: 32, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:26:30,863 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.301e+01 1.398e+02 1.740e+02 2.119e+02 4.723e+02, threshold=3.480e+02, percent-clipped=2.0 +2023-03-27 09:26:39,113 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154577.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 09:26:40,322 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154579.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:27:07,857 INFO [finetune.py:976] (5/7) Epoch 27, batch 5700, loss[loss=0.1645, simple_loss=0.2191, pruned_loss=0.05494, over 3988.00 frames. ], tot_loss[loss=0.1686, simple_loss=0.2403, pruned_loss=0.04846, over 934406.61 frames. ], batch size: 17, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:27:12,038 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=154627.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:27:12,689 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154628.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:27:20,888 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154642.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:27:34,185 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154647.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:27:34,739 INFO [finetune.py:976] (5/7) Epoch 28, batch 0, loss[loss=0.1428, simple_loss=0.2246, pruned_loss=0.0305, over 4902.00 frames. ], tot_loss[loss=0.1428, simple_loss=0.2246, pruned_loss=0.0305, over 4902.00 frames. ], batch size: 46, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:27:34,739 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-27 09:27:45,011 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.2184, 1.4500, 1.4527, 0.7493, 1.4635, 1.6553, 1.7073, 1.3994], + device='cuda:5'), covar=tensor([0.0996, 0.0689, 0.0674, 0.0640, 0.0647, 0.0760, 0.0530, 0.0854], + device='cuda:5'), in_proj_covar=tensor([0.0121, 0.0147, 0.0129, 0.0121, 0.0131, 0.0130, 0.0142, 0.0150], + device='cuda:5'), out_proj_covar=tensor([8.8337e-05, 1.0504e-04, 9.1824e-05, 8.5222e-05, 9.1862e-05, 9.1862e-05, + 1.0083e-04, 1.0708e-04], device='cuda:5') +2023-03-27 09:27:54,285 INFO [finetune.py:1010] (5/7) Epoch 28, validation: loss=0.1583, simple_loss=0.2265, pruned_loss=0.04511, over 2265189.00 frames. 
+2023-03-27 09:27:54,286 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-27 09:27:55,418 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8016, 1.8199, 0.9934, 2.5973, 3.0681, 2.4594, 2.3373, 2.2306], + device='cuda:5'), covar=tensor([0.1051, 0.1776, 0.1812, 0.0943, 0.1345, 0.1484, 0.1101, 0.1681], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0093, 0.0109, 0.0091, 0.0119, 0.0093, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 09:27:59,645 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3173, 1.1715, 1.5877, 2.2966, 1.5514, 2.1217, 0.9832, 2.0590], + device='cuda:5'), covar=tensor([0.1811, 0.1832, 0.1287, 0.0957, 0.1068, 0.1533, 0.1686, 0.0734], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0115, 0.0132, 0.0163, 0.0100, 0.0135, 0.0124, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 09:28:08,716 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.011e+02 1.488e+02 1.773e+02 2.221e+02 3.199e+02, threshold=3.546e+02, percent-clipped=0.0 +2023-03-27 09:28:19,993 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8181, 1.7667, 1.9318, 1.1779, 1.8980, 1.8792, 1.8840, 1.5486], + device='cuda:5'), covar=tensor([0.0673, 0.0721, 0.0730, 0.0993, 0.0747, 0.0754, 0.0618, 0.1221], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0138, 0.0141, 0.0120, 0.0129, 0.0140, 0.0140, 0.0164], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:28:21,213 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154689.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:28:24,052 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154693.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 09:28:27,013 INFO [finetune.py:976] (5/7) Epoch 28, batch 50, loss[loss=0.1559, simple_loss=0.2376, pruned_loss=0.03713, over 4775.00 frames. ], tot_loss[loss=0.1746, simple_loss=0.2479, pruned_loss=0.0507, over 215999.81 frames. 
], batch size: 28, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:28:32,659 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154705.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 09:28:35,797 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0735, 2.0144, 1.7926, 2.0431, 1.8999, 1.9523, 1.9723, 2.6641], + device='cuda:5'), covar=tensor([0.3703, 0.4282, 0.3176, 0.3825, 0.4194, 0.2349, 0.3681, 0.1589], + device='cuda:5'), in_proj_covar=tensor([0.0289, 0.0263, 0.0236, 0.0274, 0.0260, 0.0228, 0.0258, 0.0237], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:28:44,264 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154720.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:28:52,104 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154732.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:28:57,940 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154740.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:29:03,136 INFO [finetune.py:976] (5/7) Epoch 28, batch 100, loss[loss=0.1327, simple_loss=0.2097, pruned_loss=0.02786, over 4842.00 frames. ], tot_loss[loss=0.1686, simple_loss=0.2409, pruned_loss=0.04815, over 380246.38 frames. ], batch size: 25, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:29:10,691 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.6644, 3.2432, 3.4083, 3.4065, 3.2500, 3.1743, 3.7059, 1.3097], + device='cuda:5'), covar=tensor([0.1563, 0.1753, 0.1921, 0.2033, 0.2608, 0.2969, 0.1815, 0.8171], + device='cuda:5'), in_proj_covar=tensor([0.0353, 0.0247, 0.0283, 0.0296, 0.0335, 0.0287, 0.0305, 0.0302], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:29:11,915 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=154753.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 09:29:13,672 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4723, 2.4533, 2.5937, 1.8216, 2.5225, 2.6182, 2.5849, 2.0601], + device='cuda:5'), covar=tensor([0.0520, 0.0547, 0.0559, 0.0754, 0.0589, 0.0621, 0.0507, 0.0979], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0138, 0.0141, 0.0120, 0.0128, 0.0139, 0.0140, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:29:26,906 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.078e+02 1.413e+02 1.713e+02 2.093e+02 4.180e+02, threshold=3.426e+02, percent-clipped=2.0 +2023-03-27 09:29:32,450 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=154780.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:29:33,117 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154781.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:29:37,832 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=154788.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:29:44,902 INFO [finetune.py:976] (5/7) Epoch 28, batch 150, loss[loss=0.1655, simple_loss=0.2319, pruned_loss=0.0495, over 4824.00 frames. ], tot_loss[loss=0.1646, simple_loss=0.2355, pruned_loss=0.04683, over 508887.85 frames. 
], batch size: 30, lr: 2.90e-03, grad_scale: 32.0 +2023-03-27 09:29:51,241 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-03-27 09:29:56,986 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5065, 1.3709, 1.2366, 1.4919, 1.5951, 1.4936, 1.0513, 1.3020], + device='cuda:5'), covar=tensor([0.1787, 0.1700, 0.1607, 0.1372, 0.1476, 0.1071, 0.2388, 0.1565], + device='cuda:5'), in_proj_covar=tensor([0.0246, 0.0212, 0.0216, 0.0200, 0.0246, 0.0192, 0.0218, 0.0205], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:29:57,837 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-03-27 09:29:59,449 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.10 vs. limit=2.0 +2023-03-27 09:30:06,041 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154830.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:30:18,389 INFO [finetune.py:976] (5/7) Epoch 28, batch 200, loss[loss=0.1645, simple_loss=0.2379, pruned_loss=0.04561, over 4811.00 frames. ], tot_loss[loss=0.166, simple_loss=0.236, pruned_loss=0.04797, over 607920.53 frames. ], batch size: 40, lr: 2.89e-03, grad_scale: 32.0 +2023-03-27 09:30:40,610 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.190e+01 1.565e+02 1.831e+02 2.234e+02 3.641e+02, threshold=3.662e+02, percent-clipped=1.0 +2023-03-27 09:30:48,057 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154877.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 09:31:02,679 INFO [finetune.py:976] (5/7) Epoch 28, batch 250, loss[loss=0.2618, simple_loss=0.3052, pruned_loss=0.1092, over 4077.00 frames. ], tot_loss[loss=0.1673, simple_loss=0.2381, pruned_loss=0.04823, over 684413.80 frames. 
], batch size: 65, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:31:11,489 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6962, 1.3102, 0.9325, 1.6454, 2.1030, 1.3553, 1.4911, 1.5571], + device='cuda:5'), covar=tensor([0.1480, 0.2087, 0.1804, 0.1157, 0.2043, 0.1976, 0.1490, 0.2010], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0093, 0.0110, 0.0092, 0.0119, 0.0093, 0.0098, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 09:31:15,163 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.3477, 3.7077, 4.0101, 4.2235, 4.0799, 3.8050, 4.4102, 1.4220], + device='cuda:5'), covar=tensor([0.0813, 0.0923, 0.0912, 0.0926, 0.1358, 0.1703, 0.0868, 0.5890], + device='cuda:5'), in_proj_covar=tensor([0.0353, 0.0248, 0.0285, 0.0297, 0.0336, 0.0288, 0.0306, 0.0303], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:31:20,578 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=154925.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:31:31,427 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154942.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:31:34,996 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154947.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:31:35,507 INFO [finetune.py:976] (5/7) Epoch 28, batch 300, loss[loss=0.2162, simple_loss=0.2868, pruned_loss=0.07278, over 4717.00 frames. ], tot_loss[loss=0.1698, simple_loss=0.2419, pruned_loss=0.04888, over 743267.52 frames. ], batch size: 59, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:31:51,474 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.026e+02 1.523e+02 1.869e+02 2.212e+02 3.864e+02, threshold=3.739e+02, percent-clipped=1.0 +2023-03-27 09:31:57,865 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.36 vs. limit=5.0 +2023-03-27 09:31:59,677 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154976.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:32:07,921 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154984.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:32:11,541 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=154990.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:32:13,402 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154993.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 09:32:15,017 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=154995.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:32:16,797 INFO [finetune.py:976] (5/7) Epoch 28, batch 350, loss[loss=0.1247, simple_loss=0.1948, pruned_loss=0.02725, over 4705.00 frames. ], tot_loss[loss=0.1726, simple_loss=0.2451, pruned_loss=0.0501, over 790854.06 frames. ], batch size: 23, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:32:19,701 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. 
limit=2.0 +2023-03-27 09:32:43,061 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155037.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:32:45,384 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=155041.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 09:32:45,965 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9026, 4.9167, 4.6228, 2.6833, 5.0011, 3.7031, 0.9536, 3.5554], + device='cuda:5'), covar=tensor([0.2177, 0.1827, 0.1631, 0.3014, 0.1072, 0.0854, 0.4737, 0.1220], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0180, 0.0160, 0.0131, 0.0162, 0.0124, 0.0149, 0.0125], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 09:32:49,883 INFO [finetune.py:976] (5/7) Epoch 28, batch 400, loss[loss=0.2123, simple_loss=0.2801, pruned_loss=0.07223, over 4825.00 frames. ], tot_loss[loss=0.1729, simple_loss=0.2459, pruned_loss=0.04993, over 828747.18 frames. ], batch size: 39, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:33:13,488 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155069.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:33:15,064 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.906e+01 1.559e+02 1.879e+02 2.352e+02 4.263e+02, threshold=3.758e+02, percent-clipped=3.0 +2023-03-27 09:33:18,283 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155076.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:33:31,488 INFO [finetune.py:976] (5/7) Epoch 28, batch 450, loss[loss=0.187, simple_loss=0.2697, pruned_loss=0.05212, over 4833.00 frames. ], tot_loss[loss=0.1705, simple_loss=0.2432, pruned_loss=0.04895, over 854638.41 frames. ], batch size: 49, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:33:39,604 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.30 vs. limit=5.0 +2023-03-27 09:33:47,114 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0330, 1.9137, 2.5745, 4.0649, 2.7200, 2.8494, 0.8412, 3.4914], + device='cuda:5'), covar=tensor([0.1714, 0.1335, 0.1365, 0.0500, 0.0795, 0.1452, 0.2098, 0.0371], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0116, 0.0132, 0.0164, 0.0100, 0.0135, 0.0125, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 09:33:54,286 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155130.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:33:54,326 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155130.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:34:05,142 INFO [finetune.py:976] (5/7) Epoch 28, batch 500, loss[loss=0.1559, simple_loss=0.2405, pruned_loss=0.03562, over 4828.00 frames. ], tot_loss[loss=0.1688, simple_loss=0.241, pruned_loss=0.04826, over 878419.22 frames. 
], batch size: 33, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:34:28,556 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.687e+01 1.475e+02 1.683e+02 2.204e+02 4.497e+02, threshold=3.366e+02, percent-clipped=1.0 +2023-03-27 09:34:30,439 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6881, 2.4919, 2.2678, 1.0418, 2.3838, 2.1121, 1.9503, 2.3267], + device='cuda:5'), covar=tensor([0.0808, 0.0869, 0.1440, 0.2042, 0.1276, 0.1890, 0.1857, 0.0901], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0191, 0.0200, 0.0180, 0.0209, 0.0209, 0.0223, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:34:37,153 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=155178.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:34:38,426 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4442, 1.5201, 2.0963, 1.7982, 1.8510, 4.1102, 1.5335, 1.6959], + device='cuda:5'), covar=tensor([0.0978, 0.1742, 0.1210, 0.0907, 0.1431, 0.0168, 0.1400, 0.1684], + device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0082, 0.0073, 0.0076, 0.0091, 0.0080, 0.0085, 0.0079], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 09:34:49,247 INFO [finetune.py:976] (5/7) Epoch 28, batch 550, loss[loss=0.1568, simple_loss=0.2245, pruned_loss=0.04453, over 4806.00 frames. ], tot_loss[loss=0.1669, simple_loss=0.2383, pruned_loss=0.0478, over 896248.44 frames. ], batch size: 25, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:35:03,628 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2406, 2.1691, 1.8794, 1.9789, 2.7458, 2.6478, 2.3125, 2.1078], + device='cuda:5'), covar=tensor([0.0412, 0.0373, 0.0589, 0.0350, 0.0258, 0.0567, 0.0274, 0.0440], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0106, 0.0148, 0.0111, 0.0102, 0.0116, 0.0103, 0.0113], + device='cuda:5'), out_proj_covar=tensor([7.8264e-05, 8.1370e-05, 1.1529e-04, 8.4988e-05, 7.8724e-05, 8.5108e-05, + 7.6631e-05, 8.6072e-05], device='cuda:5') +2023-03-27 09:35:23,080 INFO [finetune.py:976] (5/7) Epoch 28, batch 600, loss[loss=0.2161, simple_loss=0.2875, pruned_loss=0.07228, over 4861.00 frames. ], tot_loss[loss=0.1676, simple_loss=0.2388, pruned_loss=0.04813, over 909945.54 frames. ], batch size: 44, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:35:38,978 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.640e+01 1.454e+02 1.702e+02 1.998e+02 4.828e+02, threshold=3.403e+02, percent-clipped=2.0 +2023-03-27 09:35:53,267 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155284.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:36:05,097 INFO [finetune.py:976] (5/7) Epoch 28, batch 650, loss[loss=0.1794, simple_loss=0.25, pruned_loss=0.05438, over 4735.00 frames. ], tot_loss[loss=0.1692, simple_loss=0.2416, pruned_loss=0.04845, over 920172.59 frames. 
], batch size: 59, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:36:07,075 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1504, 1.9089, 2.0634, 1.4263, 1.8886, 2.0701, 2.0359, 1.5323], + device='cuda:5'), covar=tensor([0.0473, 0.0647, 0.0612, 0.0834, 0.0792, 0.0657, 0.0555, 0.1219], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0138, 0.0141, 0.0119, 0.0128, 0.0139, 0.0140, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:36:29,094 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=155332.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:36:29,104 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155332.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:36:38,728 INFO [finetune.py:976] (5/7) Epoch 28, batch 700, loss[loss=0.1542, simple_loss=0.2367, pruned_loss=0.03586, over 4743.00 frames. ], tot_loss[loss=0.1705, simple_loss=0.2433, pruned_loss=0.0489, over 927542.60 frames. ], batch size: 27, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:36:44,148 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-03-27 09:36:54,660 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.124e+02 1.563e+02 1.812e+02 2.261e+02 4.160e+02, threshold=3.625e+02, percent-clipped=3.0 +2023-03-27 09:36:57,813 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155376.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:37:19,500 INFO [finetune.py:976] (5/7) Epoch 28, batch 750, loss[loss=0.1925, simple_loss=0.2583, pruned_loss=0.06336, over 4917.00 frames. ], tot_loss[loss=0.1708, simple_loss=0.2441, pruned_loss=0.04871, over 934753.18 frames. ], batch size: 29, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:37:40,163 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=155424.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:37:41,308 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155425.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:37:56,712 INFO [finetune.py:976] (5/7) Epoch 28, batch 800, loss[loss=0.1561, simple_loss=0.2381, pruned_loss=0.03707, over 4821.00 frames. ], tot_loss[loss=0.1689, simple_loss=0.2426, pruned_loss=0.04762, over 939163.67 frames. ], batch size: 39, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:38:02,333 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155457.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:38:11,934 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.111e+02 1.495e+02 1.724e+02 1.967e+02 3.002e+02, threshold=3.447e+02, percent-clipped=0.0 +2023-03-27 09:38:39,883 INFO [finetune.py:976] (5/7) Epoch 28, batch 850, loss[loss=0.1611, simple_loss=0.2294, pruned_loss=0.04633, over 4787.00 frames. ], tot_loss[loss=0.168, simple_loss=0.2406, pruned_loss=0.04774, over 935664.45 frames. 
], batch size: 51, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:38:42,406 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9358, 1.7813, 1.9814, 1.2712, 1.9262, 1.9300, 1.8776, 1.6080], + device='cuda:5'), covar=tensor([0.0513, 0.0679, 0.0603, 0.0842, 0.0804, 0.0654, 0.0575, 0.1144], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0138, 0.0141, 0.0119, 0.0129, 0.0139, 0.0140, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:38:52,660 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155518.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:38:58,487 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2428, 2.0274, 2.2577, 1.5685, 2.1604, 2.2488, 2.2226, 1.7207], + device='cuda:5'), covar=tensor([0.0548, 0.0680, 0.0645, 0.0854, 0.0661, 0.0682, 0.0619, 0.1115], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0138, 0.0141, 0.0119, 0.0128, 0.0138, 0.0139, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:39:13,737 INFO [finetune.py:976] (5/7) Epoch 28, batch 900, loss[loss=0.1785, simple_loss=0.2503, pruned_loss=0.05333, over 4752.00 frames. ], tot_loss[loss=0.1672, simple_loss=0.239, pruned_loss=0.04766, over 941801.95 frames. ], batch size: 26, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:39:28,251 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.783e+01 1.413e+02 1.787e+02 2.288e+02 4.282e+02, threshold=3.575e+02, percent-clipped=3.0 +2023-03-27 09:39:38,345 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3373, 1.4346, 1.4638, 0.7564, 1.5166, 1.6762, 1.7375, 1.3130], + device='cuda:5'), covar=tensor([0.0913, 0.0722, 0.0555, 0.0555, 0.0453, 0.0696, 0.0301, 0.0701], + device='cuda:5'), in_proj_covar=tensor([0.0121, 0.0146, 0.0129, 0.0122, 0.0131, 0.0130, 0.0142, 0.0149], + device='cuda:5'), out_proj_covar=tensor([8.8288e-05, 1.0497e-04, 9.2181e-05, 8.5793e-05, 9.1839e-05, 9.1941e-05, + 1.0088e-04, 1.0668e-04], device='cuda:5') +2023-03-27 09:39:54,473 INFO [finetune.py:976] (5/7) Epoch 28, batch 950, loss[loss=0.1668, simple_loss=0.2304, pruned_loss=0.05162, over 4827.00 frames. ], tot_loss[loss=0.1666, simple_loss=0.2382, pruned_loss=0.04747, over 945573.03 frames. ], batch size: 40, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:40:02,622 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155605.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:40:20,529 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155632.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:40:31,713 INFO [finetune.py:976] (5/7) Epoch 28, batch 1000, loss[loss=0.1702, simple_loss=0.2508, pruned_loss=0.04484, over 4811.00 frames. ], tot_loss[loss=0.168, simple_loss=0.2401, pruned_loss=0.04792, over 947015.58 frames. ], batch size: 38, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:40:37,313 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155657.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:40:42,774 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155666.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:40:44,894 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.32 vs. 
limit=5.0 +2023-03-27 09:40:45,714 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.619e+01 1.519e+02 1.814e+02 2.190e+02 3.109e+02, threshold=3.628e+02, percent-clipped=0.0 +2023-03-27 09:40:54,192 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=155680.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:41:05,858 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0987, 1.8539, 2.4172, 1.5662, 2.0100, 2.3771, 1.6883, 2.4869], + device='cuda:5'), covar=tensor([0.1197, 0.1939, 0.1510, 0.2075, 0.1008, 0.1342, 0.2762, 0.0759], + device='cuda:5'), in_proj_covar=tensor([0.0193, 0.0209, 0.0195, 0.0191, 0.0175, 0.0215, 0.0220, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:41:13,974 INFO [finetune.py:976] (5/7) Epoch 28, batch 1050, loss[loss=0.1527, simple_loss=0.2309, pruned_loss=0.03727, over 4761.00 frames. ], tot_loss[loss=0.1696, simple_loss=0.2424, pruned_loss=0.04846, over 949644.65 frames. ], batch size: 28, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:41:26,786 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155718.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:41:31,004 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155725.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:41:45,173 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-03-27 09:41:46,713 INFO [finetune.py:976] (5/7) Epoch 28, batch 1100, loss[loss=0.1526, simple_loss=0.2375, pruned_loss=0.03384, over 4797.00 frames. ], tot_loss[loss=0.1726, simple_loss=0.2453, pruned_loss=0.04994, over 950200.95 frames. ], batch size: 51, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:42:01,623 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.104e+02 1.629e+02 1.915e+02 2.256e+02 9.973e+02, threshold=3.830e+02, percent-clipped=2.0 +2023-03-27 09:42:02,899 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=155773.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:42:10,616 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155784.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:42:21,326 INFO [finetune.py:976] (5/7) Epoch 28, batch 1150, loss[loss=0.1662, simple_loss=0.2367, pruned_loss=0.04787, over 4919.00 frames. ], tot_loss[loss=0.1726, simple_loss=0.2457, pruned_loss=0.04968, over 951858.30 frames. ], batch size: 33, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:42:39,763 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155813.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:43:01,209 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155845.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 09:43:02,854 INFO [finetune.py:976] (5/7) Epoch 28, batch 1200, loss[loss=0.1554, simple_loss=0.2288, pruned_loss=0.041, over 4820.00 frames. ], tot_loss[loss=0.1701, simple_loss=0.2431, pruned_loss=0.04859, over 952617.02 frames. 
], batch size: 38, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:43:18,194 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.094e+02 1.526e+02 1.746e+02 2.167e+02 3.236e+02, threshold=3.492e+02, percent-clipped=0.0 +2023-03-27 09:43:45,620 INFO [finetune.py:976] (5/7) Epoch 28, batch 1250, loss[loss=0.2123, simple_loss=0.2588, pruned_loss=0.08292, over 4268.00 frames. ], tot_loss[loss=0.1693, simple_loss=0.2412, pruned_loss=0.04871, over 953154.06 frames. ], batch size: 65, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:44:19,210 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3811, 2.3158, 1.7995, 2.2003, 2.1716, 1.9669, 2.5323, 2.3863], + device='cuda:5'), covar=tensor([0.1248, 0.1863, 0.2770, 0.2601, 0.2569, 0.1669, 0.3447, 0.1541], + device='cuda:5'), in_proj_covar=tensor([0.0189, 0.0189, 0.0235, 0.0253, 0.0248, 0.0207, 0.0214, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:44:22,110 INFO [finetune.py:976] (5/7) Epoch 28, batch 1300, loss[loss=0.1702, simple_loss=0.2133, pruned_loss=0.06358, over 4085.00 frames. ], tot_loss[loss=0.1685, simple_loss=0.2393, pruned_loss=0.04883, over 951904.77 frames. ], batch size: 17, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:44:32,017 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155961.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:44:38,007 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.818e+01 1.476e+02 1.729e+02 2.197e+02 4.050e+02, threshold=3.458e+02, percent-clipped=1.0 +2023-03-27 09:44:55,320 INFO [finetune.py:976] (5/7) Epoch 28, batch 1350, loss[loss=0.1439, simple_loss=0.2189, pruned_loss=0.03447, over 4778.00 frames. ], tot_loss[loss=0.1692, simple_loss=0.2401, pruned_loss=0.04914, over 952899.89 frames. ], batch size: 26, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:45:10,151 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156013.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:45:30,432 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6499, 1.3023, 0.9007, 1.7552, 2.2586, 1.4127, 1.6345, 1.6828], + device='cuda:5'), covar=tensor([0.2023, 0.2556, 0.2274, 0.1502, 0.2047, 0.2534, 0.1818, 0.2596], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0094, 0.0110, 0.0092, 0.0120, 0.0093, 0.0098, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 09:45:31,621 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.9808, 3.4728, 3.6628, 3.8304, 3.7682, 3.5356, 4.0581, 1.3232], + device='cuda:5'), covar=tensor([0.0822, 0.0904, 0.1015, 0.1119, 0.1226, 0.1661, 0.0710, 0.6087], + device='cuda:5'), in_proj_covar=tensor([0.0351, 0.0245, 0.0284, 0.0296, 0.0334, 0.0287, 0.0304, 0.0302], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:45:32,752 INFO [finetune.py:976] (5/7) Epoch 28, batch 1400, loss[loss=0.2327, simple_loss=0.3041, pruned_loss=0.08062, over 4165.00 frames. ], tot_loss[loss=0.1719, simple_loss=0.2442, pruned_loss=0.04984, over 953914.92 frames. 
], batch size: 65, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:45:48,233 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156070.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:45:48,708 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.069e+02 1.532e+02 1.806e+02 2.221e+02 4.474e+02, threshold=3.612e+02, percent-clipped=3.0 +2023-03-27 09:46:06,150 INFO [finetune.py:976] (5/7) Epoch 28, batch 1450, loss[loss=0.2099, simple_loss=0.2882, pruned_loss=0.06578, over 4252.00 frames. ], tot_loss[loss=0.1735, simple_loss=0.2463, pruned_loss=0.05039, over 954891.31 frames. ], batch size: 65, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:46:14,586 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7824, 1.6821, 1.4805, 1.8305, 2.1689, 1.8798, 1.5229, 1.4488], + device='cuda:5'), covar=tensor([0.1984, 0.1816, 0.1840, 0.1515, 0.1538, 0.1131, 0.2382, 0.1822], + device='cuda:5'), in_proj_covar=tensor([0.0246, 0.0212, 0.0216, 0.0200, 0.0246, 0.0190, 0.0217, 0.0205], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 09:46:23,158 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156113.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:46:35,165 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156131.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 09:46:40,536 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156140.0, num_to_drop=1, layers_to_drop={3} +2023-03-27 09:46:45,776 INFO [finetune.py:976] (5/7) Epoch 28, batch 1500, loss[loss=0.1809, simple_loss=0.2496, pruned_loss=0.05613, over 4223.00 frames. ], tot_loss[loss=0.1735, simple_loss=0.2464, pruned_loss=0.05033, over 955145.26 frames. ], batch size: 65, lr: 2.89e-03, grad_scale: 64.0 +2023-03-27 09:46:47,656 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.71 vs. limit=2.0 +2023-03-27 09:46:54,182 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=156161.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:47:02,075 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.065e+02 1.585e+02 1.899e+02 2.331e+02 3.577e+02, threshold=3.799e+02, percent-clipped=0.0 +2023-03-27 09:47:18,928 INFO [finetune.py:976] (5/7) Epoch 28, batch 1550, loss[loss=0.1567, simple_loss=0.2272, pruned_loss=0.04308, over 4846.00 frames. ], tot_loss[loss=0.1717, simple_loss=0.2448, pruned_loss=0.04927, over 956124.86 frames. ], batch size: 49, lr: 2.89e-03, grad_scale: 32.0 +2023-03-27 09:47:21,349 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-03-27 09:47:30,568 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156216.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:47:59,288 INFO [finetune.py:976] (5/7) Epoch 28, batch 1600, loss[loss=0.1476, simple_loss=0.2228, pruned_loss=0.03623, over 4828.00 frames. ], tot_loss[loss=0.1701, simple_loss=0.2428, pruned_loss=0.04874, over 956755.98 frames. 
], batch size: 47, lr: 2.89e-03, grad_scale: 32.0 +2023-03-27 09:48:08,687 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156261.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:48:14,075 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9653, 4.3340, 4.1899, 2.6209, 4.4618, 3.4033, 1.0555, 3.0532], + device='cuda:5'), covar=tensor([0.2499, 0.2069, 0.1292, 0.2769, 0.0807, 0.0787, 0.4468, 0.1488], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0179, 0.0160, 0.0130, 0.0163, 0.0123, 0.0149, 0.0125], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 09:48:15,841 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.043e+02 1.435e+02 1.767e+02 2.111e+02 3.704e+02, threshold=3.535e+02, percent-clipped=0.0 +2023-03-27 09:48:19,977 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156277.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:48:27,876 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156290.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:48:27,883 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6759, 1.6208, 1.4120, 1.5891, 1.9515, 1.8378, 1.6882, 1.4841], + device='cuda:5'), covar=tensor([0.0282, 0.0310, 0.0600, 0.0331, 0.0204, 0.0464, 0.0301, 0.0364], + device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0106, 0.0149, 0.0112, 0.0102, 0.0116, 0.0104, 0.0114], + device='cuda:5'), out_proj_covar=tensor([7.8628e-05, 8.1223e-05, 1.1577e-04, 8.5204e-05, 7.8535e-05, 8.5296e-05, + 7.7087e-05, 8.6305e-05], device='cuda:5') +2023-03-27 09:48:32,614 INFO [finetune.py:976] (5/7) Epoch 28, batch 1650, loss[loss=0.1567, simple_loss=0.2227, pruned_loss=0.04536, over 4701.00 frames. ], tot_loss[loss=0.1673, simple_loss=0.2396, pruned_loss=0.0475, over 955740.60 frames. ], batch size: 23, lr: 2.89e-03, grad_scale: 32.0 +2023-03-27 09:48:40,817 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=156309.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:48:43,254 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156313.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 09:49:18,368 INFO [finetune.py:976] (5/7) Epoch 28, batch 1700, loss[loss=0.1239, simple_loss=0.2035, pruned_loss=0.02219, over 4756.00 frames. ], tot_loss[loss=0.1674, simple_loss=0.2389, pruned_loss=0.04794, over 954932.16 frames. 
], batch size: 27, lr: 2.88e-03, grad_scale: 32.0
+2023-03-27 09:49:20,346 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156351.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 09:49:26,378 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5465, 1.2023, 0.8684, 1.6185, 2.0643, 1.4684, 1.5663, 1.7066],
+ device='cuda:5'), covar=tensor([0.1573, 0.2091, 0.1833, 0.1126, 0.2027, 0.1798, 0.1329, 0.1716],
+ device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0094, 0.0110, 0.0092, 0.0120, 0.0093, 0.0098, 0.0088],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003],
+ device='cuda:5')
+2023-03-27 09:49:27,456 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=156361.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 09:49:34,437 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.260e+01 1.525e+02 1.796e+02 2.204e+02 4.546e+02, threshold=3.593e+02, percent-clipped=3.0
+2023-03-27 09:49:51,286 INFO [finetune.py:976] (5/7) Epoch 28, batch 1750, loss[loss=0.1524, simple_loss=0.2258, pruned_loss=0.03948, over 4706.00 frames. ], tot_loss[loss=0.1685, simple_loss=0.2403, pruned_loss=0.04832, over 954377.71 frames. ], batch size: 23, lr: 2.88e-03, grad_scale: 32.0
+2023-03-27 09:50:04,712 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0440, 4.8085, 4.5378, 2.5965, 4.9600, 3.7660, 1.2365, 3.3523],
+ device='cuda:5'), covar=tensor([0.2020, 0.1675, 0.1192, 0.2889, 0.0647, 0.0723, 0.3954, 0.1276],
+ device='cuda:5'), in_proj_covar=tensor([0.0149, 0.0178, 0.0158, 0.0129, 0.0162, 0.0122, 0.0147, 0.0124],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-27 09:50:09,511 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156426.0, num_to_drop=1, layers_to_drop={3}
+2023-03-27 09:50:19,438 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156440.0, num_to_drop=1, layers_to_drop={2}
+2023-03-27 09:50:24,159 INFO [finetune.py:976] (5/7) Epoch 28, batch 1800, loss[loss=0.1989, simple_loss=0.243, pruned_loss=0.07745, over 4218.00 frames. ], tot_loss[loss=0.1708, simple_loss=0.2434, pruned_loss=0.04912, over 953762.67 frames. ], batch size: 18, lr: 2.88e-03, grad_scale: 32.0
+2023-03-27 09:50:39,987 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.070e+02 1.522e+02 1.851e+02 2.291e+02 4.651e+02, threshold=3.702e+02, percent-clipped=5.0
+2023-03-27 09:50:51,504 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=156488.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 09:50:56,523 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.10 vs. limit=5.0
+2023-03-27 09:50:57,550 INFO [finetune.py:976] (5/7) Epoch 28, batch 1850, loss[loss=0.163, simple_loss=0.2313, pruned_loss=0.04731, over 4764.00 frames. ], tot_loss[loss=0.172, simple_loss=0.2451, pruned_loss=0.04949, over 955541.63 frames. ], batch size: 54, lr: 2.88e-03, grad_scale: 32.0
+2023-03-27 09:51:40,564 INFO [finetune.py:976] (5/7) Epoch 28, batch 1900, loss[loss=0.1519, simple_loss=0.2319, pruned_loss=0.03589, over 4883.00 frames. ], tot_loss[loss=0.173, simple_loss=0.2463, pruned_loss=0.04986, over 954462.65 frames. ], batch size: 35, lr: 2.88e-03, grad_scale: 32.0
+2023-03-27 09:51:56,029 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.831e+01 1.540e+02 1.871e+02 2.242e+02 4.934e+02, threshold=3.741e+02, percent-clipped=1.0
+2023-03-27 09:51:56,114 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156572.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 09:52:13,685 INFO [finetune.py:976] (5/7) Epoch 28, batch 1950, loss[loss=0.1957, simple_loss=0.2586, pruned_loss=0.06637, over 4864.00 frames. ], tot_loss[loss=0.1727, simple_loss=0.2455, pruned_loss=0.05001, over 954318.34 frames. ], batch size: 31, lr: 2.88e-03, grad_scale: 32.0
+2023-03-27 09:52:20,497 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0
+2023-03-27 09:52:46,375 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156646.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 09:52:47,510 INFO [finetune.py:976] (5/7) Epoch 28, batch 2000, loss[loss=0.1419, simple_loss=0.2102, pruned_loss=0.03674, over 4900.00 frames. ], tot_loss[loss=0.1698, simple_loss=0.2418, pruned_loss=0.04895, over 953936.72 frames. ], batch size: 35, lr: 2.88e-03, grad_scale: 32.0
+2023-03-27 09:53:04,320 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.919e+01 1.529e+02 1.760e+02 2.169e+02 4.761e+02, threshold=3.520e+02, percent-clipped=1.0
+2023-03-27 09:53:29,985 INFO [finetune.py:976] (5/7) Epoch 28, batch 2050, loss[loss=0.1666, simple_loss=0.2248, pruned_loss=0.05419, over 4061.00 frames. ], tot_loss[loss=0.1676, simple_loss=0.2389, pruned_loss=0.04818, over 950883.51 frames. ], batch size: 65, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 09:53:30,056 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6901, 3.7613, 3.5870, 1.7970, 3.9608, 2.9280, 0.8107, 2.6344],
+ device='cuda:5'), covar=tensor([0.2405, 0.2795, 0.1479, 0.3693, 0.1150, 0.1033, 0.4906, 0.1748],
+ device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0179, 0.0159, 0.0129, 0.0163, 0.0123, 0.0148, 0.0125],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-27 09:53:31,842 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0984, 2.0709, 1.4662, 2.1235, 2.0879, 1.7528, 2.7142, 2.1256],
+ device='cuda:5'), covar=tensor([0.1497, 0.1882, 0.3301, 0.2898, 0.2783, 0.1828, 0.2139, 0.1864],
+ device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0190, 0.0237, 0.0255, 0.0251, 0.0208, 0.0215, 0.0204],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 09:53:47,942 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156726.0, num_to_drop=1, layers_to_drop={1}
+2023-03-27 09:54:08,964 INFO [finetune.py:976] (5/7) Epoch 28, batch 2100, loss[loss=0.1571, simple_loss=0.2342, pruned_loss=0.03996, over 4940.00 frames. ], tot_loss[loss=0.1681, simple_loss=0.2396, pruned_loss=0.04834, over 952704.20 frames. ], batch size: 33, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 09:54:10,239 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156749.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 09:54:37,771 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.070e+02 1.521e+02 1.844e+02 2.179e+02 3.224e+02, threshold=3.687e+02, percent-clipped=0.0
+2023-03-27 09:54:38,478 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=156774.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 09:54:47,431 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.23 vs. limit=5.0
+2023-03-27 09:54:54,965 INFO [finetune.py:976] (5/7) Epoch 28, batch 2150, loss[loss=0.2001, simple_loss=0.2808, pruned_loss=0.05968, over 4779.00 frames. ], tot_loss[loss=0.1706, simple_loss=0.2429, pruned_loss=0.04915, over 953005.73 frames. ], batch size: 54, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 09:55:02,851 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1846, 1.6676, 1.0668, 2.0962, 2.2686, 2.1139, 1.9033, 2.0788],
+ device='cuda:5'), covar=tensor([0.1225, 0.1658, 0.1998, 0.0930, 0.1838, 0.1684, 0.1145, 0.1530],
+ device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0094, 0.0109, 0.0092, 0.0120, 0.0093, 0.0098, 0.0088],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003],
+ device='cuda:5')
+2023-03-27 09:55:03,477 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156810.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 09:55:23,335 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.52 vs. limit=5.0
+2023-03-27 09:55:27,788 INFO [finetune.py:976] (5/7) Epoch 28, batch 2200, loss[loss=0.1365, simple_loss=0.206, pruned_loss=0.03352, over 4739.00 frames. ], tot_loss[loss=0.1713, simple_loss=0.2443, pruned_loss=0.04914, over 953861.44 frames. ], batch size: 23, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 09:55:36,800 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9524, 1.9006, 2.1491, 1.3799, 2.0031, 2.1324, 2.1425, 1.5956],
+ device='cuda:5'), covar=tensor([0.0658, 0.0709, 0.0644, 0.0915, 0.0786, 0.0632, 0.0584, 0.1281],
+ device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0138, 0.0140, 0.0118, 0.0128, 0.0138, 0.0139, 0.0162],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 09:55:44,125 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7968, 4.1382, 3.9273, 1.9981, 4.2782, 3.1736, 1.1190, 3.0232],
+ device='cuda:5'), covar=tensor([0.2011, 0.1675, 0.1286, 0.2857, 0.0777, 0.0809, 0.3629, 0.1227],
+ device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0178, 0.0159, 0.0129, 0.0162, 0.0123, 0.0148, 0.0125],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-27 09:55:44,157 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156872.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 09:55:44,654 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.040e+02 1.543e+02 1.740e+02 2.127e+02 4.555e+02, threshold=3.480e+02, percent-clipped=1.0
+2023-03-27 09:55:52,562 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156885.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 09:56:01,290 INFO [finetune.py:976] (5/7) Epoch 28, batch 2250, loss[loss=0.1646, simple_loss=0.2382, pruned_loss=0.04552, over 4827.00 frames. ], tot_loss[loss=0.1716, simple_loss=0.2448, pruned_loss=0.04922, over 952746.87 frames. ], batch size: 25, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 09:56:15,018 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8148, 3.3882, 3.3325, 1.8867, 3.6200, 2.7108, 1.2169, 2.4878],
+ device='cuda:5'), covar=tensor([0.2871, 0.1939, 0.1397, 0.2900, 0.1051, 0.1005, 0.3504, 0.1524],
+ device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0178, 0.0159, 0.0129, 0.0162, 0.0123, 0.0148, 0.0125],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-27 09:56:16,206 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=156920.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 09:56:31,031 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1345, 3.6504, 3.7906, 4.0206, 3.8369, 3.5999, 4.2338, 1.2729],
+ device='cuda:5'), covar=tensor([0.0874, 0.0879, 0.1014, 0.1012, 0.1493, 0.1876, 0.0811, 0.6030],
+ device='cuda:5'), in_proj_covar=tensor([0.0350, 0.0244, 0.0282, 0.0293, 0.0333, 0.0285, 0.0304, 0.0302],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 09:56:32,863 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156946.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 09:56:32,881 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156946.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 09:56:33,999 INFO [finetune.py:976] (5/7) Epoch 28, batch 2300, loss[loss=0.1595, simple_loss=0.2285, pruned_loss=0.04526, over 4761.00 frames. ], tot_loss[loss=0.1715, simple_loss=0.2445, pruned_loss=0.04924, over 949926.04 frames. ], batch size: 26, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 09:57:00,121 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.068e+02 1.437e+02 1.655e+02 2.039e+02 3.893e+02, threshold=3.311e+02, percent-clipped=1.0
+2023-03-27 09:57:10,966 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0
+2023-03-27 09:57:17,550 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=156994.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 09:57:20,396 INFO [finetune.py:976] (5/7) Epoch 28, batch 2350, loss[loss=0.1915, simple_loss=0.2551, pruned_loss=0.06398, over 4898.00 frames. ], tot_loss[loss=0.1694, simple_loss=0.2417, pruned_loss=0.04854, over 948788.12 frames. ], batch size: 43, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 09:57:20,567 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. limit=2.0
+2023-03-27 09:57:28,268 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157010.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 09:57:52,968 INFO [finetune.py:976] (5/7) Epoch 28, batch 2400, loss[loss=0.1804, simple_loss=0.2514, pruned_loss=0.05471, over 4936.00 frames. ], tot_loss[loss=0.1693, simple_loss=0.2411, pruned_loss=0.04877, over 950128.34 frames. ], batch size: 38, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 09:58:08,930 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157071.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 09:58:12,820 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.019e+02 1.490e+02 1.799e+02 2.218e+02 3.254e+02, threshold=3.597e+02, percent-clipped=0.0
+2023-03-27 09:58:26,418 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9783, 1.3967, 2.0126, 2.0145, 1.8367, 1.7352, 1.9293, 1.9042],
+ device='cuda:5'), covar=tensor([0.3448, 0.3644, 0.2974, 0.3500, 0.4463, 0.3752, 0.3822, 0.2724],
+ device='cuda:5'), in_proj_covar=tensor([0.0268, 0.0249, 0.0269, 0.0298, 0.0297, 0.0274, 0.0303, 0.0253],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 09:58:28,589 INFO [finetune.py:976] (5/7) Epoch 28, batch 2450, loss[loss=0.188, simple_loss=0.2643, pruned_loss=0.05583, over 4791.00 frames. ], tot_loss[loss=0.1673, simple_loss=0.2389, pruned_loss=0.04791, over 951617.41 frames. ], batch size: 26, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 09:58:33,536 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157105.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 09:58:39,064 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5928, 1.4982, 1.0943, 0.3044, 1.2432, 1.4379, 1.3529, 1.3410],
+ device='cuda:5'), covar=tensor([0.1094, 0.0828, 0.1399, 0.1890, 0.1436, 0.2411, 0.2320, 0.0945],
+ device='cuda:5'), in_proj_covar=tensor([0.0172, 0.0192, 0.0202, 0.0182, 0.0211, 0.0211, 0.0225, 0.0197],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 09:58:57,714 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157142.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 09:59:01,795 INFO [finetune.py:976] (5/7) Epoch 28, batch 2500, loss[loss=0.188, simple_loss=0.2707, pruned_loss=0.05267, over 4823.00 frames. ], tot_loss[loss=0.17, simple_loss=0.2417, pruned_loss=0.04919, over 952678.27 frames. ], batch size: 40, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 09:59:12,013 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.24 vs. limit=5.0
+2023-03-27 09:59:23,961 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.120e+02 1.586e+02 1.871e+02 2.257e+02 5.817e+02, threshold=3.742e+02, percent-clipped=3.0
+2023-03-27 09:59:51,557 INFO [finetune.py:976] (5/7) Epoch 28, batch 2550, loss[loss=0.1901, simple_loss=0.2655, pruned_loss=0.05737, over 4825.00 frames. ], tot_loss[loss=0.1725, simple_loss=0.2449, pruned_loss=0.05008, over 954095.14 frames. ], batch size: 40, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 09:59:55,386 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157203.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:00:20,723 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157241.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:00:24,904 INFO [finetune.py:976] (5/7) Epoch 28, batch 2600, loss[loss=0.1907, simple_loss=0.2592, pruned_loss=0.06107, over 4743.00 frames. ], tot_loss[loss=0.1734, simple_loss=0.2464, pruned_loss=0.0502, over 953907.29 frames. ], batch size: 59, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 10:00:37,837 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7683, 3.6074, 3.4804, 1.8164, 3.6795, 3.0232, 1.1969, 2.7940],
+ device='cuda:5'), covar=tensor([0.2110, 0.1992, 0.1460, 0.3166, 0.1065, 0.0911, 0.3869, 0.1479],
+ device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0180, 0.0160, 0.0130, 0.0163, 0.0123, 0.0149, 0.0126],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-27 10:00:41,899 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.034e+02 1.561e+02 1.846e+02 2.212e+02 4.271e+02, threshold=3.692e+02, percent-clipped=1.0
+2023-03-27 10:00:57,757 INFO [finetune.py:976] (5/7) Epoch 28, batch 2650, loss[loss=0.1741, simple_loss=0.2426, pruned_loss=0.05282, over 4913.00 frames. ], tot_loss[loss=0.1732, simple_loss=0.2466, pruned_loss=0.04987, over 953235.36 frames. ], batch size: 42, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 10:01:30,663 INFO [finetune.py:976] (5/7) Epoch 28, batch 2700, loss[loss=0.1811, simple_loss=0.2453, pruned_loss=0.05839, over 4874.00 frames. ], tot_loss[loss=0.1717, simple_loss=0.2448, pruned_loss=0.04932, over 954607.71 frames. ], batch size: 34, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 10:01:35,643 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157356.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:01:42,597 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157366.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:01:47,173 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.455e+02 1.749e+02 2.146e+02 4.370e+02, threshold=3.498e+02, percent-clipped=1.0
+2023-03-27 10:01:53,273 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157382.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:02:12,807 INFO [finetune.py:976] (5/7) Epoch 28, batch 2750, loss[loss=0.1905, simple_loss=0.2573, pruned_loss=0.06184, over 4839.00 frames. ], tot_loss[loss=0.17, simple_loss=0.2427, pruned_loss=0.04869, over 953577.84 frames. ], batch size: 49, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 10:02:20,321 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157404.0, num_to_drop=1, layers_to_drop={1}
+2023-03-27 10:02:20,951 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157405.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:02:21,576 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8787, 1.7767, 1.5600, 2.0078, 2.2180, 2.0339, 1.5192, 1.5783],
+ device='cuda:5'), covar=tensor([0.2219, 0.2033, 0.2037, 0.1759, 0.1706, 0.1191, 0.2510, 0.1933],
+ device='cuda:5'), in_proj_covar=tensor([0.0247, 0.0212, 0.0216, 0.0200, 0.0246, 0.0191, 0.0217, 0.0205],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:02:22,854 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.18 vs. limit=5.0
+2023-03-27 10:02:29,175 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157417.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:02:46,866 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157443.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:02:50,278 INFO [finetune.py:976] (5/7) Epoch 28, batch 2800, loss[loss=0.1375, simple_loss=0.2084, pruned_loss=0.03335, over 4717.00 frames. ], tot_loss[loss=0.1663, simple_loss=0.2382, pruned_loss=0.04716, over 952618.47 frames. ], batch size: 54, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 10:02:53,299 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=157453.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:03:01,081 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157465.0, num_to_drop=1, layers_to_drop={2}
+2023-03-27 10:03:06,316 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.288e+01 1.520e+02 1.688e+02 2.071e+02 7.416e+02, threshold=3.376e+02, percent-clipped=4.0
+2023-03-27 10:03:18,780 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8923, 1.9660, 1.7074, 1.7566, 2.4576, 2.4056, 2.1292, 1.9440],
+ device='cuda:5'), covar=tensor([0.0409, 0.0365, 0.0566, 0.0357, 0.0246, 0.0595, 0.0359, 0.0449],
+ device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0106, 0.0148, 0.0111, 0.0102, 0.0116, 0.0104, 0.0113],
+ device='cuda:5'), out_proj_covar=tensor([7.8687e-05, 8.0964e-05, 1.1521e-04, 8.5012e-05, 7.8758e-05, 8.5260e-05,
+ 7.7045e-05, 8.5938e-05], device='cuda:5')
+2023-03-27 10:03:23,413 INFO [finetune.py:976] (5/7) Epoch 28, batch 2850, loss[loss=0.1611, simple_loss=0.2302, pruned_loss=0.04603, over 4861.00 frames. ], tot_loss[loss=0.1654, simple_loss=0.2369, pruned_loss=0.04692, over 953033.50 frames. ], batch size: 44, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 10:03:23,481 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157498.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:03:28,867 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157506.0, num_to_drop=1, layers_to_drop={0}
+2023-03-27 10:03:52,498 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157541.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:03:57,079 INFO [finetune.py:976] (5/7) Epoch 28, batch 2900, loss[loss=0.123, simple_loss=0.2071, pruned_loss=0.01945, over 4891.00 frames. ], tot_loss[loss=0.167, simple_loss=0.2393, pruned_loss=0.0473, over 954202.78 frames. ], batch size: 32, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 10:04:09,790 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157567.0, num_to_drop=1, layers_to_drop={3}
+2023-03-27 10:04:10,367 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157568.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:04:13,240 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.581e+01 1.484e+02 1.768e+02 2.098e+02 4.175e+02, threshold=3.535e+02, percent-clipped=1.0
+2023-03-27 10:04:18,531 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8675, 4.0573, 3.8954, 2.0485, 4.2451, 3.2651, 0.8079, 2.9186],
+ device='cuda:5'), covar=tensor([0.2641, 0.1899, 0.1354, 0.3190, 0.0865, 0.0834, 0.4442, 0.1363],
+ device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0182, 0.0161, 0.0131, 0.0165, 0.0125, 0.0150, 0.0127],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-27 10:04:24,447 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=157589.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:04:32,420 INFO [finetune.py:976] (5/7) Epoch 28, batch 2950, loss[loss=0.1188, simple_loss=0.1969, pruned_loss=0.02041, over 4758.00 frames. ], tot_loss[loss=0.1684, simple_loss=0.2417, pruned_loss=0.04756, over 954903.85 frames. ], batch size: 28, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 10:04:41,066 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0
+2023-03-27 10:05:06,829 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157629.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:05:23,716 INFO [finetune.py:976] (5/7) Epoch 28, batch 3000, loss[loss=0.1539, simple_loss=0.2284, pruned_loss=0.03971, over 4757.00 frames. ], tot_loss[loss=0.1716, simple_loss=0.2444, pruned_loss=0.04936, over 953970.76 frames. ], batch size: 28, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 10:05:23,716 INFO [finetune.py:1001] (5/7) Computing validation loss
+2023-03-27 10:05:27,052 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2406, 2.0950, 1.8125, 2.0031, 2.1973, 1.9348, 2.3385, 2.2411],
+ device='cuda:5'), covar=tensor([0.1304, 0.1967, 0.2927, 0.2331, 0.2678, 0.1713, 0.2981, 0.1679],
+ device='cuda:5'), in_proj_covar=tensor([0.0189, 0.0190, 0.0237, 0.0254, 0.0250, 0.0207, 0.0215, 0.0203],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:05:29,774 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.2737, 1.3024, 1.3345, 0.7116, 1.2814, 1.4963, 1.5540, 1.2892],
+ device='cuda:5'), covar=tensor([0.0894, 0.0473, 0.0519, 0.0504, 0.0566, 0.0575, 0.0326, 0.0590],
+ device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0149, 0.0130, 0.0123, 0.0132, 0.0131, 0.0143, 0.0152],
+ device='cuda:5'), out_proj_covar=tensor([8.8922e-05, 1.0667e-04, 9.2651e-05, 8.6451e-05, 9.2698e-05, 9.2844e-05,
+ 1.0146e-04, 1.0827e-04], device='cuda:5')
+2023-03-27 10:05:32,259 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.9103, 3.4700, 3.6257, 3.8059, 3.6422, 3.4527, 3.9572, 1.3664],
+ device='cuda:5'), covar=tensor([0.0956, 0.1007, 0.1042, 0.1048, 0.1526, 0.1910, 0.0823, 0.5818],
+ device='cuda:5'), in_proj_covar=tensor([0.0351, 0.0245, 0.0285, 0.0294, 0.0334, 0.0285, 0.0305, 0.0303],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:05:34,508 INFO [finetune.py:1010] (5/7) Epoch 28, validation: loss=0.1567, simple_loss=0.2243, pruned_loss=0.04455, over 2265189.00 frames.
+2023-03-27 10:05:34,509 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB
+2023-03-27 10:05:46,493 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157666.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:05:50,618 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.041e+02 1.524e+02 1.841e+02 2.248e+02 4.082e+02, threshold=3.682e+02, percent-clipped=3.0
+2023-03-27 10:06:07,193 INFO [finetune.py:976] (5/7) Epoch 28, batch 3050, loss[loss=0.1943, simple_loss=0.2627, pruned_loss=0.06297, over 4912.00 frames. ], tot_loss[loss=0.1716, simple_loss=0.2447, pruned_loss=0.04929, over 952969.44 frames. ], batch size: 38, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 10:06:16,640 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157712.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:06:17,824 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=157714.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:06:33,279 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157738.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:06:38,531 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0972, 1.7729, 2.4360, 1.5859, 2.2101, 2.3126, 1.6678, 2.4658],
+ device='cuda:5'), covar=tensor([0.1183, 0.1990, 0.1327, 0.1870, 0.0873, 0.1385, 0.2468, 0.0794],
+ device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0205, 0.0192, 0.0188, 0.0173, 0.0211, 0.0217, 0.0197],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:06:40,233 INFO [finetune.py:976] (5/7) Epoch 28, batch 3100, loss[loss=0.1413, simple_loss=0.2133, pruned_loss=0.03466, over 4897.00 frames. ], tot_loss[loss=0.1694, simple_loss=0.2422, pruned_loss=0.04827, over 953544.32 frames. ], batch size: 46, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 10:06:48,816 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157760.0, num_to_drop=1, layers_to_drop={1}
+2023-03-27 10:06:57,054 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.055e+02 1.389e+02 1.744e+02 2.105e+02 3.209e+02, threshold=3.488e+02, percent-clipped=0.0
+2023-03-27 10:07:19,514 INFO [finetune.py:976] (5/7) Epoch 28, batch 3150, loss[loss=0.193, simple_loss=0.2603, pruned_loss=0.06288, over 4688.00 frames. ], tot_loss[loss=0.1673, simple_loss=0.2395, pruned_loss=0.04753, over 953093.89 frames. ], batch size: 23, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 10:07:20,083 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157798.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:07:28,551 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.54 vs. limit=2.0
+2023-03-27 10:08:04,468 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=157846.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:08:05,669 INFO [finetune.py:976] (5/7) Epoch 28, batch 3200, loss[loss=0.1626, simple_loss=0.2317, pruned_loss=0.04676, over 4901.00 frames. ], tot_loss[loss=0.1664, simple_loss=0.2376, pruned_loss=0.04759, over 952147.96 frames. ], batch size: 32, lr: 2.88e-03, grad_scale: 16.0
+2023-03-27 10:08:09,353 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157853.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:08:14,087 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6447, 1.2573, 1.0189, 1.7189, 1.9998, 1.2830, 1.4818, 1.6834],
+ device='cuda:5'), covar=tensor([0.1218, 0.1733, 0.1581, 0.0918, 0.1786, 0.1852, 0.1227, 0.1448],
+ device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0093, 0.0109, 0.0092, 0.0120, 0.0092, 0.0098, 0.0088],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003],
+ device='cuda:5')
+2023-03-27 10:08:15,275 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157862.0, num_to_drop=1, layers_to_drop={3}
+2023-03-27 10:08:22,848 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.817e+01 1.555e+02 1.831e+02 2.254e+02 7.078e+02, threshold=3.662e+02, percent-clipped=7.0
+2023-03-27 10:08:38,478 INFO [finetune.py:976] (5/7) Epoch 28, batch 3250, loss[loss=0.1703, simple_loss=0.2475, pruned_loss=0.04658, over 4754.00 frames. ], tot_loss[loss=0.1681, simple_loss=0.2394, pruned_loss=0.04847, over 951896.35 frames. ], batch size: 54, lr: 2.87e-03, grad_scale: 16.0
+2023-03-27 10:08:49,842 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157914.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:08:56,864 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157924.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:09:11,707 INFO [finetune.py:976] (5/7) Epoch 28, batch 3300, loss[loss=0.1868, simple_loss=0.2586, pruned_loss=0.05748, over 4743.00 frames. ], tot_loss[loss=0.1714, simple_loss=0.2435, pruned_loss=0.04961, over 951626.31 frames. ], batch size: 59, lr: 2.87e-03, grad_scale: 16.0
+2023-03-27 10:09:29,137 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.081e+02 1.521e+02 1.758e+02 2.140e+02 4.599e+02, threshold=3.515e+02, percent-clipped=1.0
+2023-03-27 10:09:40,587 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5286, 3.4309, 3.2579, 1.4623, 3.6370, 2.6339, 0.7850, 2.3705],
+ device='cuda:5'), covar=tensor([0.2294, 0.2087, 0.1605, 0.3353, 0.1128, 0.1019, 0.4239, 0.1448],
+ device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0181, 0.0161, 0.0131, 0.0165, 0.0125, 0.0151, 0.0127],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-27 10:09:44,717 INFO [finetune.py:976] (5/7) Epoch 28, batch 3350, loss[loss=0.1589, simple_loss=0.2269, pruned_loss=0.04545, over 4870.00 frames. ], tot_loss[loss=0.173, simple_loss=0.2458, pruned_loss=0.0501, over 952884.62 frames. ], batch size: 31, lr: 2.87e-03, grad_scale: 16.0
+2023-03-27 10:09:58,177 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158012.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:10:16,564 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([5.2267, 4.5341, 4.7250, 5.0512, 4.8954, 4.6305, 5.3338, 1.6586],
+ device='cuda:5'), covar=tensor([0.0717, 0.0892, 0.0779, 0.0791, 0.1354, 0.1665, 0.0543, 0.5876],
+ device='cuda:5'), in_proj_covar=tensor([0.0354, 0.0247, 0.0287, 0.0296, 0.0338, 0.0287, 0.0307, 0.0305],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:10:32,974 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158038.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:10:38,996 INFO [finetune.py:976] (5/7) Epoch 28, batch 3400, loss[loss=0.1454, simple_loss=0.2168, pruned_loss=0.03697, over 4767.00 frames. ], tot_loss[loss=0.1741, simple_loss=0.2469, pruned_loss=0.05066, over 953748.11 frames. ], batch size: 26, lr: 2.87e-03, grad_scale: 16.0
+2023-03-27 10:10:46,887 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=158060.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:10:46,932 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158060.0, num_to_drop=1, layers_to_drop={1}
+2023-03-27 10:10:55,546 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.103e+02 1.550e+02 1.897e+02 2.350e+02 3.360e+02, threshold=3.793e+02, percent-clipped=0.0
+2023-03-27 10:11:04,975 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=158086.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:11:12,585 INFO [finetune.py:976] (5/7) Epoch 28, batch 3450, loss[loss=0.1725, simple_loss=0.2371, pruned_loss=0.05392, over 4158.00 frames. ], tot_loss[loss=0.1731, simple_loss=0.2464, pruned_loss=0.04993, over 954481.96 frames. ], batch size: 65, lr: 2.87e-03, grad_scale: 16.0
+2023-03-27 10:11:18,610 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=158108.0, num_to_drop=1, layers_to_drop={0}
+2023-03-27 10:11:28,439 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158122.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:11:46,008 INFO [finetune.py:976] (5/7) Epoch 28, batch 3500, loss[loss=0.1689, simple_loss=0.2337, pruned_loss=0.05203, over 4823.00 frames. ], tot_loss[loss=0.1712, simple_loss=0.2438, pruned_loss=0.04932, over 955856.71 frames. ], batch size: 39, lr: 2.87e-03, grad_scale: 16.0
+2023-03-27 10:11:51,042 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0
+2023-03-27 10:11:55,066 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158162.0, num_to_drop=1, layers_to_drop={1}
+2023-03-27 10:12:02,509 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.003e+02 1.432e+02 1.800e+02 2.074e+02 3.411e+02, threshold=3.600e+02, percent-clipped=0.0
+2023-03-27 10:12:09,586 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158183.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:12:19,505 INFO [finetune.py:976] (5/7) Epoch 28, batch 3550, loss[loss=0.1358, simple_loss=0.2168, pruned_loss=0.02738, over 4840.00 frames. ], tot_loss[loss=0.1681, simple_loss=0.2401, pruned_loss=0.0481, over 957567.32 frames. ], batch size: 44, lr: 2.87e-03, grad_scale: 16.0
+2023-03-27 10:12:28,585 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158209.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:12:29,210 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=158210.0, num_to_drop=1, layers_to_drop={0}
+2023-03-27 10:12:31,674 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6584, 1.5907, 1.4780, 1.6038, 1.3337, 3.6298, 1.6199, 2.0713],
+ device='cuda:5'), covar=tensor([0.4008, 0.3092, 0.2408, 0.2805, 0.1673, 0.0241, 0.2482, 0.1084],
+ device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0120, 0.0124, 0.0114, 0.0095, 0.0094, 0.0094],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 10:12:38,720 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158224.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:13:02,524 INFO [finetune.py:976] (5/7) Epoch 28, batch 3600, loss[loss=0.201, simple_loss=0.2736, pruned_loss=0.06419, over 4853.00 frames. ], tot_loss[loss=0.166, simple_loss=0.2377, pruned_loss=0.0472, over 956891.24 frames. ], batch size: 44, lr: 2.87e-03, grad_scale: 16.0
+2023-03-27 10:13:18,165 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=158272.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:13:18,718 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.065e+02 1.425e+02 1.679e+02 2.016e+02 3.584e+02, threshold=3.358e+02, percent-clipped=0.0
+2023-03-27 10:13:36,304 INFO [finetune.py:976] (5/7) Epoch 28, batch 3650, loss[loss=0.1549, simple_loss=0.227, pruned_loss=0.04138, over 4045.00 frames. ], tot_loss[loss=0.1676, simple_loss=0.2392, pruned_loss=0.048, over 953760.60 frames. ], batch size: 65, lr: 2.87e-03, grad_scale: 16.0
+2023-03-27 10:14:10,066 INFO [finetune.py:976] (5/7) Epoch 28, batch 3700, loss[loss=0.1545, simple_loss=0.2399, pruned_loss=0.03457, over 4829.00 frames. ], tot_loss[loss=0.1685, simple_loss=0.2414, pruned_loss=0.04775, over 955194.31 frames. ], batch size: 33, lr: 2.87e-03, grad_scale: 16.0
+2023-03-27 10:14:23,365 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9815, 1.8325, 1.5655, 1.6978, 1.7187, 1.7116, 1.7531, 2.4524],
+ device='cuda:5'), covar=tensor([0.3507, 0.3854, 0.3254, 0.3423, 0.3737, 0.2296, 0.3367, 0.1536],
+ device='cuda:5'), in_proj_covar=tensor([0.0290, 0.0266, 0.0239, 0.0277, 0.0262, 0.0232, 0.0261, 0.0240],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:14:26,133 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.068e+02 1.573e+02 1.954e+02 2.338e+02 5.991e+02, threshold=3.909e+02, percent-clipped=5.0
+2023-03-27 10:14:33,456 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158384.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:14:43,250 INFO [finetune.py:976] (5/7) Epoch 28, batch 3750, loss[loss=0.1341, simple_loss=0.1966, pruned_loss=0.03582, over 4711.00 frames. ], tot_loss[loss=0.17, simple_loss=0.2434, pruned_loss=0.04831, over 955658.27 frames. ], batch size: 23, lr: 2.87e-03, grad_scale: 16.0
+2023-03-27 10:15:20,392 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158445.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:15:22,039 INFO [finetune.py:976] (5/7) Epoch 28, batch 3800, loss[loss=0.1528, simple_loss=0.239, pruned_loss=0.03325, over 4882.00 frames. ], tot_loss[loss=0.1715, simple_loss=0.2447, pruned_loss=0.04915, over 954595.87 frames. ], batch size: 43, lr: 2.87e-03, grad_scale: 16.0
+2023-03-27 10:15:51,766 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.090e+02 1.580e+02 1.909e+02 2.258e+02 3.504e+02, threshold=3.818e+02, percent-clipped=0.0
+2023-03-27 10:15:55,436 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158478.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:16:08,885 INFO [finetune.py:976] (5/7) Epoch 28, batch 3850, loss[loss=0.2056, simple_loss=0.2659, pruned_loss=0.07261, over 4909.00 frames. ], tot_loss[loss=0.1713, simple_loss=0.2444, pruned_loss=0.04906, over 956416.93 frames. ], batch size: 37, lr: 2.87e-03, grad_scale: 16.0
+2023-03-27 10:16:16,605 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158509.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:16:40,978 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1445, 1.9671, 1.8393, 1.9920, 1.8463, 1.8761, 1.9126, 2.6116],
+ device='cuda:5'), covar=tensor([0.3242, 0.3867, 0.2809, 0.3423, 0.4116, 0.2151, 0.3480, 0.1464],
+ device='cuda:5'), in_proj_covar=tensor([0.0290, 0.0265, 0.0238, 0.0276, 0.0261, 0.0231, 0.0260, 0.0238],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:16:42,036 INFO [finetune.py:976] (5/7) Epoch 28, batch 3900, loss[loss=0.1429, simple_loss=0.2161, pruned_loss=0.03481, over 4900.00 frames. ], tot_loss[loss=0.1683, simple_loss=0.241, pruned_loss=0.04781, over 957008.96 frames. ], batch size: 32, lr: 2.87e-03, grad_scale: 16.0
+2023-03-27 10:16:48,974 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=158557.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:16:58,922 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.091e+02 1.423e+02 1.682e+02 2.011e+02 3.434e+02, threshold=3.365e+02, percent-clipped=0.0
+2023-03-27 10:17:15,468 INFO [finetune.py:976] (5/7) Epoch 28, batch 3950, loss[loss=0.2055, simple_loss=0.2608, pruned_loss=0.07515, over 4913.00 frames. ], tot_loss[loss=0.1663, simple_loss=0.2381, pruned_loss=0.04731, over 955350.48 frames. ], batch size: 43, lr: 2.87e-03, grad_scale: 16.0
+2023-03-27 10:17:48,877 INFO [finetune.py:976] (5/7) Epoch 28, batch 4000, loss[loss=0.1601, simple_loss=0.2419, pruned_loss=0.03912, over 4812.00 frames. ], tot_loss[loss=0.1676, simple_loss=0.2386, pruned_loss=0.0483, over 952881.56 frames. ], batch size: 41, lr: 2.87e-03, grad_scale: 16.0
+2023-03-27 10:18:15,600 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.131e+01 1.444e+02 1.852e+02 2.094e+02 7.470e+02, threshold=3.703e+02, percent-clipped=2.0
+2023-03-27 10:18:32,167 INFO [finetune.py:976] (5/7) Epoch 28, batch 4050, loss[loss=0.1581, simple_loss=0.2323, pruned_loss=0.04198, over 4876.00 frames. ], tot_loss[loss=0.1702, simple_loss=0.2419, pruned_loss=0.04925, over 954494.74 frames. ], batch size: 34, lr: 2.87e-03, grad_scale: 32.0
+2023-03-27 10:18:36,437 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4203, 2.4735, 2.0991, 2.1715, 3.0128, 3.0772, 2.5696, 2.4226],
+ device='cuda:5'), covar=tensor([0.0361, 0.0384, 0.0586, 0.0400, 0.0241, 0.0398, 0.0387, 0.0372],
+ device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0106, 0.0149, 0.0112, 0.0102, 0.0116, 0.0104, 0.0114],
+ device='cuda:5'), out_proj_covar=tensor([7.9259e-05, 8.1460e-05, 1.1565e-04, 8.5355e-05, 7.9033e-05, 8.5477e-05,
+ 7.7526e-05, 8.6754e-05], device='cuda:5')
+2023-03-27 10:18:59,956 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158740.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:19:05,204 INFO [finetune.py:976] (5/7) Epoch 28, batch 4100, loss[loss=0.1501, simple_loss=0.2326, pruned_loss=0.03373, over 4815.00 frames. ], tot_loss[loss=0.1711, simple_loss=0.2438, pruned_loss=0.04916, over 953873.16 frames. ], batch size: 25, lr: 2.87e-03, grad_scale: 32.0
+2023-03-27 10:19:14,676 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9909, 1.3565, 1.9018, 1.9300, 1.7356, 1.7057, 1.8702, 1.7995],
+ device='cuda:5'), covar=tensor([0.4257, 0.4105, 0.3465, 0.3841, 0.4797, 0.4244, 0.4756, 0.3227],
+ device='cuda:5'), in_proj_covar=tensor([0.0267, 0.0248, 0.0268, 0.0296, 0.0295, 0.0273, 0.0302, 0.0251],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:19:22,719 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.067e+02 1.534e+02 1.832e+02 2.269e+02 3.411e+02, threshold=3.665e+02, percent-clipped=0.0
+2023-03-27 10:19:25,291 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158777.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:19:25,878 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158778.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:19:29,401 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158783.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:19:38,842 INFO [finetune.py:976] (5/7) Epoch 28, batch 4150, loss[loss=0.1435, simple_loss=0.2427, pruned_loss=0.02217, over 4816.00 frames. ], tot_loss[loss=0.1705, simple_loss=0.2435, pruned_loss=0.04877, over 951663.49 frames. ], batch size: 38, lr: 2.87e-03, grad_scale: 32.0
+2023-03-27 10:19:58,148 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=158826.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:20:03,646 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158835.0, num_to_drop=1, layers_to_drop={0}
+2023-03-27 10:20:05,950 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158838.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:20:09,554 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158844.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:20:11,900 INFO [finetune.py:976] (5/7) Epoch 28, batch 4200, loss[loss=0.1616, simple_loss=0.2425, pruned_loss=0.04036, over 4887.00 frames. ], tot_loss[loss=0.1715, simple_loss=0.2451, pruned_loss=0.049, over 954549.99 frames. ], batch size: 32, lr: 2.87e-03, grad_scale: 32.0
+2023-03-27 10:20:35,371 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.763e+01 1.457e+02 1.627e+02 2.050e+02 3.601e+02, threshold=3.253e+02, percent-clipped=0.0
+2023-03-27 10:21:03,466 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158896.0, num_to_drop=1, layers_to_drop={3}
+2023-03-27 10:21:04,569 INFO [finetune.py:976] (5/7) Epoch 28, batch 4250, loss[loss=0.1753, simple_loss=0.2329, pruned_loss=0.05885, over 4799.00 frames. ], tot_loss[loss=0.1701, simple_loss=0.2432, pruned_loss=0.04847, over 954720.82 frames. ], batch size: 25, lr: 2.87e-03, grad_scale: 32.0
+2023-03-27 10:21:46,470 INFO [finetune.py:976] (5/7) Epoch 28, batch 4300, loss[loss=0.1375, simple_loss=0.2134, pruned_loss=0.03082, over 4766.00 frames. ], tot_loss[loss=0.1682, simple_loss=0.2407, pruned_loss=0.04783, over 956621.76 frames. ], batch size: 26, lr: 2.87e-03, grad_scale: 32.0
+2023-03-27 10:22:01,913 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.0290, 2.6418, 2.5429, 1.3110, 2.7282, 2.1586, 2.1118, 2.5130],
+ device='cuda:5'), covar=tensor([0.0952, 0.0881, 0.1999, 0.2260, 0.1620, 0.2467, 0.2236, 0.1212],
+ device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0191, 0.0200, 0.0180, 0.0211, 0.0210, 0.0224, 0.0196],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:22:03,613 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.096e+02 1.473e+02 1.789e+02 2.045e+02 3.501e+02, threshold=3.577e+02, percent-clipped=2.0
+2023-03-27 10:22:08,869 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2425, 2.2072, 2.3276, 1.6828, 2.2181, 2.4012, 2.5045, 1.9014],
+ device='cuda:5'), covar=tensor([0.0604, 0.0623, 0.0629, 0.0833, 0.0709, 0.0687, 0.0525, 0.1053],
+ device='cuda:5'), in_proj_covar=tensor([0.0130, 0.0137, 0.0139, 0.0118, 0.0128, 0.0139, 0.0139, 0.0162],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:22:20,213 INFO [finetune.py:976] (5/7) Epoch 28, batch 4350, loss[loss=0.123, simple_loss=0.1993, pruned_loss=0.02335, over 4765.00 frames. ], tot_loss[loss=0.1653, simple_loss=0.2374, pruned_loss=0.04662, over 956160.43 frames. ], batch size: 27, lr: 2.87e-03, grad_scale: 32.0
+2023-03-27 10:22:25,126 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0
+2023-03-27 10:22:48,303 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159040.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:22:53,108 INFO [finetune.py:976] (5/7) Epoch 28, batch 4400, loss[loss=0.1835, simple_loss=0.2513, pruned_loss=0.05785, over 4883.00 frames. ], tot_loss[loss=0.1649, simple_loss=0.2368, pruned_loss=0.04656, over 955392.25 frames. ], batch size: 32, lr: 2.87e-03, grad_scale: 32.0
+2023-03-27 10:23:07,425 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159069.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:23:09,712 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.014e+02 1.529e+02 1.726e+02 2.218e+02 4.795e+02, threshold=3.452e+02, percent-clipped=2.0
+2023-03-27 10:23:24,715 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=159088.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:23:35,365 INFO [finetune.py:976] (5/7) Epoch 28, batch 4450, loss[loss=0.1786, simple_loss=0.2525, pruned_loss=0.05235, over 4752.00 frames. ], tot_loss[loss=0.1677, simple_loss=0.2404, pruned_loss=0.04745, over 955698.37 frames. ], batch size: 59, lr: 2.87e-03, grad_scale: 32.0
+2023-03-27 10:24:00,787 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159130.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:24:02,994 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159133.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:24:07,592 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159139.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:24:12,910 INFO [finetune.py:976] (5/7) Epoch 28, batch 4500, loss[loss=0.1552, simple_loss=0.2407, pruned_loss=0.0348, over 4849.00 frames. ], tot_loss[loss=0.1694, simple_loss=0.2428, pruned_loss=0.04802, over 956781.78 frames. ], batch size: 49, lr: 2.87e-03, grad_scale: 32.0
+2023-03-27 10:24:20,181 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159159.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:24:28,961 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.553e+02 1.848e+02 2.152e+02 3.966e+02, threshold=3.696e+02, percent-clipped=2.0
+2023-03-27 10:24:29,070 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.2469, 3.7357, 3.9362, 4.0801, 4.0376, 3.7361, 4.2970, 1.5729],
+ device='cuda:5'), covar=tensor([0.0935, 0.0893, 0.0907, 0.1037, 0.1549, 0.1702, 0.0879, 0.5377],
+ device='cuda:5'), in_proj_covar=tensor([0.0356, 0.0249, 0.0289, 0.0299, 0.0342, 0.0289, 0.0309, 0.0307],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:24:35,804 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0
+2023-03-27 10:24:41,940 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159191.0, num_to_drop=1, layers_to_drop={3}
+2023-03-27 10:24:46,582 INFO [finetune.py:976] (5/7) Epoch 28, batch 4550, loss[loss=0.148, simple_loss=0.2085, pruned_loss=0.04373, over 4400.00 frames. ], tot_loss[loss=0.1706, simple_loss=0.2442, pruned_loss=0.04849, over 957279.49 frames. ], batch size: 19, lr: 2.87e-03, grad_scale: 32.0
+2023-03-27 10:24:46,653 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7307, 3.9172, 3.7512, 1.9266, 4.0400, 2.9316, 1.1642, 2.7518],
+ device='cuda:5'), covar=tensor([0.2171, 0.1704, 0.1462, 0.3153, 0.0880, 0.0946, 0.3882, 0.1372],
+ device='cuda:5'), in_proj_covar=tensor([0.0153, 0.0183, 0.0162, 0.0132, 0.0165, 0.0126, 0.0151, 0.0128],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:5')
+2023-03-27 10:25:00,529 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159220.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:25:09,817 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8967, 1.7574, 1.9403, 1.1787, 1.9256, 1.9101, 1.8937, 1.5698],
+ device='cuda:5'), covar=tensor([0.0592, 0.0827, 0.0655, 0.0923, 0.0735, 0.0765, 0.0712, 0.1283],
+ device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0139, 0.0141, 0.0120, 0.0129, 0.0140, 0.0141, 0.0164],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:25:20,096 INFO [finetune.py:976] (5/7) Epoch 28, batch 4600, loss[loss=0.1689, simple_loss=0.2304, pruned_loss=0.05367, over 4757.00 frames. ], tot_loss[loss=0.1699, simple_loss=0.2436, pruned_loss=0.04811, over 958715.82 frames. ], batch size: 28, lr: 2.87e-03, grad_scale: 32.0
+2023-03-27 10:25:35,685 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.005e+02 1.454e+02 1.668e+02 2.062e+02 3.965e+02, threshold=3.336e+02, percent-clipped=3.0
+2023-03-27 10:25:38,706 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2508, 2.1463, 1.8022, 2.2679, 2.8308, 2.2955, 2.3004, 1.7398],
+ device='cuda:5'), covar=tensor([0.2031, 0.1769, 0.1849, 0.1493, 0.1630, 0.1061, 0.1809, 0.1843],
+ device='cuda:5'), in_proj_covar=tensor([0.0249, 0.0215, 0.0217, 0.0202, 0.0248, 0.0193, 0.0219, 0.0207],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:25:54,148 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159290.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:26:05,014 INFO [finetune.py:976] (5/7) Epoch 28, batch 4650, loss[loss=0.1717, simple_loss=0.2463, pruned_loss=0.04854, over 4822.00 frames. ], tot_loss[loss=0.1692, simple_loss=0.2419, pruned_loss=0.04827, over 958014.23 frames. ], batch size: 33, lr: 2.87e-03, grad_scale: 32.0
+2023-03-27 10:26:54,498 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.9996, 1.2015, 1.4676, 1.2333, 1.3845, 2.4251, 1.1542, 1.3864],
+ device='cuda:5'), covar=tensor([0.1107, 0.1932, 0.1032, 0.0994, 0.1663, 0.0394, 0.1613, 0.1849],
+ device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0083, 0.0074, 0.0077, 0.0092, 0.0081, 0.0086, 0.0081],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0005],
+ device='cuda:5')
+2023-03-27 10:26:55,598 INFO [finetune.py:976] (5/7) Epoch 28, batch 4700, loss[loss=0.1402, simple_loss=0.2163, pruned_loss=0.03212, over 4819.00 frames. ], tot_loss[loss=0.1668, simple_loss=0.2389, pruned_loss=0.04741, over 959328.41 frames. ], batch size: 38, lr: 2.87e-03, grad_scale: 32.0
+2023-03-27 10:26:58,035 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159351.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:27:11,661 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.356e+01 1.407e+02 1.592e+02 1.978e+02 3.098e+02, threshold=3.183e+02, percent-clipped=0.0
+2023-03-27 10:27:24,859 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0
+2023-03-27 10:27:28,647 INFO [finetune.py:976] (5/7) Epoch 28, batch 4750, loss[loss=0.1402, simple_loss=0.2116, pruned_loss=0.03438, over 4810.00 frames. ], tot_loss[loss=0.1669, simple_loss=0.2381, pruned_loss=0.04779, over 960661.48 frames. ], batch size: 25, lr: 2.87e-03, grad_scale: 32.0
+2023-03-27 10:27:46,457 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159425.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:27:51,867 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159433.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:27:55,945 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159439.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:28:02,248 INFO [finetune.py:976] (5/7) Epoch 28, batch 4800, loss[loss=0.186, simple_loss=0.2717, pruned_loss=0.05016, over 4818.00 frames. ], tot_loss[loss=0.1688, simple_loss=0.2408, pruned_loss=0.04846, over 959836.05 frames. ], batch size: 39, lr: 2.86e-03, grad_scale: 32.0
+2023-03-27 10:28:18,791 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.120e+02 1.565e+02 1.779e+02 2.195e+02 3.956e+02, threshold=3.558e+02, percent-clipped=1.0
+2023-03-27 10:28:23,704 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=159481.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:28:27,818 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=159487.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:28:30,767 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159491.0, num_to_drop=1, layers_to_drop={2}
+2023-03-27 10:28:36,820 INFO [finetune.py:976] (5/7) Epoch 28, batch 4850, loss[loss=0.1505, simple_loss=0.2175, pruned_loss=0.04175, over 4743.00 frames. ], tot_loss[loss=0.1715, simple_loss=0.2444, pruned_loss=0.04933, over 958670.60 frames. ], batch size: 23, lr: 2.86e-03, grad_scale: 32.0
+2023-03-27 10:28:57,883 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159515.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:29:07,373 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0998, 2.0661, 2.2370, 1.5663, 1.9994, 2.2475, 2.3252, 1.7464],
+ device='cuda:5'), covar=tensor([0.0595, 0.0697, 0.0629, 0.0830, 0.0773, 0.0668, 0.0536, 0.1239],
+ device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0139, 0.0141, 0.0120, 0.0130, 0.0141, 0.0141, 0.0164],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:29:17,408 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=159539.0, num_to_drop=1, layers_to_drop={0}
+2023-03-27 10:29:23,230 INFO [finetune.py:976] (5/7) Epoch 28, batch 4900, loss[loss=0.1689, simple_loss=0.2431, pruned_loss=0.04734, over 4866.00 frames. ], tot_loss[loss=0.173, simple_loss=0.2462, pruned_loss=0.04989, over 957654.65 frames. ], batch size: 31, lr: 2.86e-03, grad_scale: 32.0
+2023-03-27 10:29:40,325 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.030e+02 1.553e+02 1.825e+02 2.271e+02 5.584e+02, threshold=3.651e+02, percent-clipped=3.0
+2023-03-27 10:29:56,964 INFO [finetune.py:976] (5/7) Epoch 28, batch 4950, loss[loss=0.1211, simple_loss=0.1883, pruned_loss=0.02698, over 4416.00 frames. ], tot_loss[loss=0.1727, simple_loss=0.2462, pruned_loss=0.04963, over 957584.87 frames. ], batch size: 19, lr: 2.86e-03, grad_scale: 32.0
+2023-03-27 10:30:18,768 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159631.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:30:28,814 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159646.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:30:29,445 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4495, 2.2247, 1.7816, 0.7654, 1.9479, 1.9411, 1.8720, 2.1027],
+ device='cuda:5'), covar=tensor([0.0767, 0.0833, 0.1620, 0.2127, 0.1488, 0.2227, 0.2147, 0.0884],
+ device='cuda:5'), in_proj_covar=tensor([0.0172, 0.0191, 0.0201, 0.0181, 0.0211, 0.0210, 0.0224, 0.0196],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:30:29,933 INFO [finetune.py:976] (5/7) Epoch 28, batch 5000, loss[loss=0.1715, simple_loss=0.2443, pruned_loss=0.04931, over 4852.00 frames. ], tot_loss[loss=0.1708, simple_loss=0.244, pruned_loss=0.04883, over 956469.36 frames. ], batch size: 31, lr: 2.86e-03, grad_scale: 32.0
+2023-03-27 10:30:47,441 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.023e+02 1.490e+02 1.734e+02 1.957e+02 3.436e+02, threshold=3.469e+02, percent-clipped=0.0
+2023-03-27 10:30:53,577 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3385, 1.5139, 0.7531, 2.1766, 2.6816, 1.8594, 1.9311, 2.0557],
+ device='cuda:5'), covar=tensor([0.1280, 0.1937, 0.1936, 0.1065, 0.1656, 0.1770, 0.1298, 0.1914],
+ device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0093, 0.0109, 0.0093, 0.0120, 0.0093, 0.0098, 0.0088],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003],
+ device='cuda:5')
+2023-03-27 10:30:59,472 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159692.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:31:05,618 INFO [finetune.py:976] (5/7) Epoch 28, batch 5050, loss[loss=0.1567, simple_loss=0.2249, pruned_loss=0.04427, over 4825.00 frames. ], tot_loss[loss=0.1687, simple_loss=0.2411, pruned_loss=0.04816, over 955398.10 frames. ], batch size: 41, lr: 2.86e-03, grad_scale: 32.0
+2023-03-27 10:31:19,363 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-03-27 10:31:32,254 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159725.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:31:54,044 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5120, 1.4367, 2.0286, 1.8554, 1.5911, 3.3258, 1.3275, 1.6506],
+ device='cuda:5'), covar=tensor([0.0977, 0.1769, 0.1109, 0.0837, 0.1463, 0.0252, 0.1464, 0.1622],
+ device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0083, 0.0073, 0.0076, 0.0091, 0.0080, 0.0086, 0.0081],
+ device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0005],
+ device='cuda:5')
+2023-03-27 10:31:55,163 INFO [finetune.py:976] (5/7) Epoch 28, batch 5100, loss[loss=0.1555, simple_loss=0.2356, pruned_loss=0.03773, over 4869.00 frames. ], tot_loss[loss=0.1658, simple_loss=0.2379, pruned_loss=0.04683, over 955640.62 frames. ], batch size: 34, lr: 2.86e-03, grad_scale: 32.0
+2023-03-27 10:32:21,761 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.008e+02 1.553e+02 1.866e+02 2.180e+02 3.771e+02, threshold=3.731e+02, percent-clipped=1.0
+2023-03-27 10:32:21,834 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=159773.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:32:32,318 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0743, 1.9754, 1.7278, 1.8552, 1.8493, 1.8364, 1.9034, 2.5137],
+ device='cuda:5'), covar=tensor([0.3454, 0.3709, 0.2993, 0.3518, 0.4131, 0.2325, 0.3534, 0.1705],
+ device='cuda:5'), in_proj_covar=tensor([0.0289, 0.0264, 0.0238, 0.0275, 0.0262, 0.0232, 0.0260, 0.0239],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:32:37,625 INFO [finetune.py:976] (5/7) Epoch 28, batch 5150, loss[loss=0.2125, simple_loss=0.2798, pruned_loss=0.07256, over 4839.00 frames. ], tot_loss[loss=0.1659, simple_loss=0.2377, pruned_loss=0.04699, over 955205.09 frames. ], batch size: 47, lr: 2.86e-03, grad_scale: 32.0
+2023-03-27 10:32:49,249 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159815.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:32:53,745 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.45 vs. limit=5.0
+2023-03-27 10:33:11,651 INFO [finetune.py:976] (5/7) Epoch 28, batch 5200, loss[loss=0.1689, simple_loss=0.2551, pruned_loss=0.04135, over 4865.00 frames. ], tot_loss[loss=0.1681, simple_loss=0.2406, pruned_loss=0.04784, over 954377.21 frames. ], batch size: 44, lr: 2.86e-03, grad_scale: 32.0
+2023-03-27 10:33:15,846 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6594, 1.4675, 2.2733, 3.3878, 2.1951, 2.3448, 1.0560, 2.8761],
+ device='cuda:5'), covar=tensor([0.1665, 0.1398, 0.1213, 0.0562, 0.0790, 0.1938, 0.1789, 0.0447],
+ device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0116, 0.0133, 0.0166, 0.0101, 0.0136, 0.0125, 0.0102],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003],
+ device='cuda:5')
+2023-03-27 10:33:16,450 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159855.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:33:21,713 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=159863.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:33:22,462 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-03-27 10:33:28,751 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.499e+02 1.871e+02 2.174e+02 4.313e+02, threshold=3.743e+02, percent-clipped=1.0
+2023-03-27 10:33:44,909 INFO [finetune.py:976] (5/7) Epoch 28, batch 5250, loss[loss=0.1461, simple_loss=0.2287, pruned_loss=0.03173, over 4816.00 frames. ], tot_loss[loss=0.1702, simple_loss=0.2431, pruned_loss=0.04863, over 955024.54 frames. ], batch size: 39, lr: 2.86e-03, grad_scale: 16.0
+2023-03-27 10:33:53,301 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159910.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:33:59,377 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159916.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:34:26,201 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159946.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:34:27,320 INFO [finetune.py:976] (5/7) Epoch 28, batch 5300, loss[loss=0.1987, simple_loss=0.2569, pruned_loss=0.07027, over 4207.00 frames. ], tot_loss[loss=0.1706, simple_loss=0.2439, pruned_loss=0.04864, over 955178.30 frames. ], batch size: 65, lr: 2.86e-03, grad_scale: 16.0
+2023-03-27 10:34:28,639 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2283, 1.6745, 1.1133, 2.1138, 2.3387, 1.9370, 1.8941, 2.1038],
+ device='cuda:5'), covar=tensor([0.1058, 0.1522, 0.1612, 0.0871, 0.1619, 0.1730, 0.1041, 0.1480],
+ device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0093, 0.0108, 0.0093, 0.0120, 0.0092, 0.0098, 0.0088],
+ device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003],
+ device='cuda:5')
+2023-03-27 10:34:40,284 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1914, 2.0710, 1.8232, 2.2667, 2.6809, 2.2153, 2.0533, 1.6742],
+ device='cuda:5'), covar=tensor([0.2061, 0.1838, 0.1762, 0.1486, 0.1706, 0.1078, 0.1960, 0.1819],
+ device='cuda:5'), in_proj_covar=tensor([0.0246, 0.0212, 0.0215, 0.0199, 0.0245, 0.0191, 0.0217, 0.0205],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:34:50,636 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159971.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:34:52,303 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.066e+02 1.498e+02 1.765e+02 2.107e+02 3.764e+02, threshold=3.530e+02, percent-clipped=1.0
+2023-03-27 10:35:01,757 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159987.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:35:06,014 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=159994.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 10:35:06,703 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5091, 2.6853, 2.6615, 2.0723, 2.7124, 2.9432, 3.0956, 2.3742],
+ device='cuda:5'), covar=tensor([0.0701, 0.0670, 0.0808, 0.0822, 0.0640, 0.0757, 0.0573, 0.1106],
+ device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0139, 0.0141, 0.0120, 0.0130, 0.0140, 0.0141, 0.0165],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:35:08,414 INFO [finetune.py:976] (5/7) Epoch 28, batch 5350, loss[loss=0.1744, simple_loss=0.2447, pruned_loss=0.05203, over 4778.00 frames. ], tot_loss[loss=0.1708, simple_loss=0.2444, pruned_loss=0.04861, over 955107.80 frames. ], batch size: 29, lr: 2.86e-03, grad_scale: 16.0
+2023-03-27 10:35:15,105 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.9098, 4.2810, 4.5286, 4.7676, 4.6888, 4.4026, 5.0284, 1.5923],
+ device='cuda:5'), covar=tensor([0.0643, 0.0881, 0.0652, 0.0698, 0.1012, 0.1286, 0.0446, 0.5475],
+ device='cuda:5'), in_proj_covar=tensor([0.0358, 0.0252, 0.0290, 0.0302, 0.0345, 0.0292, 0.0310, 0.0309],
+ device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:5')
+2023-03-27 10:35:17,527 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0944, 2.1325, 1.9460, 2.1868, 1.6282, 4.8071, 1.9076, 2.6491],
+ device='cuda:5'), covar=tensor([0.2958, 0.2275, 0.1849, 0.2038, 0.1533, 0.0143, 0.2222, 0.0986],
+ device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0120, 0.0123, 0.0113, 0.0095, 0.0093, 0.0094],
+ device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004],
+ device='cuda:5')
+2023-03-27 10:35:35,794 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.27 vs. limit=5.0
+2023-03-27 10:35:43,141 INFO [finetune.py:976] (5/7) Epoch 28, batch 5400, loss[loss=0.176, simple_loss=0.2388, pruned_loss=0.05658, over 4789.00 frames. ], tot_loss[loss=0.1688, simple_loss=0.2415, pruned_loss=0.04803, over 955175.23 frames. ], batch size: 51, lr: 2.86e-03, grad_scale: 16.0
+2023-03-27 10:36:00,195 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.280e+01 1.494e+02 1.877e+02 2.215e+02 3.734e+02, threshold=3.755e+02, percent-clipped=1.0
+2023-03-27 10:36:16,671 INFO [finetune.py:976] (5/7) Epoch 28, batch 5450, loss[loss=0.1325, simple_loss=0.2134, pruned_loss=0.02579, over 4868.00 frames. ], tot_loss[loss=0.1656, simple_loss=0.2379, pruned_loss=0.04667, over 954325.80 frames.
], batch size: 49, lr: 2.86e-03, grad_scale: 16.0 +2023-03-27 10:36:25,900 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3800, 2.7715, 2.5528, 2.0664, 2.5676, 2.9861, 3.0962, 2.4642], + device='cuda:5'), covar=tensor([0.0700, 0.0581, 0.0739, 0.0781, 0.0698, 0.0682, 0.0482, 0.0979], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0139, 0.0141, 0.0119, 0.0129, 0.0140, 0.0141, 0.0164], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 10:36:46,057 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3080, 1.4741, 1.5054, 0.8189, 1.5211, 1.7628, 1.7858, 1.3935], + device='cuda:5'), covar=tensor([0.0842, 0.0527, 0.0476, 0.0480, 0.0447, 0.0481, 0.0316, 0.0629], + device='cuda:5'), in_proj_covar=tensor([0.0121, 0.0148, 0.0131, 0.0123, 0.0132, 0.0130, 0.0143, 0.0152], + device='cuda:5'), out_proj_covar=tensor([8.8519e-05, 1.0628e-04, 9.2924e-05, 8.6075e-05, 9.2281e-05, 9.2267e-05, + 1.0175e-04, 1.0847e-04], device='cuda:5') +2023-03-27 10:37:05,467 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3166, 1.4773, 1.5190, 0.8698, 1.5237, 1.7862, 1.7646, 1.3907], + device='cuda:5'), covar=tensor([0.1053, 0.0694, 0.0559, 0.0598, 0.0574, 0.0617, 0.0340, 0.0778], + device='cuda:5'), in_proj_covar=tensor([0.0121, 0.0148, 0.0131, 0.0123, 0.0131, 0.0130, 0.0143, 0.0152], + device='cuda:5'), out_proj_covar=tensor([8.8362e-05, 1.0608e-04, 9.2743e-05, 8.5933e-05, 9.2106e-05, 9.2065e-05, + 1.0159e-04, 1.0826e-04], device='cuda:5') +2023-03-27 10:37:07,793 INFO [finetune.py:976] (5/7) Epoch 28, batch 5500, loss[loss=0.1811, simple_loss=0.2537, pruned_loss=0.05427, over 4765.00 frames. ], tot_loss[loss=0.164, simple_loss=0.236, pruned_loss=0.046, over 952258.98 frames. ], batch size: 59, lr: 2.86e-03, grad_scale: 16.0 +2023-03-27 10:37:38,236 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.497e+02 1.808e+02 2.088e+02 3.346e+02, threshold=3.616e+02, percent-clipped=0.0 +2023-03-27 10:37:55,667 INFO [finetune.py:976] (5/7) Epoch 28, batch 5550, loss[loss=0.1923, simple_loss=0.2637, pruned_loss=0.06048, over 4805.00 frames. ], tot_loss[loss=0.1658, simple_loss=0.238, pruned_loss=0.04677, over 952516.74 frames. ], batch size: 41, lr: 2.86e-03, grad_scale: 16.0 +2023-03-27 10:38:02,659 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4675, 1.4542, 1.5188, 0.8345, 1.5491, 1.5695, 1.5496, 1.3960], + device='cuda:5'), covar=tensor([0.0642, 0.0870, 0.0751, 0.0954, 0.1002, 0.0755, 0.0727, 0.1364], + device='cuda:5'), in_proj_covar=tensor([0.0133, 0.0139, 0.0141, 0.0120, 0.0130, 0.0141, 0.0141, 0.0164], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 10:38:03,851 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=160211.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:38:27,211 INFO [finetune.py:976] (5/7) Epoch 28, batch 5600, loss[loss=0.1505, simple_loss=0.2187, pruned_loss=0.04117, over 4709.00 frames. ], tot_loss[loss=0.1671, simple_loss=0.2404, pruned_loss=0.04689, over 955007.29 frames. 
], batch size: 23, lr: 2.86e-03, grad_scale: 16.0 +2023-03-27 10:38:37,622 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=160266.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:38:39,995 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4427, 2.3150, 1.9186, 2.4270, 2.3335, 2.0780, 2.6606, 2.4346], + device='cuda:5'), covar=tensor([0.1306, 0.1952, 0.2879, 0.2339, 0.2435, 0.1647, 0.2772, 0.1605], + device='cuda:5'), in_proj_covar=tensor([0.0188, 0.0189, 0.0235, 0.0252, 0.0249, 0.0207, 0.0214, 0.0202], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 10:38:42,223 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.097e+02 1.448e+02 1.891e+02 2.366e+02 4.690e+02, threshold=3.782e+02, percent-clipped=2.0 +2023-03-27 10:38:45,564 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-03-27 10:38:49,874 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160287.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:38:56,598 INFO [finetune.py:976] (5/7) Epoch 28, batch 5650, loss[loss=0.1929, simple_loss=0.26, pruned_loss=0.06292, over 4833.00 frames. ], tot_loss[loss=0.1692, simple_loss=0.2432, pruned_loss=0.04759, over 957387.48 frames. ], batch size: 47, lr: 2.86e-03, grad_scale: 16.0 +2023-03-27 10:39:01,989 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160307.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 10:39:07,598 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-03-27 10:39:24,565 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=160335.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:39:35,274 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-03-27 10:39:36,255 INFO [finetune.py:976] (5/7) Epoch 28, batch 5700, loss[loss=0.1425, simple_loss=0.1979, pruned_loss=0.04361, over 4165.00 frames. ], tot_loss[loss=0.167, simple_loss=0.2394, pruned_loss=0.04724, over 938300.59 frames. ], batch size: 18, lr: 2.86e-03, grad_scale: 16.0 +2023-03-27 10:39:46,260 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4083, 1.3095, 1.3643, 0.8170, 1.2758, 1.4014, 1.3719, 1.2492], + device='cuda:5'), covar=tensor([0.0502, 0.0714, 0.0601, 0.0776, 0.0949, 0.0667, 0.0549, 0.1269], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0138, 0.0140, 0.0119, 0.0129, 0.0140, 0.0140, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 10:39:48,025 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=160368.0, num_to_drop=1, layers_to_drop={3} +2023-03-27 10:39:50,280 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0 +2023-03-27 10:39:53,235 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.932e+01 1.353e+02 1.695e+02 2.046e+02 5.031e+02, threshold=3.390e+02, percent-clipped=3.0 +2023-03-27 10:40:10,861 INFO [finetune.py:976] (5/7) Epoch 29, batch 0, loss[loss=0.1867, simple_loss=0.2621, pruned_loss=0.05563, over 4834.00 frames. ], tot_loss[loss=0.1867, simple_loss=0.2621, pruned_loss=0.05563, over 4834.00 frames. 
], batch size: 30, lr: 2.86e-03, grad_scale: 16.0 +2023-03-27 10:40:10,861 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-27 10:40:21,879 INFO [finetune.py:1010] (5/7) Epoch 29, validation: loss=0.1588, simple_loss=0.2262, pruned_loss=0.04569, over 2265189.00 frames. +2023-03-27 10:40:21,879 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-27 10:40:24,832 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6315, 1.3869, 2.1025, 3.1787, 2.1286, 2.2925, 0.9982, 2.7612], + device='cuda:5'), covar=tensor([0.1678, 0.1449, 0.1251, 0.0553, 0.0851, 0.1390, 0.1819, 0.0426], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0116, 0.0134, 0.0165, 0.0101, 0.0136, 0.0125, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 10:40:57,963 INFO [finetune.py:976] (5/7) Epoch 29, batch 50, loss[loss=0.1827, simple_loss=0.265, pruned_loss=0.05015, over 4813.00 frames. ], tot_loss[loss=0.1776, simple_loss=0.2511, pruned_loss=0.0521, over 216267.91 frames. ], batch size: 40, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:41:07,358 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-03-27 10:41:17,650 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.00 vs. limit=5.0 +2023-03-27 10:41:23,850 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.10 vs. limit=2.0 +2023-03-27 10:41:38,858 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.219e+02 1.505e+02 1.852e+02 2.145e+02 8.823e+02, threshold=3.704e+02, percent-clipped=1.0 +2023-03-27 10:41:39,934 INFO [finetune.py:976] (5/7) Epoch 29, batch 100, loss[loss=0.1391, simple_loss=0.2137, pruned_loss=0.03219, over 4201.00 frames. ], tot_loss[loss=0.1675, simple_loss=0.2393, pruned_loss=0.04784, over 380159.29 frames. ], batch size: 65, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:42:14,766 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160511.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:42:34,271 INFO [finetune.py:976] (5/7) Epoch 29, batch 150, loss[loss=0.2058, simple_loss=0.2651, pruned_loss=0.0732, over 4853.00 frames. ], tot_loss[loss=0.1651, simple_loss=0.2362, pruned_loss=0.04699, over 508448.16 frames. 
], batch size: 44, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:42:43,227 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.3509, 2.9629, 3.0933, 3.2736, 3.1563, 2.8937, 3.3605, 0.9413], + device='cuda:5'), covar=tensor([0.0944, 0.1035, 0.1112, 0.1050, 0.1364, 0.1759, 0.1026, 0.5597], + device='cuda:5'), in_proj_covar=tensor([0.0355, 0.0250, 0.0287, 0.0299, 0.0341, 0.0289, 0.0308, 0.0306], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 10:42:55,949 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=160559.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:43:00,755 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160566.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:43:06,536 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.027e+02 1.446e+02 1.774e+02 2.135e+02 3.412e+02, threshold=3.547e+02, percent-clipped=0.0 +2023-03-27 10:43:07,133 INFO [finetune.py:976] (5/7) Epoch 29, batch 200, loss[loss=0.1909, simple_loss=0.2678, pruned_loss=0.05699, over 4808.00 frames. ], tot_loss[loss=0.162, simple_loss=0.2329, pruned_loss=0.04553, over 606172.94 frames. ], batch size: 38, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:43:12,402 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1114, 1.7731, 2.6506, 3.9973, 2.6966, 2.7342, 0.8587, 3.4254], + device='cuda:5'), covar=tensor([0.1673, 0.1441, 0.1285, 0.0625, 0.0769, 0.1584, 0.2012, 0.0402], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0116, 0.0133, 0.0165, 0.0100, 0.0135, 0.0125, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 10:43:32,500 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=160614.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:43:40,980 INFO [finetune.py:976] (5/7) Epoch 29, batch 250, loss[loss=0.1977, simple_loss=0.2715, pruned_loss=0.06195, over 4927.00 frames. ], tot_loss[loss=0.1646, simple_loss=0.236, pruned_loss=0.04658, over 684192.72 frames. ], batch size: 33, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:44:05,536 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=160663.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 10:44:12,916 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.082e+02 1.455e+02 1.868e+02 2.134e+02 3.558e+02, threshold=3.736e+02, percent-clipped=1.0 +2023-03-27 10:44:13,967 INFO [finetune.py:976] (5/7) Epoch 29, batch 300, loss[loss=0.1792, simple_loss=0.264, pruned_loss=0.04722, over 4900.00 frames. ], tot_loss[loss=0.1681, simple_loss=0.2399, pruned_loss=0.04811, over 742711.64 frames. ], batch size: 37, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:44:26,386 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-03-27 10:44:36,689 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-03-27 10:44:57,148 INFO [finetune.py:976] (5/7) Epoch 29, batch 350, loss[loss=0.1588, simple_loss=0.234, pruned_loss=0.0418, over 4808.00 frames. ], tot_loss[loss=0.1691, simple_loss=0.2418, pruned_loss=0.04819, over 789422.64 frames. 
], batch size: 40, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:45:20,180 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5188, 2.3861, 2.6482, 1.8751, 2.4365, 2.7154, 2.7421, 2.1279], + device='cuda:5'), covar=tensor([0.0524, 0.0595, 0.0546, 0.0754, 0.0734, 0.0593, 0.0506, 0.0969], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0138, 0.0140, 0.0119, 0.0128, 0.0140, 0.0139, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 10:45:37,469 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.928e+01 1.581e+02 1.864e+02 2.210e+02 3.251e+02, threshold=3.728e+02, percent-clipped=0.0 +2023-03-27 10:45:38,109 INFO [finetune.py:976] (5/7) Epoch 29, batch 400, loss[loss=0.1558, simple_loss=0.238, pruned_loss=0.0368, over 4846.00 frames. ], tot_loss[loss=0.1683, simple_loss=0.2414, pruned_loss=0.04766, over 824652.45 frames. ], batch size: 44, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:45:59,574 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.0428, 0.9527, 0.9897, 0.4796, 1.0300, 1.1665, 1.1948, 0.9910], + device='cuda:5'), covar=tensor([0.1015, 0.0730, 0.0685, 0.0642, 0.0634, 0.0786, 0.0496, 0.0862], + device='cuda:5'), in_proj_covar=tensor([0.0120, 0.0148, 0.0130, 0.0122, 0.0131, 0.0130, 0.0142, 0.0151], + device='cuda:5'), out_proj_covar=tensor([8.7830e-05, 1.0579e-04, 9.2256e-05, 8.5358e-05, 9.1659e-05, 9.1751e-05, + 1.0126e-04, 1.0756e-04], device='cuda:5') +2023-03-27 10:46:05,494 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9149, 1.9352, 1.8541, 1.9333, 1.7258, 3.7221, 1.7968, 2.2174], + device='cuda:5'), covar=tensor([0.2886, 0.2134, 0.1699, 0.1997, 0.1293, 0.0266, 0.2465, 0.1057], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0120, 0.0124, 0.0113, 0.0095, 0.0093, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 10:46:11,857 INFO [finetune.py:976] (5/7) Epoch 29, batch 450, loss[loss=0.1741, simple_loss=0.2489, pruned_loss=0.04967, over 4877.00 frames. ], tot_loss[loss=0.1675, simple_loss=0.2404, pruned_loss=0.04729, over 852977.53 frames. ], batch size: 32, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:46:39,404 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6216, 3.3828, 3.1893, 1.5047, 3.5088, 2.6810, 0.6916, 2.3003], + device='cuda:5'), covar=tensor([0.2301, 0.1870, 0.1811, 0.3382, 0.1182, 0.1004, 0.4295, 0.1577], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0180, 0.0160, 0.0130, 0.0163, 0.0124, 0.0150, 0.0126], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 10:46:55,071 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.046e+02 1.473e+02 1.674e+02 2.082e+02 4.783e+02, threshold=3.348e+02, percent-clipped=2.0 +2023-03-27 10:46:55,690 INFO [finetune.py:976] (5/7) Epoch 29, batch 500, loss[loss=0.1423, simple_loss=0.2104, pruned_loss=0.03707, over 4807.00 frames. ], tot_loss[loss=0.1665, simple_loss=0.2389, pruned_loss=0.04707, over 876971.43 frames. ], batch size: 25, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:47:03,659 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. 
limit=2.0 +2023-03-27 10:47:15,118 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160901.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:47:36,924 INFO [finetune.py:976] (5/7) Epoch 29, batch 550, loss[loss=0.151, simple_loss=0.231, pruned_loss=0.03551, over 4827.00 frames. ], tot_loss[loss=0.1646, simple_loss=0.2365, pruned_loss=0.04631, over 895651.11 frames. ], batch size: 47, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:48:08,210 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=160962.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:48:08,825 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160963.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 10:48:10,049 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160965.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:48:11,247 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.0291, 0.9746, 0.9542, 1.1081, 1.2114, 1.1741, 1.0510, 0.9618], + device='cuda:5'), covar=tensor([0.0404, 0.0314, 0.0743, 0.0340, 0.0326, 0.0506, 0.0375, 0.0454], + device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0106, 0.0148, 0.0111, 0.0102, 0.0117, 0.0105, 0.0115], + device='cuda:5'), out_proj_covar=tensor([7.8707e-05, 8.1178e-05, 1.1546e-04, 8.4904e-05, 7.8574e-05, 8.5927e-05, + 7.7588e-05, 8.6940e-05], device='cuda:5') +2023-03-27 10:48:15,382 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.000e+01 1.387e+02 1.769e+02 2.082e+02 3.377e+02, threshold=3.538e+02, percent-clipped=1.0 +2023-03-27 10:48:16,013 INFO [finetune.py:976] (5/7) Epoch 29, batch 600, loss[loss=0.1491, simple_loss=0.2267, pruned_loss=0.03572, over 4824.00 frames. ], tot_loss[loss=0.1666, simple_loss=0.2387, pruned_loss=0.04724, over 910814.36 frames. ], batch size: 30, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:48:24,212 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-03-27 10:48:41,021 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=161011.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 10:48:41,640 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9793, 1.6531, 2.3112, 1.5659, 2.0228, 2.2166, 1.5564, 2.3230], + device='cuda:5'), covar=tensor([0.1195, 0.2028, 0.1398, 0.1916, 0.0943, 0.1364, 0.3039, 0.0835], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0206, 0.0193, 0.0190, 0.0174, 0.0214, 0.0219, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 10:48:44,057 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2279, 2.1684, 2.2304, 1.5173, 2.1804, 2.3894, 2.4173, 1.8724], + device='cuda:5'), covar=tensor([0.0576, 0.0660, 0.0694, 0.0940, 0.0727, 0.0737, 0.0600, 0.1111], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0138, 0.0141, 0.0119, 0.0128, 0.0141, 0.0140, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 10:48:49,431 INFO [finetune.py:976] (5/7) Epoch 29, batch 650, loss[loss=0.1599, simple_loss=0.2417, pruned_loss=0.03909, over 4753.00 frames. ], tot_loss[loss=0.1699, simple_loss=0.2425, pruned_loss=0.04869, over 920653.63 frames. 
], batch size: 27, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:48:50,194 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161026.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:48:57,688 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-03-27 10:49:22,504 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.055e+02 1.587e+02 1.904e+02 2.267e+02 4.706e+02, threshold=3.807e+02, percent-clipped=2.0 +2023-03-27 10:49:23,104 INFO [finetune.py:976] (5/7) Epoch 29, batch 700, loss[loss=0.2028, simple_loss=0.2806, pruned_loss=0.06252, over 4254.00 frames. ], tot_loss[loss=0.171, simple_loss=0.2439, pruned_loss=0.04905, over 927859.14 frames. ], batch size: 65, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:50:03,475 INFO [finetune.py:976] (5/7) Epoch 29, batch 750, loss[loss=0.1623, simple_loss=0.2341, pruned_loss=0.04521, over 4922.00 frames. ], tot_loss[loss=0.172, simple_loss=0.2453, pruned_loss=0.04933, over 935992.62 frames. ], batch size: 33, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:50:05,380 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9956, 1.9807, 1.8616, 1.7693, 2.5561, 2.7345, 2.2787, 2.0172], + device='cuda:5'), covar=tensor([0.0456, 0.0382, 0.0591, 0.0447, 0.0259, 0.0422, 0.0348, 0.0418], + device='cuda:5'), in_proj_covar=tensor([0.0103, 0.0107, 0.0150, 0.0113, 0.0103, 0.0118, 0.0105, 0.0116], + device='cuda:5'), out_proj_covar=tensor([7.9837e-05, 8.2037e-05, 1.1639e-04, 8.5855e-05, 7.9271e-05, 8.6834e-05, + 7.8120e-05, 8.7713e-05], device='cuda:5') +2023-03-27 10:50:46,355 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.613e+01 1.645e+02 1.965e+02 2.346e+02 5.118e+02, threshold=3.931e+02, percent-clipped=1.0 +2023-03-27 10:50:46,988 INFO [finetune.py:976] (5/7) Epoch 29, batch 800, loss[loss=0.1811, simple_loss=0.2515, pruned_loss=0.05535, over 4826.00 frames. ], tot_loss[loss=0.1705, simple_loss=0.2441, pruned_loss=0.04844, over 940003.29 frames. ], batch size: 30, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:51:15,286 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161216.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:51:16,487 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2574, 1.3885, 1.6607, 1.5375, 1.5452, 2.9321, 1.3058, 1.5861], + device='cuda:5'), covar=tensor([0.1008, 0.1751, 0.0975, 0.0863, 0.1481, 0.0285, 0.1439, 0.1613], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0073, 0.0076, 0.0091, 0.0080, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0005], + device='cuda:5') +2023-03-27 10:51:18,881 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7710, 2.5130, 2.2339, 1.0907, 2.2918, 2.1428, 2.0304, 2.3858], + device='cuda:5'), covar=tensor([0.0726, 0.0810, 0.1386, 0.1808, 0.1068, 0.1939, 0.1767, 0.0790], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0188, 0.0200, 0.0179, 0.0207, 0.0208, 0.0221, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 10:51:20,585 INFO [finetune.py:976] (5/7) Epoch 29, batch 850, loss[loss=0.1571, simple_loss=0.2301, pruned_loss=0.04208, over 4844.00 frames. ], tot_loss[loss=0.1707, simple_loss=0.2437, pruned_loss=0.04881, over 943500.23 frames. 
], batch size: 44, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:51:24,346 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161231.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:51:48,941 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161257.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:52:04,268 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.030e+02 1.396e+02 1.676e+02 2.040e+02 3.102e+02, threshold=3.353e+02, percent-clipped=0.0 +2023-03-27 10:52:04,924 INFO [finetune.py:976] (5/7) Epoch 29, batch 900, loss[loss=0.1406, simple_loss=0.2088, pruned_loss=0.03617, over 4155.00 frames. ], tot_loss[loss=0.1674, simple_loss=0.2404, pruned_loss=0.04719, over 946582.10 frames. ], batch size: 18, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:52:06,235 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161277.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:52:15,375 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161292.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:52:35,803 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161321.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:52:38,179 INFO [finetune.py:976] (5/7) Epoch 29, batch 950, loss[loss=0.1871, simple_loss=0.2649, pruned_loss=0.05466, over 4808.00 frames. ], tot_loss[loss=0.165, simple_loss=0.2378, pruned_loss=0.04614, over 948848.67 frames. ], batch size: 38, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:52:56,969 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6061, 1.0913, 0.7640, 1.4871, 2.0586, 0.9091, 1.2599, 1.3887], + device='cuda:5'), covar=tensor([0.1492, 0.2137, 0.1651, 0.1189, 0.1804, 0.1953, 0.1549, 0.1988], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0094, 0.0109, 0.0093, 0.0120, 0.0093, 0.0098, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 10:53:00,251 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-03-27 10:53:28,291 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.815e+01 1.511e+02 1.765e+02 2.170e+02 3.612e+02, threshold=3.529e+02, percent-clipped=1.0 +2023-03-27 10:53:28,921 INFO [finetune.py:976] (5/7) Epoch 29, batch 1000, loss[loss=0.2223, simple_loss=0.2888, pruned_loss=0.07794, over 4719.00 frames. ], tot_loss[loss=0.1678, simple_loss=0.2411, pruned_loss=0.04727, over 951241.79 frames. ], batch size: 59, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:53:48,073 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3166, 1.4190, 1.4583, 0.9065, 1.4518, 1.6996, 1.7208, 1.3487], + device='cuda:5'), covar=tensor([0.0844, 0.0663, 0.0471, 0.0433, 0.0484, 0.0603, 0.0311, 0.0625], + device='cuda:5'), in_proj_covar=tensor([0.0121, 0.0148, 0.0131, 0.0123, 0.0132, 0.0130, 0.0143, 0.0152], + device='cuda:5'), out_proj_covar=tensor([8.8578e-05, 1.0634e-04, 9.3134e-05, 8.6019e-05, 9.2506e-05, 9.2271e-05, + 1.0194e-04, 1.0874e-04], device='cuda:5') +2023-03-27 10:53:49,441 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161401.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:54:06,943 INFO [finetune.py:976] (5/7) Epoch 29, batch 1050, loss[loss=0.168, simple_loss=0.2396, pruned_loss=0.04823, over 4827.00 frames. 
], tot_loss[loss=0.1683, simple_loss=0.2425, pruned_loss=0.04709, over 952410.00 frames. ], batch size: 30, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:54:13,088 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6183, 1.4962, 1.3885, 1.5089, 1.9326, 1.8549, 1.6790, 1.4267], + device='cuda:5'), covar=tensor([0.0356, 0.0370, 0.0674, 0.0332, 0.0212, 0.0415, 0.0313, 0.0414], + device='cuda:5'), in_proj_covar=tensor([0.0103, 0.0107, 0.0149, 0.0113, 0.0102, 0.0118, 0.0105, 0.0115], + device='cuda:5'), out_proj_covar=tensor([7.9509e-05, 8.1921e-05, 1.1587e-04, 8.5688e-05, 7.9166e-05, 8.6512e-05, + 7.8162e-05, 8.7466e-05], device='cuda:5') +2023-03-27 10:54:28,551 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161460.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:54:30,826 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161462.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:54:39,306 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.696e+01 1.443e+02 1.825e+02 2.108e+02 3.139e+02, threshold=3.650e+02, percent-clipped=0.0 +2023-03-27 10:54:39,917 INFO [finetune.py:976] (5/7) Epoch 29, batch 1100, loss[loss=0.1896, simple_loss=0.2604, pruned_loss=0.05943, over 4800.00 frames. ], tot_loss[loss=0.1717, simple_loss=0.2453, pruned_loss=0.049, over 954793.81 frames. ], batch size: 45, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:55:11,599 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161521.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:55:14,360 INFO [finetune.py:976] (5/7) Epoch 29, batch 1150, loss[loss=0.171, simple_loss=0.2481, pruned_loss=0.04695, over 4891.00 frames. ], tot_loss[loss=0.1714, simple_loss=0.2456, pruned_loss=0.04864, over 955614.84 frames. ], batch size: 32, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:55:15,165 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.60 vs. limit=2.0 +2023-03-27 10:55:33,549 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-03-27 10:55:39,777 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161557.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:55:50,719 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161572.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:55:51,853 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.769e+01 1.420e+02 1.779e+02 2.194e+02 3.095e+02, threshold=3.558e+02, percent-clipped=0.0 +2023-03-27 10:55:52,954 INFO [finetune.py:976] (5/7) Epoch 29, batch 1200, loss[loss=0.1786, simple_loss=0.2514, pruned_loss=0.05293, over 4869.00 frames. ], tot_loss[loss=0.1701, simple_loss=0.2437, pruned_loss=0.04826, over 956322.75 frames. ], batch size: 31, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:56:03,033 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161587.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:56:21,514 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-03-27 10:56:21,955 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.67 vs. 
limit=2.0 +2023-03-27 10:56:22,442 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=161605.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:56:33,598 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161621.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:56:36,408 INFO [finetune.py:976] (5/7) Epoch 29, batch 1250, loss[loss=0.1623, simple_loss=0.2408, pruned_loss=0.04188, over 4901.00 frames. ], tot_loss[loss=0.1679, simple_loss=0.2409, pruned_loss=0.04751, over 956842.96 frames. ], batch size: 43, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:56:40,734 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-03-27 10:56:51,966 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6345, 0.7232, 1.7278, 1.6606, 1.5377, 1.4921, 1.5649, 1.6974], + device='cuda:5'), covar=tensor([0.3616, 0.3738, 0.2962, 0.3278, 0.4355, 0.3570, 0.3761, 0.2707], + device='cuda:5'), in_proj_covar=tensor([0.0271, 0.0251, 0.0271, 0.0300, 0.0300, 0.0277, 0.0307, 0.0255], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 10:57:07,735 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=161669.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:57:07,805 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5811, 2.4228, 1.8631, 0.9094, 2.0593, 2.1070, 1.9382, 2.2429], + device='cuda:5'), covar=tensor([0.0805, 0.0670, 0.1391, 0.1912, 0.1177, 0.2250, 0.2031, 0.0735], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0188, 0.0200, 0.0179, 0.0208, 0.0209, 0.0222, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 10:57:07,817 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.2734, 1.5247, 1.6634, 0.8990, 1.6420, 1.9055, 1.8898, 1.5336], + device='cuda:5'), covar=tensor([0.0920, 0.0668, 0.0517, 0.0492, 0.0447, 0.0609, 0.0312, 0.0651], + device='cuda:5'), in_proj_covar=tensor([0.0121, 0.0148, 0.0131, 0.0122, 0.0132, 0.0130, 0.0143, 0.0152], + device='cuda:5'), out_proj_covar=tensor([8.8020e-05, 1.0589e-04, 9.3013e-05, 8.5697e-05, 9.2468e-05, 9.2013e-05, + 1.0156e-04, 1.0853e-04], device='cuda:5') +2023-03-27 10:57:10,137 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3899, 1.2813, 1.2791, 1.3182, 1.6221, 1.5439, 1.3796, 1.2183], + device='cuda:5'), covar=tensor([0.0364, 0.0357, 0.0741, 0.0328, 0.0277, 0.0524, 0.0384, 0.0434], + device='cuda:5'), in_proj_covar=tensor([0.0103, 0.0107, 0.0149, 0.0112, 0.0102, 0.0118, 0.0105, 0.0115], + device='cuda:5'), out_proj_covar=tensor([7.9511e-05, 8.1784e-05, 1.1567e-04, 8.5501e-05, 7.9208e-05, 8.6742e-05, + 7.8247e-05, 8.7668e-05], device='cuda:5') +2023-03-27 10:57:11,726 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.068e+02 1.453e+02 1.676e+02 2.058e+02 3.499e+02, threshold=3.351e+02, percent-clipped=0.0 +2023-03-27 10:57:12,871 INFO [finetune.py:976] (5/7) Epoch 29, batch 1300, loss[loss=0.1559, simple_loss=0.238, pruned_loss=0.03692, over 4861.00 frames. ], tot_loss[loss=0.1646, simple_loss=0.2373, pruned_loss=0.04596, over 958190.68 frames. ], batch size: 49, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:57:47,297 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. 
limit=2.0 +2023-03-27 10:57:54,739 INFO [finetune.py:976] (5/7) Epoch 29, batch 1350, loss[loss=0.2078, simple_loss=0.2751, pruned_loss=0.07027, over 4838.00 frames. ], tot_loss[loss=0.1645, simple_loss=0.237, pruned_loss=0.046, over 957882.55 frames. ], batch size: 49, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:58:29,772 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161757.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:58:49,498 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.001e+02 1.561e+02 1.829e+02 2.338e+02 6.236e+02, threshold=3.658e+02, percent-clipped=4.0 +2023-03-27 10:58:50,571 INFO [finetune.py:976] (5/7) Epoch 29, batch 1400, loss[loss=0.1797, simple_loss=0.2503, pruned_loss=0.05458, over 4914.00 frames. ], tot_loss[loss=0.167, simple_loss=0.24, pruned_loss=0.047, over 957146.69 frames. ], batch size: 36, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:59:17,540 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161815.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:59:18,092 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161816.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:59:23,988 INFO [finetune.py:976] (5/7) Epoch 29, batch 1450, loss[loss=0.1955, simple_loss=0.2755, pruned_loss=0.05773, over 4824.00 frames. ], tot_loss[loss=0.1698, simple_loss=0.2435, pruned_loss=0.04801, over 957338.20 frames. ], batch size: 33, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:59:33,521 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.98 vs. limit=5.0 +2023-03-27 10:59:55,403 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161872.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 10:59:56,504 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.032e+02 1.509e+02 1.810e+02 2.171e+02 4.074e+02, threshold=3.620e+02, percent-clipped=1.0 +2023-03-27 10:59:57,114 INFO [finetune.py:976] (5/7) Epoch 29, batch 1500, loss[loss=0.1676, simple_loss=0.2481, pruned_loss=0.04354, over 4737.00 frames. ], tot_loss[loss=0.171, simple_loss=0.2449, pruned_loss=0.04858, over 955676.34 frames. ], batch size: 27, lr: 2.85e-03, grad_scale: 16.0 +2023-03-27 10:59:58,332 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161876.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:00:05,890 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161887.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:00:27,815 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=161920.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:00:32,887 INFO [finetune.py:976] (5/7) Epoch 29, batch 1550, loss[loss=0.1746, simple_loss=0.2396, pruned_loss=0.05485, over 4731.00 frames. ], tot_loss[loss=0.1704, simple_loss=0.2441, pruned_loss=0.04833, over 955970.76 frames. ], batch size: 54, lr: 2.85e-03, grad_scale: 32.0 +2023-03-27 11:00:43,611 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-03-27 11:00:44,438 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=161935.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:01:02,063 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.17 vs. 
limit=5.0 +2023-03-27 11:01:16,694 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.053e+02 1.413e+02 1.667e+02 1.984e+02 3.261e+02, threshold=3.334e+02, percent-clipped=0.0 +2023-03-27 11:01:17,326 INFO [finetune.py:976] (5/7) Epoch 29, batch 1600, loss[loss=0.1731, simple_loss=0.2438, pruned_loss=0.05121, over 4721.00 frames. ], tot_loss[loss=0.1693, simple_loss=0.2424, pruned_loss=0.04811, over 956637.27 frames. ], batch size: 59, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:01:45,582 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7793, 1.2329, 0.8398, 1.5910, 2.2584, 1.3437, 1.3898, 1.6381], + device='cuda:5'), covar=tensor([0.1445, 0.2030, 0.1911, 0.1187, 0.1702, 0.1854, 0.1446, 0.1901], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0093, 0.0108, 0.0093, 0.0119, 0.0092, 0.0097, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 11:01:53,506 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9372, 1.5266, 1.0446, 1.8955, 2.2931, 1.7657, 1.6614, 1.9678], + device='cuda:5'), covar=tensor([0.1157, 0.1528, 0.1647, 0.0871, 0.1488, 0.1608, 0.1051, 0.1405], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0093, 0.0108, 0.0093, 0.0119, 0.0092, 0.0097, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 11:01:59,457 INFO [finetune.py:976] (5/7) Epoch 29, batch 1650, loss[loss=0.1476, simple_loss=0.2282, pruned_loss=0.0335, over 4766.00 frames. ], tot_loss[loss=0.167, simple_loss=0.2395, pruned_loss=0.04727, over 956909.64 frames. ], batch size: 28, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:02:22,370 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162057.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:02:34,948 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.499e+01 1.468e+02 1.758e+02 2.171e+02 4.899e+02, threshold=3.516e+02, percent-clipped=3.0 +2023-03-27 11:02:35,583 INFO [finetune.py:976] (5/7) Epoch 29, batch 1700, loss[loss=0.1843, simple_loss=0.2479, pruned_loss=0.0603, over 4929.00 frames. ], tot_loss[loss=0.1651, simple_loss=0.2373, pruned_loss=0.04642, over 955931.41 frames. ], batch size: 38, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:02:36,930 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162077.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:03:01,317 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=162105.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:03:02,610 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6867, 1.5449, 1.0830, 0.3268, 1.2834, 1.4958, 1.4899, 1.4012], + device='cuda:5'), covar=tensor([0.0990, 0.0894, 0.1588, 0.2074, 0.1531, 0.2981, 0.2451, 0.0979], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0189, 0.0201, 0.0181, 0.0209, 0.0210, 0.0224, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:03:08,001 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162116.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:03:16,127 INFO [finetune.py:976] (5/7) Epoch 29, batch 1750, loss[loss=0.1887, simple_loss=0.2629, pruned_loss=0.0573, over 4896.00 frames. 
], tot_loss[loss=0.1686, simple_loss=0.2407, pruned_loss=0.04818, over 957100.23 frames. ], batch size: 35, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:03:24,623 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162138.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:03:59,958 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=162164.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:04:02,684 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-03-27 11:04:04,284 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162171.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:04:10,047 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.681e+01 1.626e+02 1.856e+02 2.284e+02 4.131e+02, threshold=3.712e+02, percent-clipped=2.0 +2023-03-27 11:04:10,644 INFO [finetune.py:976] (5/7) Epoch 29, batch 1800, loss[loss=0.1862, simple_loss=0.2519, pruned_loss=0.06029, over 4738.00 frames. ], tot_loss[loss=0.1703, simple_loss=0.2436, pruned_loss=0.04847, over 957820.35 frames. ], batch size: 27, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:04:44,452 INFO [finetune.py:976] (5/7) Epoch 29, batch 1850, loss[loss=0.1556, simple_loss=0.2234, pruned_loss=0.04396, over 4865.00 frames. ], tot_loss[loss=0.1723, simple_loss=0.2455, pruned_loss=0.04952, over 956921.28 frames. ], batch size: 34, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:04:48,196 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162231.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:05:04,055 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2464, 2.1101, 2.3523, 1.6637, 2.1679, 2.3696, 2.3957, 1.7773], + device='cuda:5'), covar=tensor([0.0523, 0.0684, 0.0600, 0.0797, 0.0742, 0.0627, 0.0557, 0.1168], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0137, 0.0140, 0.0118, 0.0128, 0.0139, 0.0139, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:05:17,384 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.145e+02 1.604e+02 1.802e+02 2.267e+02 3.875e+02, threshold=3.605e+02, percent-clipped=1.0 +2023-03-27 11:05:17,994 INFO [finetune.py:976] (5/7) Epoch 29, batch 1900, loss[loss=0.1663, simple_loss=0.2538, pruned_loss=0.03942, over 4850.00 frames. ], tot_loss[loss=0.1723, simple_loss=0.2463, pruned_loss=0.0492, over 957745.40 frames. ], batch size: 44, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:05:28,440 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162292.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:05:51,673 INFO [finetune.py:976] (5/7) Epoch 29, batch 1950, loss[loss=0.1495, simple_loss=0.2163, pruned_loss=0.04132, over 4862.00 frames. ], tot_loss[loss=0.1698, simple_loss=0.2437, pruned_loss=0.04801, over 954863.07 frames. 
], batch size: 31, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:06:20,064 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5701, 3.3936, 3.1838, 1.5916, 3.5413, 2.6385, 0.9081, 2.3581], + device='cuda:5'), covar=tensor([0.2242, 0.2131, 0.1807, 0.3443, 0.1212, 0.1042, 0.4383, 0.1766], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0179, 0.0159, 0.0128, 0.0162, 0.0123, 0.0148, 0.0125], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 11:06:36,625 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.724e+01 1.430e+02 1.855e+02 2.130e+02 3.474e+02, threshold=3.709e+02, percent-clipped=0.0 +2023-03-27 11:06:37,253 INFO [finetune.py:976] (5/7) Epoch 29, batch 2000, loss[loss=0.1761, simple_loss=0.2502, pruned_loss=0.05096, over 4901.00 frames. ], tot_loss[loss=0.1693, simple_loss=0.2423, pruned_loss=0.0482, over 955103.08 frames. ], batch size: 35, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:06:50,560 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.11 vs. limit=2.0 +2023-03-27 11:07:14,706 INFO [finetune.py:976] (5/7) Epoch 29, batch 2050, loss[loss=0.1499, simple_loss=0.226, pruned_loss=0.03694, over 4901.00 frames. ], tot_loss[loss=0.1658, simple_loss=0.2385, pruned_loss=0.04661, over 953463.14 frames. ], batch size: 32, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:07:17,905 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.11 vs. limit=5.0 +2023-03-27 11:07:19,656 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162433.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:07:45,862 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162471.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:07:47,601 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.116e+01 1.601e+02 1.964e+02 2.330e+02 4.874e+02, threshold=3.928e+02, percent-clipped=3.0 +2023-03-27 11:07:48,225 INFO [finetune.py:976] (5/7) Epoch 29, batch 2100, loss[loss=0.178, simple_loss=0.2458, pruned_loss=0.05511, over 4890.00 frames. ], tot_loss[loss=0.1664, simple_loss=0.2385, pruned_loss=0.04713, over 953846.74 frames. ], batch size: 32, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:08:22,586 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162511.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:08:28,428 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=162519.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:08:34,949 INFO [finetune.py:976] (5/7) Epoch 29, batch 2150, loss[loss=0.1815, simple_loss=0.2599, pruned_loss=0.05156, over 4924.00 frames. ], tot_loss[loss=0.1675, simple_loss=0.2407, pruned_loss=0.0472, over 955058.19 frames. ], batch size: 42, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:09:06,200 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2147, 1.8353, 2.4468, 1.5830, 2.0714, 2.3957, 1.7348, 2.4507], + device='cuda:5'), covar=tensor([0.1182, 0.2050, 0.1388, 0.2069, 0.0977, 0.1257, 0.2709, 0.0828], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0205, 0.0192, 0.0189, 0.0174, 0.0212, 0.0217, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:09:12,067 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. 
limit=2.0 +2023-03-27 11:09:17,101 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162572.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 11:09:18,164 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.053e+02 1.475e+02 1.779e+02 2.116e+02 3.329e+02, threshold=3.558e+02, percent-clipped=0.0 +2023-03-27 11:09:18,786 INFO [finetune.py:976] (5/7) Epoch 29, batch 2200, loss[loss=0.1777, simple_loss=0.2358, pruned_loss=0.05978, over 4763.00 frames. ], tot_loss[loss=0.1697, simple_loss=0.2433, pruned_loss=0.04803, over 955611.38 frames. ], batch size: 26, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:09:27,190 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162587.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:10:00,797 INFO [finetune.py:976] (5/7) Epoch 29, batch 2250, loss[loss=0.1825, simple_loss=0.2733, pruned_loss=0.04579, over 4904.00 frames. ], tot_loss[loss=0.1709, simple_loss=0.2449, pruned_loss=0.04844, over 957752.96 frames. ], batch size: 36, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:10:06,547 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5658, 3.7870, 3.5725, 1.8871, 3.9689, 2.9472, 1.1075, 2.7113], + device='cuda:5'), covar=tensor([0.2364, 0.1806, 0.1475, 0.3034, 0.0867, 0.0905, 0.3923, 0.1316], + device='cuda:5'), in_proj_covar=tensor([0.0150, 0.0180, 0.0159, 0.0128, 0.0162, 0.0123, 0.0148, 0.0125], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 11:10:19,420 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7778, 2.1727, 3.0155, 1.7683, 2.4732, 2.9899, 2.1000, 2.9062], + device='cuda:5'), covar=tensor([0.1007, 0.1917, 0.1264, 0.2190, 0.0873, 0.1083, 0.2498, 0.0798], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0206, 0.0193, 0.0190, 0.0175, 0.0213, 0.0219, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:10:33,516 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.222e+01 1.558e+02 1.861e+02 2.054e+02 5.864e+02, threshold=3.723e+02, percent-clipped=1.0 +2023-03-27 11:10:34,117 INFO [finetune.py:976] (5/7) Epoch 29, batch 2300, loss[loss=0.1836, simple_loss=0.2608, pruned_loss=0.05321, over 4771.00 frames. ], tot_loss[loss=0.1707, simple_loss=0.2451, pruned_loss=0.04817, over 955531.45 frames. ], batch size: 51, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:10:46,485 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5030, 2.3913, 1.9519, 2.6678, 2.4755, 2.1733, 2.9334, 2.5789], + device='cuda:5'), covar=tensor([0.1297, 0.2074, 0.2753, 0.2488, 0.2475, 0.1521, 0.2963, 0.1524], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0191, 0.0236, 0.0253, 0.0250, 0.0209, 0.0216, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:11:09,595 INFO [finetune.py:976] (5/7) Epoch 29, batch 2350, loss[loss=0.1762, simple_loss=0.2497, pruned_loss=0.05141, over 4900.00 frames. ], tot_loss[loss=0.169, simple_loss=0.2426, pruned_loss=0.04771, over 952836.81 frames. 
], batch size: 35, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:11:20,035 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162733.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:11:50,494 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.467e+01 1.478e+02 1.866e+02 2.325e+02 4.091e+02, threshold=3.732e+02, percent-clipped=2.0 +2023-03-27 11:11:51,107 INFO [finetune.py:976] (5/7) Epoch 29, batch 2400, loss[loss=0.146, simple_loss=0.2167, pruned_loss=0.03759, over 4937.00 frames. ], tot_loss[loss=0.1669, simple_loss=0.2396, pruned_loss=0.04712, over 954201.75 frames. ], batch size: 38, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:11:51,235 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6697, 2.4578, 2.1214, 1.0916, 2.3083, 2.0457, 1.8270, 2.3885], + device='cuda:5'), covar=tensor([0.0834, 0.0775, 0.1689, 0.1984, 0.1446, 0.2170, 0.2049, 0.0879], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0188, 0.0201, 0.0180, 0.0208, 0.0209, 0.0222, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:11:56,643 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=162781.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:12:33,618 INFO [finetune.py:976] (5/7) Epoch 29, batch 2450, loss[loss=0.1956, simple_loss=0.2581, pruned_loss=0.0665, over 4871.00 frames. ], tot_loss[loss=0.164, simple_loss=0.2363, pruned_loss=0.04584, over 956202.27 frames. ], batch size: 31, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:12:35,952 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162827.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:13:02,269 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162867.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 11:13:09,317 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.738e+01 1.481e+02 1.739e+02 2.016e+02 4.521e+02, threshold=3.479e+02, percent-clipped=1.0 +2023-03-27 11:13:09,947 INFO [finetune.py:976] (5/7) Epoch 29, batch 2500, loss[loss=0.1728, simple_loss=0.2577, pruned_loss=0.04394, over 4868.00 frames. ], tot_loss[loss=0.1665, simple_loss=0.2388, pruned_loss=0.04711, over 957418.64 frames. ], batch size: 34, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:13:27,464 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162887.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:13:28,127 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162888.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:13:34,823 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.39 vs. limit=5.0 +2023-03-27 11:13:43,037 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-03-27 11:13:52,415 INFO [finetune.py:976] (5/7) Epoch 29, batch 2550, loss[loss=0.1852, simple_loss=0.2621, pruned_loss=0.05416, over 4836.00 frames. ], tot_loss[loss=0.1702, simple_loss=0.2435, pruned_loss=0.04844, over 953923.25 frames. 
], batch size: 49, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:14:01,044 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4682, 1.4245, 1.3433, 1.4170, 1.7898, 1.7413, 1.5621, 1.3473], + device='cuda:5'), covar=tensor([0.0363, 0.0343, 0.0680, 0.0337, 0.0223, 0.0428, 0.0324, 0.0449], + device='cuda:5'), in_proj_covar=tensor([0.0103, 0.0107, 0.0149, 0.0113, 0.0103, 0.0118, 0.0105, 0.0116], + device='cuda:5'), out_proj_covar=tensor([7.9631e-05, 8.1608e-05, 1.1610e-04, 8.5718e-05, 7.9392e-05, 8.6870e-05, + 7.8210e-05, 8.7732e-05], device='cuda:5') +2023-03-27 11:14:01,597 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=162935.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:14:02,013 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-03-27 11:14:09,152 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2254, 2.1766, 2.0050, 1.2471, 2.1387, 1.8362, 1.7014, 2.1521], + device='cuda:5'), covar=tensor([0.1078, 0.0766, 0.1743, 0.1935, 0.1433, 0.2143, 0.2264, 0.0896], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0187, 0.0201, 0.0180, 0.0208, 0.0209, 0.0222, 0.0194], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:14:36,963 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.986e+01 1.516e+02 1.791e+02 2.248e+02 3.208e+02, threshold=3.582e+02, percent-clipped=0.0 +2023-03-27 11:14:37,598 INFO [finetune.py:976] (5/7) Epoch 29, batch 2600, loss[loss=0.1447, simple_loss=0.213, pruned_loss=0.03821, over 4750.00 frames. ], tot_loss[loss=0.171, simple_loss=0.2448, pruned_loss=0.04854, over 954476.02 frames. ], batch size: 26, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:15:09,256 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7373, 1.6096, 1.5635, 1.5999, 1.3755, 3.6142, 1.4618, 1.8572], + device='cuda:5'), covar=tensor([0.3192, 0.2408, 0.2102, 0.2351, 0.1549, 0.0198, 0.2479, 0.1180], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0120, 0.0123, 0.0112, 0.0095, 0.0093, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 11:15:17,743 INFO [finetune.py:976] (5/7) Epoch 29, batch 2650, loss[loss=0.1888, simple_loss=0.2577, pruned_loss=0.05997, over 4874.00 frames. ], tot_loss[loss=0.1717, simple_loss=0.2456, pruned_loss=0.0489, over 954295.12 frames. ], batch size: 35, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:15:29,787 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3053, 2.1901, 1.7863, 2.3716, 2.2809, 2.0460, 2.6625, 2.2529], + device='cuda:5'), covar=tensor([0.1459, 0.2138, 0.3121, 0.2371, 0.2530, 0.1875, 0.2544, 0.1898], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0190, 0.0236, 0.0253, 0.0250, 0.0208, 0.0216, 0.0203], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:15:51,095 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.228e+01 1.484e+02 1.662e+02 2.061e+02 3.704e+02, threshold=3.324e+02, percent-clipped=1.0 +2023-03-27 11:15:51,718 INFO [finetune.py:976] (5/7) Epoch 29, batch 2700, loss[loss=0.1736, simple_loss=0.2429, pruned_loss=0.05211, over 4876.00 frames. ], tot_loss[loss=0.1707, simple_loss=0.2446, pruned_loss=0.04843, over 956089.02 frames. 
], batch size: 34, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:16:02,234 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=163090.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:16:14,565 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8173, 1.0572, 1.8720, 1.8193, 1.6673, 1.6109, 1.7737, 1.7922], + device='cuda:5'), covar=tensor([0.3635, 0.3642, 0.2861, 0.3136, 0.4092, 0.3428, 0.3767, 0.2737], + device='cuda:5'), in_proj_covar=tensor([0.0269, 0.0249, 0.0269, 0.0299, 0.0298, 0.0276, 0.0304, 0.0254], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:16:20,748 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-03-27 11:16:21,579 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6101, 1.4885, 2.2901, 1.8564, 1.7736, 4.1857, 1.5261, 1.6662], + device='cuda:5'), covar=tensor([0.1015, 0.1886, 0.1237, 0.0990, 0.1593, 0.0170, 0.1446, 0.1783], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0083, 0.0073, 0.0076, 0.0091, 0.0080, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0005], + device='cuda:5') +2023-03-27 11:16:25,137 INFO [finetune.py:976] (5/7) Epoch 29, batch 2750, loss[loss=0.1789, simple_loss=0.25, pruned_loss=0.05392, over 4865.00 frames. ], tot_loss[loss=0.1683, simple_loss=0.2417, pruned_loss=0.04746, over 957448.49 frames. ], batch size: 34, lr: 2.84e-03, grad_scale: 32.0 +2023-03-27 11:16:40,436 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-03-27 11:16:52,777 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=163151.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:17:03,844 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=163167.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 11:17:10,458 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.013e+02 1.483e+02 1.714e+02 1.995e+02 3.279e+02, threshold=3.429e+02, percent-clipped=0.0 +2023-03-27 11:17:10,474 INFO [finetune.py:976] (5/7) Epoch 29, batch 2800, loss[loss=0.1478, simple_loss=0.2204, pruned_loss=0.03757, over 4840.00 frames. ], tot_loss[loss=0.1652, simple_loss=0.2381, pruned_loss=0.04614, over 958429.10 frames. ], batch size: 30, lr: 2.84e-03, grad_scale: 16.0 +2023-03-27 11:17:15,462 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=163183.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:17:37,947 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=163215.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:17:42,735 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2847, 1.4739, 0.7956, 1.9670, 2.6026, 1.8714, 1.8518, 1.9050], + device='cuda:5'), covar=tensor([0.1374, 0.2060, 0.2044, 0.1144, 0.1700, 0.1852, 0.1364, 0.2041], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0093, 0.0109, 0.0093, 0.0120, 0.0092, 0.0097, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 11:17:44,453 INFO [finetune.py:976] (5/7) Epoch 29, batch 2850, loss[loss=0.1854, simple_loss=0.2503, pruned_loss=0.06027, over 4028.00 frames. ], tot_loss[loss=0.1638, simple_loss=0.2362, pruned_loss=0.04566, over 957055.32 frames. 
], batch size: 65, lr: 2.84e-03, grad_scale: 16.0 +2023-03-27 11:17:59,022 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=163240.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:18:31,673 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.921e+01 1.528e+02 1.890e+02 2.266e+02 3.566e+02, threshold=3.779e+02, percent-clipped=1.0 +2023-03-27 11:18:31,689 INFO [finetune.py:976] (5/7) Epoch 29, batch 2900, loss[loss=0.1478, simple_loss=0.2237, pruned_loss=0.03592, over 4915.00 frames. ], tot_loss[loss=0.1657, simple_loss=0.2387, pruned_loss=0.04637, over 956945.88 frames. ], batch size: 37, lr: 2.84e-03, grad_scale: 16.0 +2023-03-27 11:18:52,072 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=163301.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:19:08,248 INFO [finetune.py:976] (5/7) Epoch 29, batch 2950, loss[loss=0.1552, simple_loss=0.2217, pruned_loss=0.04434, over 4803.00 frames. ], tot_loss[loss=0.1685, simple_loss=0.2418, pruned_loss=0.0476, over 956271.55 frames. ], batch size: 25, lr: 2.84e-03, grad_scale: 16.0 +2023-03-27 11:19:21,245 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5732, 1.4708, 2.0417, 3.1747, 2.1345, 2.3167, 0.9926, 2.6647], + device='cuda:5'), covar=tensor([0.1741, 0.1452, 0.1229, 0.0570, 0.0814, 0.1361, 0.1890, 0.0489], + device='cuda:5'), in_proj_covar=tensor([0.0101, 0.0115, 0.0133, 0.0165, 0.0101, 0.0136, 0.0126, 0.0102], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 11:19:44,406 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-03-27 11:19:49,296 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.216e+02 1.530e+02 1.927e+02 2.309e+02 4.929e+02, threshold=3.855e+02, percent-clipped=3.0 +2023-03-27 11:19:49,312 INFO [finetune.py:976] (5/7) Epoch 29, batch 3000, loss[loss=0.159, simple_loss=0.2443, pruned_loss=0.03688, over 4717.00 frames. ], tot_loss[loss=0.1698, simple_loss=0.2436, pruned_loss=0.04795, over 956702.42 frames. ], batch size: 54, lr: 2.84e-03, grad_scale: 16.0 +2023-03-27 11:19:49,312 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-27 11:19:56,734 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8636, 1.7022, 2.0205, 1.3684, 1.6967, 2.0235, 1.5771, 2.1867], + device='cuda:5'), covar=tensor([0.1107, 0.2117, 0.1487, 0.1798, 0.0982, 0.1225, 0.2889, 0.0775], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0208, 0.0194, 0.0191, 0.0176, 0.0214, 0.0220, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:20:05,061 INFO [finetune.py:1010] (5/7) Epoch 29, validation: loss=0.158, simple_loss=0.2251, pruned_loss=0.04545, over 2265189.00 frames. 
+2023-03-27 11:20:05,062 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-27 11:20:21,024 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6602, 1.6067, 1.4794, 1.5296, 1.9755, 1.8839, 1.6355, 1.4684], + device='cuda:5'), covar=tensor([0.0372, 0.0348, 0.0626, 0.0340, 0.0261, 0.0579, 0.0400, 0.0527], + device='cuda:5'), in_proj_covar=tensor([0.0103, 0.0107, 0.0150, 0.0112, 0.0103, 0.0118, 0.0106, 0.0116], + device='cuda:5'), out_proj_covar=tensor([7.9522e-05, 8.1649e-05, 1.1627e-04, 8.5572e-05, 7.9883e-05, 8.6889e-05, + 7.8509e-05, 8.7852e-05], device='cuda:5') +2023-03-27 11:20:43,033 INFO [finetune.py:976] (5/7) Epoch 29, batch 3050, loss[loss=0.1386, simple_loss=0.2189, pruned_loss=0.02912, over 4850.00 frames. ], tot_loss[loss=0.1706, simple_loss=0.2448, pruned_loss=0.04815, over 957079.77 frames. ], batch size: 49, lr: 2.84e-03, grad_scale: 16.0 +2023-03-27 11:20:47,337 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.82 vs. limit=5.0 +2023-03-27 11:20:57,924 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=163446.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:21:13,472 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.57 vs. limit=5.0 +2023-03-27 11:21:16,334 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.820e+01 1.385e+02 1.622e+02 1.937e+02 3.745e+02, threshold=3.244e+02, percent-clipped=0.0 +2023-03-27 11:21:16,350 INFO [finetune.py:976] (5/7) Epoch 29, batch 3100, loss[loss=0.1503, simple_loss=0.2242, pruned_loss=0.03821, over 4903.00 frames. ], tot_loss[loss=0.1683, simple_loss=0.242, pruned_loss=0.04735, over 955463.73 frames. ], batch size: 36, lr: 2.84e-03, grad_scale: 16.0 +2023-03-27 11:21:22,347 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=163483.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:21:32,966 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2084, 1.7593, 2.2014, 2.2949, 1.9722, 1.9689, 2.1851, 2.0617], + device='cuda:5'), covar=tensor([0.4084, 0.4168, 0.3351, 0.3937, 0.5024, 0.4075, 0.5014, 0.3042], + device='cuda:5'), in_proj_covar=tensor([0.0268, 0.0249, 0.0270, 0.0299, 0.0298, 0.0276, 0.0304, 0.0254], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:21:33,807 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.65 vs. limit=2.0 +2023-03-27 11:21:51,332 INFO [finetune.py:976] (5/7) Epoch 29, batch 3150, loss[loss=0.1672, simple_loss=0.2336, pruned_loss=0.05042, over 4777.00 frames. ], tot_loss[loss=0.1663, simple_loss=0.2388, pruned_loss=0.04688, over 954258.77 frames. 
], batch size: 51, lr: 2.84e-03, grad_scale: 16.0 +2023-03-27 11:21:51,442 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6177, 1.4432, 1.0077, 0.3981, 1.2927, 1.4651, 1.2326, 1.3334], + device='cuda:5'), covar=tensor([0.0751, 0.0831, 0.1153, 0.1584, 0.1108, 0.1886, 0.2115, 0.0778], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0187, 0.0200, 0.0180, 0.0207, 0.0208, 0.0222, 0.0193], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:21:53,234 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5823, 1.5150, 1.9981, 2.9710, 2.0690, 2.2362, 1.1418, 2.5877], + device='cuda:5'), covar=tensor([0.1766, 0.1405, 0.1218, 0.0624, 0.0766, 0.1299, 0.1681, 0.0479], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0115, 0.0133, 0.0164, 0.0100, 0.0136, 0.0125, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 11:21:55,512 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=163531.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:22:27,164 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-03-27 11:22:33,523 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.004e+02 1.497e+02 1.704e+02 2.227e+02 4.213e+02, threshold=3.407e+02, percent-clipped=5.0 +2023-03-27 11:22:33,539 INFO [finetune.py:976] (5/7) Epoch 29, batch 3200, loss[loss=0.1618, simple_loss=0.2285, pruned_loss=0.04756, over 4781.00 frames. ], tot_loss[loss=0.1641, simple_loss=0.2363, pruned_loss=0.04594, over 955019.29 frames. ], batch size: 26, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:22:49,217 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=163596.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:22:59,114 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8095, 1.6882, 2.2473, 3.6353, 2.4571, 2.5175, 0.9322, 3.0769], + device='cuda:5'), covar=tensor([0.1685, 0.1348, 0.1229, 0.0508, 0.0748, 0.1497, 0.1999, 0.0431], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0115, 0.0133, 0.0164, 0.0100, 0.0135, 0.0125, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 11:23:01,326 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-03-27 11:23:07,473 INFO [finetune.py:976] (5/7) Epoch 29, batch 3250, loss[loss=0.1592, simple_loss=0.2397, pruned_loss=0.03937, over 4768.00 frames. ], tot_loss[loss=0.1636, simple_loss=0.2357, pruned_loss=0.04572, over 954209.87 frames. ], batch size: 27, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:23:52,620 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.061e+02 1.629e+02 1.981e+02 2.506e+02 4.570e+02, threshold=3.962e+02, percent-clipped=6.0 +2023-03-27 11:23:52,636 INFO [finetune.py:976] (5/7) Epoch 29, batch 3300, loss[loss=0.1668, simple_loss=0.2488, pruned_loss=0.04241, over 4834.00 frames. ], tot_loss[loss=0.1662, simple_loss=0.2391, pruned_loss=0.04666, over 952714.73 frames. 
], batch size: 47, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:24:29,877 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3148, 1.2581, 1.2510, 1.2680, 1.5947, 1.5352, 1.3243, 1.1871], + device='cuda:5'), covar=tensor([0.0401, 0.0322, 0.0629, 0.0323, 0.0258, 0.0460, 0.0340, 0.0428], + device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0106, 0.0148, 0.0112, 0.0103, 0.0117, 0.0105, 0.0115], + device='cuda:5'), out_proj_covar=tensor([7.9039e-05, 8.1144e-05, 1.1525e-04, 8.4967e-05, 7.9363e-05, 8.6344e-05, + 7.7773e-05, 8.7268e-05], device='cuda:5') +2023-03-27 11:24:34,741 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.59 vs. limit=2.0 +2023-03-27 11:24:37,007 INFO [finetune.py:976] (5/7) Epoch 29, batch 3350, loss[loss=0.1843, simple_loss=0.2502, pruned_loss=0.05922, over 4754.00 frames. ], tot_loss[loss=0.1702, simple_loss=0.2433, pruned_loss=0.04857, over 953639.98 frames. ], batch size: 27, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:24:55,817 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=163746.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:25:21,357 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.117e+02 1.486e+02 1.801e+02 2.174e+02 4.171e+02, threshold=3.603e+02, percent-clipped=1.0 +2023-03-27 11:25:21,373 INFO [finetune.py:976] (5/7) Epoch 29, batch 3400, loss[loss=0.1568, simple_loss=0.2288, pruned_loss=0.04238, over 4759.00 frames. ], tot_loss[loss=0.1716, simple_loss=0.245, pruned_loss=0.04909, over 955795.20 frames. ], batch size: 26, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:25:37,708 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=163794.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:25:51,327 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3853, 1.6357, 2.4286, 1.9606, 1.8450, 4.3400, 1.6618, 1.8339], + device='cuda:5'), covar=tensor([0.1030, 0.1804, 0.1095, 0.0926, 0.1588, 0.0173, 0.1433, 0.1752], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0083, 0.0073, 0.0077, 0.0092, 0.0081, 0.0086, 0.0081], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0005], + device='cuda:5') +2023-03-27 11:25:52,293 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-03-27 11:25:58,974 INFO [finetune.py:976] (5/7) Epoch 29, batch 3450, loss[loss=0.147, simple_loss=0.2187, pruned_loss=0.03767, over 4395.00 frames. ], tot_loss[loss=0.17, simple_loss=0.2437, pruned_loss=0.04809, over 957611.84 frames. ], batch size: 19, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:26:16,241 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8989, 1.5288, 1.9406, 1.8735, 1.6925, 1.6824, 1.9136, 1.8772], + device='cuda:5'), covar=tensor([0.3488, 0.3612, 0.2953, 0.3748, 0.4365, 0.3922, 0.4232, 0.2687], + device='cuda:5'), in_proj_covar=tensor([0.0269, 0.0250, 0.0269, 0.0300, 0.0299, 0.0277, 0.0305, 0.0254], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:26:41,243 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.103e+01 1.539e+02 1.877e+02 2.200e+02 3.171e+02, threshold=3.754e+02, percent-clipped=0.0 +2023-03-27 11:26:41,259 INFO [finetune.py:976] (5/7) Epoch 29, batch 3500, loss[loss=0.1615, simple_loss=0.237, pruned_loss=0.04299, over 4767.00 frames. 
], tot_loss[loss=0.1694, simple_loss=0.2425, pruned_loss=0.04817, over 956503.14 frames. ], batch size: 28, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:26:42,560 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4690, 1.3591, 2.0742, 1.8404, 1.5971, 3.6095, 1.3849, 1.6060], + device='cuda:5'), covar=tensor([0.1011, 0.2011, 0.1053, 0.0962, 0.1678, 0.0219, 0.1604, 0.1871], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0083, 0.0073, 0.0077, 0.0092, 0.0081, 0.0086, 0.0081], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0005], + device='cuda:5') +2023-03-27 11:26:55,490 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=163896.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:27:17,137 INFO [finetune.py:976] (5/7) Epoch 29, batch 3550, loss[loss=0.1422, simple_loss=0.2129, pruned_loss=0.03577, over 4827.00 frames. ], tot_loss[loss=0.1669, simple_loss=0.2392, pruned_loss=0.04729, over 954751.42 frames. ], batch size: 41, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:27:38,584 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=163944.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:27:59,310 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.875e+01 1.446e+02 1.734e+02 2.162e+02 4.667e+02, threshold=3.468e+02, percent-clipped=1.0 +2023-03-27 11:27:59,326 INFO [finetune.py:976] (5/7) Epoch 29, batch 3600, loss[loss=0.1357, simple_loss=0.2115, pruned_loss=0.02996, over 4760.00 frames. ], tot_loss[loss=0.1647, simple_loss=0.2361, pruned_loss=0.04661, over 953789.72 frames. ], batch size: 28, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:28:36,273 INFO [finetune.py:976] (5/7) Epoch 29, batch 3650, loss[loss=0.1461, simple_loss=0.2133, pruned_loss=0.03949, over 4772.00 frames. ], tot_loss[loss=0.1667, simple_loss=0.2381, pruned_loss=0.04763, over 954494.83 frames. ], batch size: 26, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:29:19,122 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.932e+01 1.541e+02 1.828e+02 2.331e+02 7.133e+02, threshold=3.656e+02, percent-clipped=4.0 +2023-03-27 11:29:19,138 INFO [finetune.py:976] (5/7) Epoch 29, batch 3700, loss[loss=0.1985, simple_loss=0.2723, pruned_loss=0.06239, over 4913.00 frames. ], tot_loss[loss=0.1685, simple_loss=0.2409, pruned_loss=0.04803, over 953806.43 frames. ], batch size: 42, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:29:36,834 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=164090.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:30:00,905 INFO [finetune.py:976] (5/7) Epoch 29, batch 3750, loss[loss=0.1873, simple_loss=0.2626, pruned_loss=0.05597, over 4924.00 frames. ], tot_loss[loss=0.1687, simple_loss=0.2415, pruned_loss=0.04792, over 952461.56 frames. 
], batch size: 41, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:30:17,243 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=164151.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:30:20,044 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=164155.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:30:37,148 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.292e+01 1.655e+02 1.832e+02 2.179e+02 3.362e+02, threshold=3.664e+02, percent-clipped=0.0 +2023-03-27 11:30:37,164 INFO [finetune.py:976] (5/7) Epoch 29, batch 3800, loss[loss=0.2063, simple_loss=0.2774, pruned_loss=0.0676, over 4850.00 frames. ], tot_loss[loss=0.1694, simple_loss=0.2429, pruned_loss=0.04795, over 954448.35 frames. ], batch size: 44, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:30:44,006 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3439, 1.8770, 2.3232, 2.3344, 2.0503, 2.0476, 2.2491, 2.1778], + device='cuda:5'), covar=tensor([0.3967, 0.4102, 0.3232, 0.3929, 0.5212, 0.3849, 0.5054, 0.2908], + device='cuda:5'), in_proj_covar=tensor([0.0269, 0.0250, 0.0269, 0.0300, 0.0299, 0.0277, 0.0304, 0.0254], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:30:51,806 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.9471, 2.6520, 2.5313, 1.3501, 2.7185, 2.1110, 1.9688, 2.4716], + device='cuda:5'), covar=tensor([0.1211, 0.0786, 0.1794, 0.2169, 0.1490, 0.2375, 0.2223, 0.1148], + device='cuda:5'), in_proj_covar=tensor([0.0172, 0.0190, 0.0203, 0.0182, 0.0210, 0.0211, 0.0224, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:31:03,881 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=164216.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:31:09,783 INFO [finetune.py:976] (5/7) Epoch 29, batch 3850, loss[loss=0.155, simple_loss=0.2372, pruned_loss=0.03639, over 4761.00 frames. ], tot_loss[loss=0.1682, simple_loss=0.2416, pruned_loss=0.04741, over 953847.93 frames. ], batch size: 26, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:31:11,286 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.47 vs. 
limit=5.0 +2023-03-27 11:31:19,346 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1598, 1.2240, 1.4270, 1.3284, 1.4259, 2.4389, 1.2382, 1.4147], + device='cuda:5'), covar=tensor([0.0947, 0.2075, 0.1064, 0.0949, 0.1684, 0.0364, 0.1595, 0.1947], + device='cuda:5'), in_proj_covar=tensor([0.0076, 0.0084, 0.0074, 0.0077, 0.0092, 0.0082, 0.0087, 0.0081], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0005], + device='cuda:5') +2023-03-27 11:31:25,442 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3189, 1.5912, 0.8753, 2.0702, 2.6968, 1.8607, 1.9509, 1.8902], + device='cuda:5'), covar=tensor([0.1306, 0.1922, 0.1861, 0.1141, 0.1581, 0.1782, 0.1304, 0.1946], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0093, 0.0108, 0.0093, 0.0120, 0.0092, 0.0097, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 11:31:45,640 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.748e+01 1.394e+02 1.747e+02 2.221e+02 3.425e+02, threshold=3.494e+02, percent-clipped=0.0 +2023-03-27 11:31:45,656 INFO [finetune.py:976] (5/7) Epoch 29, batch 3900, loss[loss=0.1808, simple_loss=0.2441, pruned_loss=0.05878, over 4339.00 frames. ], tot_loss[loss=0.1653, simple_loss=0.2383, pruned_loss=0.04614, over 955687.87 frames. ], batch size: 65, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:32:12,194 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6328, 2.1894, 2.9157, 1.8390, 2.5633, 2.8622, 2.0908, 2.9428], + device='cuda:5'), covar=tensor([0.1287, 0.2036, 0.1578, 0.2331, 0.0941, 0.1529, 0.2579, 0.0883], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0205, 0.0192, 0.0188, 0.0173, 0.0211, 0.0216, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:32:27,501 INFO [finetune.py:976] (5/7) Epoch 29, batch 3950, loss[loss=0.1596, simple_loss=0.2326, pruned_loss=0.04333, over 4739.00 frames. ], tot_loss[loss=0.1636, simple_loss=0.2362, pruned_loss=0.04551, over 956865.17 frames. ], batch size: 23, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:32:55,727 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1375, 1.8328, 2.2987, 2.2643, 1.9677, 1.9569, 2.1885, 2.1620], + device='cuda:5'), covar=tensor([0.4610, 0.4238, 0.3301, 0.3733, 0.5149, 0.3986, 0.4762, 0.3006], + device='cuda:5'), in_proj_covar=tensor([0.0270, 0.0250, 0.0270, 0.0300, 0.0299, 0.0277, 0.0305, 0.0254], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:32:58,639 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=164360.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:33:11,862 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.819e+01 1.491e+02 1.748e+02 1.970e+02 3.605e+02, threshold=3.496e+02, percent-clipped=1.0 +2023-03-27 11:33:11,878 INFO [finetune.py:976] (5/7) Epoch 29, batch 4000, loss[loss=0.2083, simple_loss=0.2734, pruned_loss=0.07161, over 4915.00 frames. ], tot_loss[loss=0.163, simple_loss=0.2352, pruned_loss=0.04538, over 953446.68 frames. 
], batch size: 43, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:33:42,972 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=164421.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:33:45,304 INFO [finetune.py:976] (5/7) Epoch 29, batch 4050, loss[loss=0.1875, simple_loss=0.2631, pruned_loss=0.05598, over 4888.00 frames. ], tot_loss[loss=0.1648, simple_loss=0.2378, pruned_loss=0.04592, over 952590.43 frames. ], batch size: 35, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:34:06,779 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164446.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:34:15,388 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9016, 1.8405, 1.7909, 1.8867, 1.3958, 3.2766, 1.6021, 1.9809], + device='cuda:5'), covar=tensor([0.3001, 0.2254, 0.1838, 0.2154, 0.1594, 0.0303, 0.2168, 0.1037], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0120, 0.0124, 0.0113, 0.0096, 0.0094, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 11:34:29,031 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.788e+01 1.600e+02 1.814e+02 2.182e+02 3.811e+02, threshold=3.628e+02, percent-clipped=1.0 +2023-03-27 11:34:29,047 INFO [finetune.py:976] (5/7) Epoch 29, batch 4100, loss[loss=0.1266, simple_loss=0.2096, pruned_loss=0.02184, over 4808.00 frames. ], tot_loss[loss=0.1659, simple_loss=0.2397, pruned_loss=0.04612, over 953223.22 frames. ], batch size: 25, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:35:04,641 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164511.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:35:13,897 INFO [finetune.py:976] (5/7) Epoch 29, batch 4150, loss[loss=0.1684, simple_loss=0.2391, pruned_loss=0.04891, over 4884.00 frames. ], tot_loss[loss=0.1683, simple_loss=0.2421, pruned_loss=0.04722, over 954278.69 frames. ], batch size: 35, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:35:42,368 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=164568.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:35:46,474 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.025e+02 1.582e+02 1.835e+02 2.360e+02 4.097e+02, threshold=3.670e+02, percent-clipped=1.0 +2023-03-27 11:35:46,490 INFO [finetune.py:976] (5/7) Epoch 29, batch 4200, loss[loss=0.152, simple_loss=0.2351, pruned_loss=0.03445, over 4767.00 frames. ], tot_loss[loss=0.1687, simple_loss=0.243, pruned_loss=0.0472, over 954903.00 frames. ], batch size: 28, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:35:48,884 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=164578.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:35:53,146 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.44 vs. limit=5.0 +2023-03-27 11:36:20,294 INFO [finetune.py:976] (5/7) Epoch 29, batch 4250, loss[loss=0.1712, simple_loss=0.2386, pruned_loss=0.05189, over 4918.00 frames. ], tot_loss[loss=0.1666, simple_loss=0.2402, pruned_loss=0.0465, over 955697.85 frames. 
], batch size: 38, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:36:23,236 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=164629.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:36:29,284 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=164639.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 11:36:43,150 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=164658.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:36:53,779 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.101e+02 1.450e+02 1.663e+02 2.112e+02 3.483e+02, threshold=3.326e+02, percent-clipped=0.0 +2023-03-27 11:36:53,795 INFO [finetune.py:976] (5/7) Epoch 29, batch 4300, loss[loss=0.2119, simple_loss=0.2676, pruned_loss=0.07808, over 4195.00 frames. ], tot_loss[loss=0.1647, simple_loss=0.2374, pruned_loss=0.04602, over 956168.64 frames. ], batch size: 65, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:37:39,436 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164716.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:37:41,308 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=164719.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:37:44,825 INFO [finetune.py:976] (5/7) Epoch 29, batch 4350, loss[loss=0.1665, simple_loss=0.224, pruned_loss=0.05447, over 4051.00 frames. ], tot_loss[loss=0.1622, simple_loss=0.2345, pruned_loss=0.04498, over 956488.52 frames. ], batch size: 17, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:37:56,916 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0435, 1.7518, 2.0958, 2.0612, 1.8480, 1.8139, 2.0794, 1.9984], + device='cuda:5'), covar=tensor([0.4398, 0.4001, 0.3310, 0.4014, 0.5005, 0.4395, 0.4799, 0.2885], + device='cuda:5'), in_proj_covar=tensor([0.0271, 0.0251, 0.0271, 0.0301, 0.0300, 0.0278, 0.0306, 0.0256], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:37:58,704 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=164746.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:37:58,717 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=164746.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:38:20,796 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.196e+01 1.395e+02 1.786e+02 2.060e+02 3.738e+02, threshold=3.572e+02, percent-clipped=2.0 +2023-03-27 11:38:20,812 INFO [finetune.py:976] (5/7) Epoch 29, batch 4400, loss[loss=0.1698, simple_loss=0.2507, pruned_loss=0.0444, over 4858.00 frames. ], tot_loss[loss=0.164, simple_loss=0.2361, pruned_loss=0.04588, over 955033.11 frames. 
], batch size: 31, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:38:33,467 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=164794.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:38:34,763 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7664, 2.4869, 2.4281, 1.4297, 2.5503, 2.0225, 1.9634, 2.2920], + device='cuda:5'), covar=tensor([0.1200, 0.0823, 0.1972, 0.2127, 0.1688, 0.2385, 0.2154, 0.1261], + device='cuda:5'), in_proj_covar=tensor([0.0173, 0.0192, 0.0205, 0.0183, 0.0213, 0.0213, 0.0225, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:38:43,382 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=164807.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:38:46,312 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=164811.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:38:54,768 INFO [finetune.py:976] (5/7) Epoch 29, batch 4450, loss[loss=0.2123, simple_loss=0.2896, pruned_loss=0.0675, over 4919.00 frames. ], tot_loss[loss=0.167, simple_loss=0.24, pruned_loss=0.04701, over 954523.36 frames. ], batch size: 42, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:39:08,332 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-03-27 11:39:23,087 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=164859.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:39:37,135 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.009e+02 1.552e+02 1.889e+02 2.217e+02 4.905e+02, threshold=3.778e+02, percent-clipped=1.0 +2023-03-27 11:39:37,151 INFO [finetune.py:976] (5/7) Epoch 29, batch 4500, loss[loss=0.2236, simple_loss=0.3031, pruned_loss=0.07198, over 4738.00 frames. ], tot_loss[loss=0.1679, simple_loss=0.2417, pruned_loss=0.04705, over 954639.76 frames. ], batch size: 59, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:40:11,932 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3956, 1.9348, 2.7881, 4.2999, 3.0204, 2.9683, 1.0630, 3.8568], + device='cuda:5'), covar=tensor([0.1469, 0.1325, 0.1247, 0.0447, 0.0721, 0.1220, 0.1890, 0.0303], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0114, 0.0131, 0.0163, 0.0099, 0.0134, 0.0123, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 11:40:22,142 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164924.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:40:22,678 INFO [finetune.py:976] (5/7) Epoch 29, batch 4550, loss[loss=0.1863, simple_loss=0.2576, pruned_loss=0.05748, over 4804.00 frames. ], tot_loss[loss=0.1693, simple_loss=0.2432, pruned_loss=0.04767, over 956142.75 frames. ], batch size: 39, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:40:28,157 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164934.0, num_to_drop=1, layers_to_drop={1} +2023-03-27 11:40:55,994 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.117e+02 1.494e+02 1.764e+02 2.048e+02 3.220e+02, threshold=3.528e+02, percent-clipped=0.0 +2023-03-27 11:40:56,010 INFO [finetune.py:976] (5/7) Epoch 29, batch 4600, loss[loss=0.1939, simple_loss=0.2565, pruned_loss=0.06569, over 4897.00 frames. ], tot_loss[loss=0.1692, simple_loss=0.2427, pruned_loss=0.04782, over 953898.27 frames. 
], batch size: 35, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:41:22,215 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165014.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:41:23,389 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165016.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:41:29,266 INFO [finetune.py:976] (5/7) Epoch 29, batch 4650, loss[loss=0.1677, simple_loss=0.2376, pruned_loss=0.04891, over 4714.00 frames. ], tot_loss[loss=0.1693, simple_loss=0.2418, pruned_loss=0.04842, over 954239.98 frames. ], batch size: 23, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:41:42,210 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-03-27 11:41:53,746 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=165064.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:42:01,329 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.062e+02 1.478e+02 1.798e+02 2.114e+02 1.110e+03, threshold=3.597e+02, percent-clipped=3.0 +2023-03-27 11:42:01,345 INFO [finetune.py:976] (5/7) Epoch 29, batch 4700, loss[loss=0.1188, simple_loss=0.1955, pruned_loss=0.02106, over 4752.00 frames. ], tot_loss[loss=0.1676, simple_loss=0.2399, pruned_loss=0.04763, over 956260.09 frames. ], batch size: 27, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:42:27,155 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165102.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:42:43,747 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165123.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:42:44,889 INFO [finetune.py:976] (5/7) Epoch 29, batch 4750, loss[loss=0.1472, simple_loss=0.2198, pruned_loss=0.03732, over 4797.00 frames. ], tot_loss[loss=0.1663, simple_loss=0.2379, pruned_loss=0.04738, over 955881.28 frames. 
], batch size: 25, lr: 2.83e-03, grad_scale: 16.0 +2023-03-27 11:43:01,682 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7452, 2.6766, 2.3835, 2.9568, 2.6844, 2.5777, 2.6064, 3.5451], + device='cuda:5'), covar=tensor([0.3104, 0.4007, 0.3000, 0.3244, 0.3408, 0.2178, 0.3261, 0.1353], + device='cuda:5'), in_proj_covar=tensor([0.0288, 0.0263, 0.0239, 0.0273, 0.0260, 0.0232, 0.0259, 0.0238], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:43:03,505 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.7195, 2.5871, 2.2329, 2.8983, 2.7147, 2.4187, 2.9511, 2.7779], + device='cuda:5'), covar=tensor([0.1154, 0.1995, 0.2634, 0.1964, 0.2252, 0.1653, 0.2368, 0.1574], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0191, 0.0237, 0.0253, 0.0251, 0.0209, 0.0216, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:43:15,948 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9527, 1.9063, 1.5019, 1.8044, 1.8329, 1.7489, 1.8438, 2.4500], + device='cuda:5'), covar=tensor([0.3680, 0.3927, 0.3373, 0.3479, 0.3629, 0.2558, 0.3235, 0.1673], + device='cuda:5'), in_proj_covar=tensor([0.0289, 0.0264, 0.0239, 0.0274, 0.0261, 0.0232, 0.0259, 0.0239], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:43:21,525 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.139e+02 1.579e+02 1.786e+02 2.031e+02 3.721e+02, threshold=3.572e+02, percent-clipped=1.0 +2023-03-27 11:43:21,541 INFO [finetune.py:976] (5/7) Epoch 29, batch 4800, loss[loss=0.1592, simple_loss=0.2383, pruned_loss=0.04005, over 4833.00 frames. ], tot_loss[loss=0.1674, simple_loss=0.2393, pruned_loss=0.04775, over 953703.96 frames. ], batch size: 30, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:43:27,593 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165184.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:43:38,202 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-03-27 11:43:42,093 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0112, 1.5030, 2.5055, 3.9138, 2.6282, 2.6571, 0.9133, 3.4230], + device='cuda:5'), covar=tensor([0.1638, 0.1466, 0.1247, 0.0570, 0.0769, 0.1801, 0.1904, 0.0379], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0114, 0.0132, 0.0164, 0.0100, 0.0135, 0.0124, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 11:43:53,931 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165224.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:43:54,434 INFO [finetune.py:976] (5/7) Epoch 29, batch 4850, loss[loss=0.1865, simple_loss=0.2682, pruned_loss=0.05236, over 4853.00 frames. ], tot_loss[loss=0.1691, simple_loss=0.2423, pruned_loss=0.04798, over 956372.28 frames. 
], batch size: 44, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:44:00,487 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165234.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 11:44:05,377 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1320, 1.8443, 2.2096, 2.2375, 2.0131, 1.9844, 2.2097, 2.1397], + device='cuda:5'), covar=tensor([0.4447, 0.4374, 0.3571, 0.4558, 0.5242, 0.4419, 0.5170, 0.3303], + device='cuda:5'), in_proj_covar=tensor([0.0271, 0.0251, 0.0270, 0.0301, 0.0301, 0.0277, 0.0307, 0.0256], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:44:24,478 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=165272.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:44:26,731 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.028e+02 1.742e+02 1.982e+02 2.317e+02 4.079e+02, threshold=3.965e+02, percent-clipped=3.0 +2023-03-27 11:44:26,747 INFO [finetune.py:976] (5/7) Epoch 29, batch 4900, loss[loss=0.1736, simple_loss=0.2347, pruned_loss=0.05622, over 4733.00 frames. ], tot_loss[loss=0.1705, simple_loss=0.2439, pruned_loss=0.04854, over 956349.39 frames. ], batch size: 26, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:44:41,266 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=165282.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:44:42,605 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.66 vs. limit=5.0 +2023-03-27 11:44:43,245 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.47 vs. limit=5.0 +2023-03-27 11:45:03,745 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165314.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:45:16,149 INFO [finetune.py:976] (5/7) Epoch 29, batch 4950, loss[loss=0.229, simple_loss=0.3, pruned_loss=0.07905, over 4857.00 frames. ], tot_loss[loss=0.1728, simple_loss=0.2464, pruned_loss=0.04956, over 957156.80 frames. ], batch size: 44, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:45:48,523 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=165362.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:45:56,865 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.088e+01 1.449e+02 1.743e+02 2.087e+02 3.629e+02, threshold=3.486e+02, percent-clipped=0.0 +2023-03-27 11:45:56,881 INFO [finetune.py:976] (5/7) Epoch 29, batch 5000, loss[loss=0.1496, simple_loss=0.2239, pruned_loss=0.03761, over 4821.00 frames. ], tot_loss[loss=0.1714, simple_loss=0.2445, pruned_loss=0.04915, over 955762.99 frames. 
], batch size: 39, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:46:15,384 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165402.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:46:26,799 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7929, 1.3546, 0.8580, 1.7032, 2.2045, 1.5057, 1.6018, 1.6906], + device='cuda:5'), covar=tensor([0.1349, 0.1885, 0.1926, 0.1043, 0.1796, 0.1860, 0.1324, 0.1809], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0093, 0.0108, 0.0092, 0.0119, 0.0091, 0.0097, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 11:46:29,699 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4335, 1.5480, 1.3497, 1.4677, 1.8750, 1.6487, 1.5148, 1.3403], + device='cuda:5'), covar=tensor([0.0415, 0.0317, 0.0642, 0.0330, 0.0200, 0.0605, 0.0363, 0.0503], + device='cuda:5'), in_proj_covar=tensor([0.0104, 0.0108, 0.0149, 0.0112, 0.0104, 0.0119, 0.0105, 0.0117], + device='cuda:5'), out_proj_covar=tensor([8.0229e-05, 8.2272e-05, 1.1613e-04, 8.5402e-05, 8.0484e-05, 8.7757e-05, + 7.8285e-05, 8.8578e-05], device='cuda:5') +2023-03-27 11:46:30,168 INFO [finetune.py:976] (5/7) Epoch 29, batch 5050, loss[loss=0.1542, simple_loss=0.224, pruned_loss=0.04217, over 4818.00 frames. ], tot_loss[loss=0.1676, simple_loss=0.2404, pruned_loss=0.0474, over 956420.06 frames. ], batch size: 41, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:46:35,075 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6058, 1.6777, 1.3859, 1.6010, 2.0711, 1.9770, 1.6819, 1.4911], + device='cuda:5'), covar=tensor([0.0415, 0.0339, 0.0654, 0.0324, 0.0178, 0.0517, 0.0333, 0.0428], + device='cuda:5'), in_proj_covar=tensor([0.0104, 0.0108, 0.0150, 0.0112, 0.0104, 0.0119, 0.0106, 0.0117], + device='cuda:5'), out_proj_covar=tensor([8.0292e-05, 8.2345e-05, 1.1625e-04, 8.5473e-05, 8.0521e-05, 8.7818e-05, + 7.8363e-05, 8.8655e-05], device='cuda:5') +2023-03-27 11:46:41,352 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.2887, 2.9281, 3.0322, 3.2062, 3.0823, 2.8770, 3.3380, 0.9177], + device='cuda:5'), covar=tensor([0.1237, 0.1141, 0.1270, 0.1262, 0.1752, 0.1994, 0.1175, 0.6096], + device='cuda:5'), in_proj_covar=tensor([0.0356, 0.0249, 0.0289, 0.0298, 0.0339, 0.0288, 0.0309, 0.0304], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:46:42,072 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-03-27 11:46:47,815 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=165450.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:46:52,113 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7845, 1.6384, 1.6662, 1.7307, 1.1312, 3.8252, 1.5335, 1.8221], + device='cuda:5'), covar=tensor([0.3286, 0.2572, 0.2101, 0.2401, 0.1836, 0.0169, 0.2450, 0.1321], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0117, 0.0121, 0.0124, 0.0114, 0.0095, 0.0094, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 11:46:54,710 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. 
limit=2.0 +2023-03-27 11:47:03,404 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.069e+02 1.397e+02 1.700e+02 2.095e+02 3.650e+02, threshold=3.400e+02, percent-clipped=1.0 +2023-03-27 11:47:03,420 INFO [finetune.py:976] (5/7) Epoch 29, batch 5100, loss[loss=0.178, simple_loss=0.2468, pruned_loss=0.05459, over 4829.00 frames. ], tot_loss[loss=0.165, simple_loss=0.2368, pruned_loss=0.04664, over 957231.60 frames. ], batch size: 40, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:47:06,335 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165479.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:47:46,455 INFO [finetune.py:976] (5/7) Epoch 29, batch 5150, loss[loss=0.2032, simple_loss=0.2593, pruned_loss=0.07359, over 3999.00 frames. ], tot_loss[loss=0.1684, simple_loss=0.2397, pruned_loss=0.04859, over 955392.05 frames. ], batch size: 65, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:48:04,768 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165551.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:48:12,619 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6344, 1.3999, 1.9811, 2.9956, 2.0115, 2.2658, 1.1632, 2.5850], + device='cuda:5'), covar=tensor([0.1781, 0.1529, 0.1296, 0.0736, 0.0897, 0.1339, 0.1788, 0.0534], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0114, 0.0132, 0.0163, 0.0100, 0.0135, 0.0124, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 11:48:20,245 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.818e+01 1.606e+02 1.856e+02 2.249e+02 3.990e+02, threshold=3.713e+02, percent-clipped=3.0 +2023-03-27 11:48:20,261 INFO [finetune.py:976] (5/7) Epoch 29, batch 5200, loss[loss=0.1658, simple_loss=0.242, pruned_loss=0.04478, over 4832.00 frames. ], tot_loss[loss=0.1705, simple_loss=0.2424, pruned_loss=0.04931, over 953146.90 frames. ], batch size: 33, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:48:25,039 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7453, 1.6854, 1.9225, 1.2935, 1.6455, 1.9542, 1.6610, 2.1243], + device='cuda:5'), covar=tensor([0.1131, 0.2107, 0.1495, 0.1738, 0.1027, 0.1210, 0.2553, 0.0734], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0207, 0.0195, 0.0190, 0.0175, 0.0213, 0.0220, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:48:45,785 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165612.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:48:53,939 INFO [finetune.py:976] (5/7) Epoch 29, batch 5250, loss[loss=0.2335, simple_loss=0.2885, pruned_loss=0.08923, over 4745.00 frames. ], tot_loss[loss=0.1714, simple_loss=0.2438, pruned_loss=0.04951, over 953600.09 frames. 
], batch size: 54, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:49:23,902 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.3128, 1.5619, 0.6551, 2.0112, 2.5451, 1.8373, 2.0052, 2.0031], + device='cuda:5'), covar=tensor([0.1277, 0.2000, 0.2177, 0.1097, 0.1691, 0.1770, 0.1285, 0.1865], + device='cuda:5'), in_proj_covar=tensor([0.0089, 0.0094, 0.0108, 0.0093, 0.0120, 0.0092, 0.0097, 0.0088], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 11:49:26,773 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.319e+01 1.511e+02 1.772e+02 2.250e+02 3.554e+02, threshold=3.544e+02, percent-clipped=0.0 +2023-03-27 11:49:26,788 INFO [finetune.py:976] (5/7) Epoch 29, batch 5300, loss[loss=0.1585, simple_loss=0.2395, pruned_loss=0.03871, over 4757.00 frames. ], tot_loss[loss=0.172, simple_loss=0.245, pruned_loss=0.04956, over 954775.29 frames. ], batch size: 27, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:49:30,291 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5930, 3.7458, 3.5810, 1.6449, 3.9489, 2.9191, 0.7091, 2.7149], + device='cuda:5'), covar=tensor([0.2489, 0.1717, 0.1631, 0.3535, 0.1000, 0.1018, 0.4640, 0.1403], + device='cuda:5'), in_proj_covar=tensor([0.0151, 0.0179, 0.0160, 0.0131, 0.0163, 0.0124, 0.0149, 0.0126], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:5') +2023-03-27 11:50:10,067 INFO [finetune.py:976] (5/7) Epoch 29, batch 5350, loss[loss=0.1403, simple_loss=0.2088, pruned_loss=0.03587, over 4834.00 frames. ], tot_loss[loss=0.1712, simple_loss=0.2448, pruned_loss=0.04878, over 955034.72 frames. ], batch size: 47, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:50:10,163 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5881, 1.4525, 1.8550, 2.9504, 2.0137, 2.1114, 1.0220, 2.5635], + device='cuda:5'), covar=tensor([0.1692, 0.1353, 0.1266, 0.0649, 0.0794, 0.1358, 0.1709, 0.0497], + device='cuda:5'), in_proj_covar=tensor([0.0099, 0.0114, 0.0132, 0.0163, 0.0099, 0.0134, 0.0123, 0.0100], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 11:50:34,276 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165750.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:50:59,737 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.046e+02 1.485e+02 1.788e+02 2.126e+02 3.231e+02, threshold=3.576e+02, percent-clipped=0.0 +2023-03-27 11:50:59,753 INFO [finetune.py:976] (5/7) Epoch 29, batch 5400, loss[loss=0.162, simple_loss=0.2458, pruned_loss=0.03908, over 4903.00 frames. ], tot_loss[loss=0.1689, simple_loss=0.2423, pruned_loss=0.04776, over 954444.64 frames. 
], batch size: 36, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:51:02,244 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.4423, 3.8648, 4.0732, 4.2839, 4.2260, 3.9154, 4.5420, 1.4066], + device='cuda:5'), covar=tensor([0.0818, 0.0945, 0.0959, 0.0950, 0.1156, 0.1906, 0.0684, 0.5989], + device='cuda:5'), in_proj_covar=tensor([0.0356, 0.0249, 0.0289, 0.0298, 0.0339, 0.0288, 0.0307, 0.0304], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:51:02,264 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165779.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:51:08,903 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-03-27 11:51:18,399 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165803.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:51:24,183 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165811.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:51:33,005 INFO [finetune.py:976] (5/7) Epoch 29, batch 5450, loss[loss=0.1454, simple_loss=0.2114, pruned_loss=0.0397, over 4729.00 frames. ], tot_loss[loss=0.1682, simple_loss=0.2405, pruned_loss=0.04801, over 956784.30 frames. ], batch size: 23, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:51:34,302 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=165827.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:51:59,788 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165864.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:52:06,352 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.128e+01 1.456e+02 1.721e+02 2.001e+02 5.868e+02, threshold=3.442e+02, percent-clipped=2.0 +2023-03-27 11:52:06,368 INFO [finetune.py:976] (5/7) Epoch 29, batch 5500, loss[loss=0.1774, simple_loss=0.2407, pruned_loss=0.05706, over 4931.00 frames. ], tot_loss[loss=0.167, simple_loss=0.2382, pruned_loss=0.04786, over 955274.89 frames. ], batch size: 38, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:52:26,489 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-03-27 11:52:26,574 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.66 vs. limit=5.0 +2023-03-27 11:52:27,325 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165907.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:52:40,105 INFO [finetune.py:976] (5/7) Epoch 29, batch 5550, loss[loss=0.2003, simple_loss=0.267, pruned_loss=0.06681, over 4771.00 frames. ], tot_loss[loss=0.168, simple_loss=0.2397, pruned_loss=0.04816, over 956691.88 frames. ], batch size: 59, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:53:22,712 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.003e+02 1.570e+02 1.926e+02 2.203e+02 4.633e+02, threshold=3.853e+02, percent-clipped=1.0 +2023-03-27 11:53:22,729 INFO [finetune.py:976] (5/7) Epoch 29, batch 5600, loss[loss=0.1796, simple_loss=0.2608, pruned_loss=0.04914, over 4907.00 frames. ], tot_loss[loss=0.1693, simple_loss=0.2423, pruned_loss=0.04818, over 954564.50 frames. 
], batch size: 37, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:53:31,051 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3566, 1.2746, 1.4237, 0.7404, 1.4300, 1.5830, 1.6388, 1.2751], + device='cuda:5'), covar=tensor([0.1000, 0.0797, 0.0650, 0.0546, 0.0595, 0.0722, 0.0374, 0.0851], + device='cuda:5'), in_proj_covar=tensor([0.0122, 0.0148, 0.0132, 0.0122, 0.0133, 0.0131, 0.0142, 0.0152], + device='cuda:5'), out_proj_covar=tensor([8.8921e-05, 1.0579e-04, 9.3714e-05, 8.5335e-05, 9.2815e-05, 9.2879e-05, + 1.0103e-04, 1.0886e-04], device='cuda:5') +2023-03-27 11:53:44,619 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-03-27 11:53:54,008 INFO [finetune.py:976] (5/7) Epoch 29, batch 5650, loss[loss=0.1941, simple_loss=0.2654, pruned_loss=0.06143, over 4217.00 frames. ], tot_loss[loss=0.1705, simple_loss=0.2445, pruned_loss=0.04823, over 954638.39 frames. ], batch size: 65, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:53:59,067 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.73 vs. limit=5.0 +2023-03-27 11:53:59,214 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.42 vs. limit=5.0 +2023-03-27 11:54:01,166 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6094, 1.5295, 2.0533, 1.8425, 1.7231, 3.2267, 1.4546, 1.6341], + device='cuda:5'), covar=tensor([0.0930, 0.1577, 0.1271, 0.0812, 0.1435, 0.0247, 0.1324, 0.1617], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0083, 0.0073, 0.0076, 0.0091, 0.0081, 0.0086, 0.0081], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0005], + device='cuda:5') +2023-03-27 11:54:11,187 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166054.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 11:54:22,478 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.1800, 2.7379, 3.3904, 2.3003, 2.8556, 3.2550, 2.4657, 3.2419], + device='cuda:5'), covar=tensor([0.0926, 0.1720, 0.1161, 0.1709, 0.0945, 0.1171, 0.2234, 0.0942], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0206, 0.0192, 0.0187, 0.0173, 0.0212, 0.0218, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:54:23,585 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.094e+01 1.435e+02 1.719e+02 2.053e+02 3.744e+02, threshold=3.437e+02, percent-clipped=0.0 +2023-03-27 11:54:23,601 INFO [finetune.py:976] (5/7) Epoch 29, batch 5700, loss[loss=0.1231, simple_loss=0.1959, pruned_loss=0.02518, over 4426.00 frames. ], tot_loss[loss=0.1672, simple_loss=0.24, pruned_loss=0.04721, over 939297.54 frames. ], batch size: 19, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:54:50,333 INFO [finetune.py:976] (5/7) Epoch 30, batch 0, loss[loss=0.2024, simple_loss=0.2839, pruned_loss=0.06048, over 4916.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2839, pruned_loss=0.06048, over 4916.00 frames. 
], batch size: 33, lr: 2.82e-03, grad_scale: 32.0 +2023-03-27 11:54:50,333 INFO [finetune.py:1001] (5/7) Computing validation loss +2023-03-27 11:54:52,272 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9498, 1.1853, 2.0651, 1.9930, 1.8708, 1.7817, 1.8531, 2.0332], + device='cuda:5'), covar=tensor([0.4041, 0.4114, 0.3600, 0.3723, 0.5119, 0.3946, 0.4747, 0.3095], + device='cuda:5'), in_proj_covar=tensor([0.0270, 0.0250, 0.0269, 0.0301, 0.0300, 0.0277, 0.0306, 0.0255], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:54:58,208 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1706, 1.9373, 1.8085, 1.7780, 1.8930, 1.8698, 1.9058, 2.5788], + device='cuda:5'), covar=tensor([0.3613, 0.4329, 0.3393, 0.3643, 0.3925, 0.2536, 0.3741, 0.1745], + device='cuda:5'), in_proj_covar=tensor([0.0290, 0.0264, 0.0239, 0.0274, 0.0261, 0.0232, 0.0259, 0.0239], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:55:06,737 INFO [finetune.py:1010] (5/7) Epoch 30, validation: loss=0.1598, simple_loss=0.2264, pruned_loss=0.04658, over 2265189.00 frames. +2023-03-27 11:55:06,737 INFO [finetune.py:1011] (5/7) Maximum memory allocated so far is 6648MB +2023-03-27 11:55:11,875 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166106.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:55:21,136 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166115.0, num_to_drop=1, layers_to_drop={3} +2023-03-27 11:55:43,715 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166147.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:55:52,370 INFO [finetune.py:976] (5/7) Epoch 30, batch 50, loss[loss=0.1464, simple_loss=0.231, pruned_loss=0.03094, over 4827.00 frames. ], tot_loss[loss=0.172, simple_loss=0.2481, pruned_loss=0.04801, over 216524.10 frames. ], batch size: 47, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 11:56:02,574 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166159.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:56:16,047 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.060e+01 1.403e+02 1.686e+02 1.983e+02 3.736e+02, threshold=3.372e+02, percent-clipped=1.0 +2023-03-27 11:56:34,675 INFO [finetune.py:976] (5/7) Epoch 30, batch 100, loss[loss=0.1413, simple_loss=0.2178, pruned_loss=0.0324, over 4822.00 frames. ], tot_loss[loss=0.1672, simple_loss=0.2406, pruned_loss=0.0469, over 380455.18 frames. ], batch size: 39, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 11:56:38,188 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166207.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:56:39,321 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166208.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:57:01,540 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-03-27 11:57:07,206 INFO [finetune.py:976] (5/7) Epoch 30, batch 150, loss[loss=0.1574, simple_loss=0.2269, pruned_loss=0.04398, over 4843.00 frames. ], tot_loss[loss=0.1639, simple_loss=0.2355, pruned_loss=0.04617, over 509014.52 frames. 
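
The zipformer.py:2441 attn_weights_entropy tensors are a per-head diagnostic: each value is the entropy of a head's softmaxed attention distribution, so higher numbers mean flatter attention and values near zero mean the head focuses on a single position. The accompanying covar / in_proj_covar / out_proj_covar tensors look like running second-order statistics of the same module; their exact definition is not visible in the log. A generic sketch of the entropy part only, with shapes as assumptions:

    import torch

    def attn_weights_entropy(attn_weights, eps=1e-20):
        # attn_weights: (num_heads, num_queries, num_keys); each row along the
        # last dim is a softmax distribution over keys. Returns the mean
        # entropy per head, in nats.
        p = attn_weights.clamp_min(eps)
        entropy = -(p * p.log()).sum(dim=-1)  # (num_heads, num_queries)
        return entropy.mean(dim=-1)           # (num_heads,)
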
], batch size: 47, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 11:57:08,969 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=166255.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:57:21,829 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.042e+02 1.442e+02 1.804e+02 2.095e+02 4.016e+02, threshold=3.609e+02, percent-clipped=1.0 +2023-03-27 11:57:39,808 INFO [finetune.py:976] (5/7) Epoch 30, batch 200, loss[loss=0.1516, simple_loss=0.2283, pruned_loss=0.03742, over 4808.00 frames. ], tot_loss[loss=0.1622, simple_loss=0.2334, pruned_loss=0.04552, over 607558.40 frames. ], batch size: 41, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 11:57:42,831 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166307.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:57:48,725 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.18 vs. limit=5.0 +2023-03-27 11:58:14,802 INFO [finetune.py:976] (5/7) Epoch 30, batch 250, loss[loss=0.1784, simple_loss=0.2631, pruned_loss=0.0469, over 4757.00 frames. ], tot_loss[loss=0.1663, simple_loss=0.2379, pruned_loss=0.04731, over 684234.74 frames. ], batch size: 54, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 11:58:18,460 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5113, 2.5341, 2.4158, 2.5123, 2.3389, 4.9791, 2.4464, 2.8309], + device='cuda:5'), covar=tensor([0.2747, 0.2125, 0.1727, 0.1982, 0.1274, 0.0114, 0.1924, 0.1011], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0117, 0.0121, 0.0125, 0.0114, 0.0096, 0.0094, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 11:58:26,041 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166368.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:58:26,936 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-03-27 11:58:30,118 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.508e+02 1.830e+02 2.341e+02 4.348e+02, threshold=3.661e+02, percent-clipped=2.0 +2023-03-27 11:58:39,832 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1306, 1.9646, 2.0166, 0.8490, 2.3274, 2.4823, 2.2225, 1.8306], + device='cuda:5'), covar=tensor([0.0890, 0.0749, 0.0567, 0.0753, 0.0478, 0.0738, 0.0501, 0.0812], + device='cuda:5'), in_proj_covar=tensor([0.0121, 0.0147, 0.0131, 0.0121, 0.0131, 0.0130, 0.0141, 0.0151], + device='cuda:5'), out_proj_covar=tensor([8.7934e-05, 1.0476e-04, 9.2951e-05, 8.4790e-05, 9.1881e-05, 9.2105e-05, + 1.0031e-04, 1.0788e-04], device='cuda:5') +2023-03-27 11:58:46,439 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1994, 2.1138, 1.5904, 2.0309, 2.0878, 1.8116, 2.7647, 2.1585], + device='cuda:5'), covar=tensor([0.1580, 0.1973, 0.3496, 0.3083, 0.3045, 0.1931, 0.2253, 0.1975], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0193, 0.0240, 0.0255, 0.0253, 0.0211, 0.0218, 0.0205], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 11:58:48,139 INFO [finetune.py:976] (5/7) Epoch 30, batch 300, loss[loss=0.1322, simple_loss=0.215, pruned_loss=0.02472, over 4913.00 frames. ], tot_loss[loss=0.1682, simple_loss=0.2406, pruned_loss=0.04788, over 743535.21 frames. 
], batch size: 36, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 11:58:50,088 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166406.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:58:52,502 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166410.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 11:59:21,479 INFO [finetune.py:976] (5/7) Epoch 30, batch 350, loss[loss=0.1803, simple_loss=0.2584, pruned_loss=0.0511, over 4918.00 frames. ], tot_loss[loss=0.1693, simple_loss=0.2422, pruned_loss=0.04815, over 790792.13 frames. ], batch size: 33, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 11:59:22,660 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=166454.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:59:25,726 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166459.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:59:37,687 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.068e+02 1.476e+02 1.813e+02 2.161e+02 3.890e+02, threshold=3.626e+02, percent-clipped=2.0 +2023-03-27 11:59:41,437 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166481.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:59:55,112 INFO [finetune.py:976] (5/7) Epoch 30, batch 400, loss[loss=0.1768, simple_loss=0.2409, pruned_loss=0.05639, over 4817.00 frames. ], tot_loss[loss=0.1704, simple_loss=0.2436, pruned_loss=0.04861, over 829052.00 frames. ], batch size: 25, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 11:59:55,185 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166503.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:59:58,013 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=166507.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:59:59,269 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166509.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 11:59:59,348 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-03-27 12:00:20,212 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166531.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:00:35,354 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166542.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:00:45,988 INFO [finetune.py:976] (5/7) Epoch 30, batch 450, loss[loss=0.1633, simple_loss=0.239, pruned_loss=0.04374, over 4846.00 frames. ], tot_loss[loss=0.1686, simple_loss=0.2418, pruned_loss=0.04771, over 857775.69 frames. 
], batch size: 44, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 12:00:57,407 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166570.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:01:00,792 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.283e+01 1.415e+02 1.694e+02 2.083e+02 4.695e+02, threshold=3.388e+02, percent-clipped=2.0 +2023-03-27 12:01:08,425 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2941, 2.1447, 1.7561, 2.0985, 2.1849, 1.9278, 2.4905, 2.2522], + device='cuda:5'), covar=tensor([0.1198, 0.1768, 0.2700, 0.2314, 0.2157, 0.1552, 0.2216, 0.1498], + device='cuda:5'), in_proj_covar=tensor([0.0191, 0.0191, 0.0238, 0.0254, 0.0251, 0.0210, 0.0216, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:01:22,340 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166592.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:01:32,864 INFO [finetune.py:976] (5/7) Epoch 30, batch 500, loss[loss=0.1691, simple_loss=0.2445, pruned_loss=0.04689, over 4893.00 frames. ], tot_loss[loss=0.1667, simple_loss=0.2389, pruned_loss=0.04727, over 879923.80 frames. ], batch size: 35, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 12:01:46,195 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.3068, 2.9328, 3.0630, 3.2406, 3.0791, 2.8693, 3.3530, 1.0285], + device='cuda:5'), covar=tensor([0.1141, 0.1065, 0.1121, 0.1146, 0.1791, 0.2016, 0.1153, 0.5702], + device='cuda:5'), in_proj_covar=tensor([0.0355, 0.0249, 0.0288, 0.0298, 0.0339, 0.0288, 0.0306, 0.0304], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:02:06,082 INFO [finetune.py:976] (5/7) Epoch 30, batch 550, loss[loss=0.1774, simple_loss=0.2405, pruned_loss=0.05716, over 4905.00 frames. ], tot_loss[loss=0.1648, simple_loss=0.2364, pruned_loss=0.04663, over 897293.98 frames. ], batch size: 36, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 12:02:12,198 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166663.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:02:20,414 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 7.871e+01 1.519e+02 1.786e+02 2.255e+02 3.834e+02, threshold=3.573e+02, percent-clipped=3.0 +2023-03-27 12:02:39,362 INFO [finetune.py:976] (5/7) Epoch 30, batch 600, loss[loss=0.1623, simple_loss=0.2424, pruned_loss=0.04113, over 4833.00 frames. ], tot_loss[loss=0.1656, simple_loss=0.237, pruned_loss=0.04705, over 906587.44 frames. ], batch size: 39, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 12:02:43,709 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166710.0, num_to_drop=1, layers_to_drop={2} +2023-03-27 12:02:45,157 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-03-27 12:03:00,925 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166735.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:03:12,732 INFO [finetune.py:976] (5/7) Epoch 30, batch 650, loss[loss=0.1865, simple_loss=0.2718, pruned_loss=0.05061, over 4862.00 frames. ], tot_loss[loss=0.168, simple_loss=0.24, pruned_loss=0.04797, over 917612.37 frames. 
], batch size: 44, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 12:03:15,791 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=166758.0, num_to_drop=1, layers_to_drop={0} +2023-03-27 12:03:19,477 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166764.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:03:26,541 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.144e+02 1.552e+02 1.821e+02 2.218e+02 3.611e+02, threshold=3.642e+02, percent-clipped=1.0 +2023-03-27 12:03:41,568 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166796.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:03:45,828 INFO [finetune.py:976] (5/7) Epoch 30, batch 700, loss[loss=0.1894, simple_loss=0.2735, pruned_loss=0.05261, over 4816.00 frames. ], tot_loss[loss=0.1693, simple_loss=0.2423, pruned_loss=0.04822, over 926016.40 frames. ], batch size: 51, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 12:03:45,913 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166803.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:04:00,331 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166825.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:04:05,752 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0113, 1.9159, 1.6773, 1.8537, 1.8407, 1.8409, 1.8992, 2.5045], + device='cuda:5'), covar=tensor([0.3763, 0.4090, 0.3266, 0.3929, 0.4462, 0.2513, 0.3763, 0.1832], + device='cuda:5'), in_proj_covar=tensor([0.0292, 0.0266, 0.0241, 0.0277, 0.0264, 0.0234, 0.0262, 0.0241], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:04:08,579 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166837.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:04:18,068 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=166851.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:04:19,223 INFO [finetune.py:976] (5/7) Epoch 30, batch 750, loss[loss=0.1585, simple_loss=0.243, pruned_loss=0.03698, over 4902.00 frames. ], tot_loss[loss=0.1698, simple_loss=0.2434, pruned_loss=0.04814, over 933289.08 frames. ], batch size: 43, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 12:04:27,158 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166865.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:04:33,233 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.605e+01 1.409e+02 1.752e+02 2.054e+02 3.389e+02, threshold=3.504e+02, percent-clipped=0.0 +2023-03-27 12:04:41,703 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166887.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:04:52,670 INFO [finetune.py:976] (5/7) Epoch 30, batch 800, loss[loss=0.1604, simple_loss=0.2359, pruned_loss=0.04241, over 4892.00 frames. ], tot_loss[loss=0.1696, simple_loss=0.2431, pruned_loss=0.04803, over 939904.81 frames. ], batch size: 43, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 12:05:09,487 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166929.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:05:32,105 INFO [finetune.py:976] (5/7) Epoch 30, batch 850, loss[loss=0.1383, simple_loss=0.2166, pruned_loss=0.02995, over 4834.00 frames. 
], tot_loss[loss=0.1684, simple_loss=0.2415, pruned_loss=0.04763, over 942969.19 frames. ], batch size: 30, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 12:05:42,368 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166963.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:05:53,841 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.050e+02 1.507e+02 1.769e+02 2.105e+02 4.355e+02, threshold=3.539e+02, percent-clipped=2.0 +2023-03-27 12:06:07,540 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166990.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:06:16,006 INFO [finetune.py:976] (5/7) Epoch 30, batch 900, loss[loss=0.1467, simple_loss=0.2193, pruned_loss=0.03708, over 4748.00 frames. ], tot_loss[loss=0.1654, simple_loss=0.2378, pruned_loss=0.04649, over 945521.81 frames. ], batch size: 27, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 12:06:23,183 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=167011.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:06:31,790 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([0.3098, 1.3917, 1.4809, 0.7579, 1.4782, 1.6821, 1.7038, 1.3728], + device='cuda:5'), covar=tensor([0.0917, 0.0655, 0.0538, 0.0519, 0.0483, 0.0579, 0.0306, 0.0624], + device='cuda:5'), in_proj_covar=tensor([0.0121, 0.0146, 0.0130, 0.0121, 0.0131, 0.0130, 0.0141, 0.0151], + device='cuda:5'), out_proj_covar=tensor([8.8058e-05, 1.0459e-04, 9.2380e-05, 8.4427e-05, 9.1653e-05, 9.1849e-05, + 9.9950e-05, 1.0790e-04], device='cuda:5') +2023-03-27 12:06:59,306 INFO [finetune.py:976] (5/7) Epoch 30, batch 950, loss[loss=0.2194, simple_loss=0.286, pruned_loss=0.07644, over 4841.00 frames. ], tot_loss[loss=0.1661, simple_loss=0.2379, pruned_loss=0.04716, over 947165.70 frames. ], batch size: 49, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 12:07:13,663 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.122e+02 1.522e+02 1.848e+02 2.259e+02 3.601e+02, threshold=3.696e+02, percent-clipped=1.0 +2023-03-27 12:07:24,436 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167091.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:07:31,405 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167100.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:07:31,598 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.90 vs. limit=5.0 +2023-03-27 12:07:33,105 INFO [finetune.py:976] (5/7) Epoch 30, batch 1000, loss[loss=0.1995, simple_loss=0.2549, pruned_loss=0.07209, over 4727.00 frames. ], tot_loss[loss=0.1677, simple_loss=0.2394, pruned_loss=0.04801, over 949615.89 frames. 
], batch size: 23, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 12:07:33,851 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167104.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:07:44,461 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167120.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:07:51,838 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.1544, 1.2316, 1.3918, 1.3419, 1.3464, 2.1711, 1.1387, 1.3044], + device='cuda:5'), covar=tensor([0.0928, 0.1646, 0.1427, 0.0877, 0.1502, 0.0453, 0.1486, 0.1618], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0083, 0.0073, 0.0076, 0.0092, 0.0081, 0.0086, 0.0081], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0005], + device='cuda:5') +2023-03-27 12:07:55,372 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167137.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:08:06,884 INFO [finetune.py:976] (5/7) Epoch 30, batch 1050, loss[loss=0.1987, simple_loss=0.275, pruned_loss=0.06123, over 4845.00 frames. ], tot_loss[loss=0.1691, simple_loss=0.2418, pruned_loss=0.04826, over 951142.58 frames. ], batch size: 47, lr: 2.81e-03, grad_scale: 64.0 +2023-03-27 12:08:11,876 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167161.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:08:14,793 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167165.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:08:14,816 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167165.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:08:21,246 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.094e+02 1.490e+02 1.818e+02 2.230e+02 3.401e+02, threshold=3.636e+02, percent-clipped=0.0 +2023-03-27 12:08:27,352 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=167185.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:08:28,600 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167187.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:08:32,147 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.4236, 2.3738, 2.3725, 1.6826, 2.3172, 2.6755, 2.5967, 2.0188], + device='cuda:5'), covar=tensor([0.0611, 0.0643, 0.0768, 0.0909, 0.1144, 0.0703, 0.0622, 0.1157], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0138, 0.0142, 0.0119, 0.0130, 0.0140, 0.0140, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:08:34,557 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([4.1172, 3.6518, 3.7874, 3.9522, 3.8795, 3.5399, 4.2090, 1.3823], + device='cuda:5'), covar=tensor([0.0835, 0.0777, 0.0851, 0.1052, 0.1286, 0.1699, 0.0676, 0.5611], + device='cuda:5'), in_proj_covar=tensor([0.0356, 0.0248, 0.0288, 0.0299, 0.0340, 0.0288, 0.0307, 0.0304], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:08:39,895 INFO [finetune.py:976] (5/7) Epoch 30, batch 1100, loss[loss=0.1613, simple_loss=0.2475, pruned_loss=0.03752, over 4802.00 frames. ], tot_loss[loss=0.1696, simple_loss=0.243, pruned_loss=0.0481, over 952311.39 frames. 
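
Each zipformer.py:1188 entry is one encoder stack reporting its stochastic layer-drop state: the warmup window (warmup_begin/warmup_end, in batches, staggered per stack), the global batch_count, and which layers it dropped this step. At batch_count around 167k, far past every warmup_end of at most 4000, num_to_drop is almost always 0 and only occasionally 1. A guess at such a schedule, with the probabilities as pure assumptions rather than icefall's actual numbers:

    import random

    def pick_layers_to_drop(batch_count, warmup_begin, warmup_end,
                            num_layers, p_after_warmup=0.05):
        # Hypothetical: drop layers aggressively inside the warmup window,
        # then keep a small per-layer drop probability afterwards (which
        # would explain the occasional num_to_drop=1 late in training).
        if batch_count < warmup_end:
            frac = min(1.0, max(0.0, 1.0 - (batch_count - warmup_begin)
                                / (warmup_end - warmup_begin)))
            num_to_drop = round(frac * num_layers / 2)
        else:
            num_to_drop = sum(random.random() < p_after_warmup
                              for _ in range(num_layers))
        return set(random.sample(range(num_layers), min(num_to_drop, num_layers)))
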
], batch size: 33, lr: 2.81e-03, grad_scale: 64.0 +2023-03-27 12:08:46,986 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=167213.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:08:48,289 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3205, 1.2457, 1.4615, 1.0190, 1.3255, 1.3912, 1.2359, 1.5060], + device='cuda:5'), covar=tensor([0.1022, 0.2089, 0.1252, 0.1432, 0.0728, 0.0994, 0.2667, 0.0736], + device='cuda:5'), in_proj_covar=tensor([0.0192, 0.0207, 0.0194, 0.0190, 0.0174, 0.0213, 0.0219, 0.0199], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:09:01,419 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=167235.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:09:11,631 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1450, 1.8345, 2.2231, 2.1677, 1.9828, 1.9722, 2.1271, 2.1038], + device='cuda:5'), covar=tensor([0.4744, 0.4330, 0.3261, 0.3894, 0.4879, 0.4024, 0.4891, 0.3083], + device='cuda:5'), in_proj_covar=tensor([0.0272, 0.0252, 0.0271, 0.0302, 0.0303, 0.0281, 0.0308, 0.0256], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:09:13,765 INFO [finetune.py:976] (5/7) Epoch 30, batch 1150, loss[loss=0.1597, simple_loss=0.2367, pruned_loss=0.04136, over 4743.00 frames. ], tot_loss[loss=0.1688, simple_loss=0.2428, pruned_loss=0.04738, over 953408.24 frames. ], batch size: 26, lr: 2.81e-03, grad_scale: 64.0 +2023-03-27 12:09:22,736 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=3.84 vs. limit=5.0 +2023-03-27 12:09:28,395 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.511e+02 1.772e+02 2.131e+02 4.281e+02, threshold=3.544e+02, percent-clipped=3.0 +2023-03-27 12:09:34,973 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167285.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:09:47,293 INFO [finetune.py:976] (5/7) Epoch 30, batch 1200, loss[loss=0.1633, simple_loss=0.2429, pruned_loss=0.04189, over 4903.00 frames. ], tot_loss[loss=0.1673, simple_loss=0.2411, pruned_loss=0.04678, over 955556.48 frames. ], batch size: 37, lr: 2.81e-03, grad_scale: 64.0 +2023-03-27 12:10:03,390 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2190, 2.0906, 1.8784, 2.3415, 2.5852, 2.2527, 2.1458, 1.6531], + device='cuda:5'), covar=tensor([0.1974, 0.1905, 0.1849, 0.1526, 0.1887, 0.1107, 0.2013, 0.1778], + device='cuda:5'), in_proj_covar=tensor([0.0248, 0.0213, 0.0217, 0.0200, 0.0247, 0.0192, 0.0219, 0.0207], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:10:15,217 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9309, 1.7820, 1.9758, 1.2266, 1.9703, 1.9917, 1.9350, 1.6200], + device='cuda:5'), covar=tensor([0.0639, 0.0778, 0.0635, 0.0865, 0.0885, 0.0644, 0.0615, 0.1229], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0138, 0.0141, 0.0118, 0.0129, 0.0139, 0.0139, 0.0162], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:10:20,450 INFO [finetune.py:976] (5/7) Epoch 30, batch 1250, loss[loss=0.1517, simple_loss=0.221, pruned_loss=0.04119, over 4829.00 frames. 
], tot_loss[loss=0.1663, simple_loss=0.2391, pruned_loss=0.04674, over 954353.37 frames. ], batch size: 30, lr: 2.81e-03, grad_scale: 64.0 +2023-03-27 12:10:36,983 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.068e+02 1.462e+02 1.711e+02 2.091e+02 4.240e+02, threshold=3.422e+02, percent-clipped=1.0 +2023-03-27 12:10:56,485 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167391.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:11:09,176 INFO [finetune.py:976] (5/7) Epoch 30, batch 1300, loss[loss=0.1815, simple_loss=0.2422, pruned_loss=0.06039, over 4771.00 frames. ], tot_loss[loss=0.1644, simple_loss=0.2364, pruned_loss=0.04618, over 954954.57 frames. ], batch size: 26, lr: 2.81e-03, grad_scale: 64.0 +2023-03-27 12:11:24,589 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167420.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:11:32,428 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167432.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:11:38,998 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=167439.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:11:55,875 INFO [finetune.py:976] (5/7) Epoch 30, batch 1350, loss[loss=0.2071, simple_loss=0.2766, pruned_loss=0.06883, over 4771.00 frames. ], tot_loss[loss=0.1651, simple_loss=0.2368, pruned_loss=0.04667, over 954346.55 frames. ], batch size: 54, lr: 2.81e-03, grad_scale: 64.0 +2023-03-27 12:11:58,263 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167456.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:12:00,695 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167460.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:12:07,070 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=167468.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:12:11,230 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.213e+01 1.429e+02 1.665e+02 1.960e+02 3.889e+02, threshold=3.329e+02, percent-clipped=2.0 +2023-03-27 12:12:18,935 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0268, 2.0621, 1.6656, 1.9209, 1.9193, 1.8382, 1.9249, 2.6364], + device='cuda:5'), covar=tensor([0.3711, 0.3618, 0.3266, 0.3656, 0.3952, 0.2589, 0.3668, 0.1673], + device='cuda:5'), in_proj_covar=tensor([0.0290, 0.0264, 0.0239, 0.0275, 0.0262, 0.0233, 0.0260, 0.0239], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:12:23,212 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167493.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:12:29,688 INFO [finetune.py:976] (5/7) Epoch 30, batch 1400, loss[loss=0.2063, simple_loss=0.2835, pruned_loss=0.06457, over 4858.00 frames. ], tot_loss[loss=0.1677, simple_loss=0.2407, pruned_loss=0.04733, over 954878.14 frames. ], batch size: 44, lr: 2.81e-03, grad_scale: 64.0 +2023-03-27 12:12:55,739 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=96, metric=1.10 vs. 
limit=2.0 +2023-03-27 12:12:56,288 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1797, 2.0341, 1.5552, 0.6881, 1.6887, 1.8067, 1.6889, 1.9276], + device='cuda:5'), covar=tensor([0.0938, 0.0617, 0.1519, 0.1774, 0.1284, 0.2078, 0.1940, 0.0786], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0190, 0.0202, 0.0181, 0.0209, 0.0212, 0.0225, 0.0197], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:13:02,948 INFO [finetune.py:976] (5/7) Epoch 30, batch 1450, loss[loss=0.1583, simple_loss=0.2297, pruned_loss=0.0435, over 4798.00 frames. ], tot_loss[loss=0.1673, simple_loss=0.241, pruned_loss=0.04685, over 953560.47 frames. ], batch size: 25, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 12:13:19,237 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.032e+02 1.535e+02 1.844e+02 2.174e+02 4.375e+02, threshold=3.689e+02, percent-clipped=4.0 +2023-03-27 12:13:25,327 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167585.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:13:30,686 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5271, 1.3890, 1.8791, 1.6816, 1.5597, 3.3504, 1.3682, 1.4746], + device='cuda:5'), covar=tensor([0.0984, 0.1901, 0.1052, 0.0962, 0.1634, 0.0240, 0.1492, 0.1843], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0083, 0.0073, 0.0076, 0.0092, 0.0081, 0.0086, 0.0081], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0005], + device='cuda:5') +2023-03-27 12:13:36,787 INFO [finetune.py:976] (5/7) Epoch 30, batch 1500, loss[loss=0.1997, simple_loss=0.2482, pruned_loss=0.0756, over 4119.00 frames. ], tot_loss[loss=0.1679, simple_loss=0.2419, pruned_loss=0.04697, over 954269.69 frames. ], batch size: 65, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 12:13:57,379 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=167633.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:14:10,434 INFO [finetune.py:976] (5/7) Epoch 30, batch 1550, loss[loss=0.1505, simple_loss=0.2315, pruned_loss=0.03481, over 4936.00 frames. ], tot_loss[loss=0.1681, simple_loss=0.2424, pruned_loss=0.04692, over 953853.31 frames. ], batch size: 33, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 12:14:13,580 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8202, 1.0734, 1.8510, 1.8167, 1.6968, 1.6415, 1.7334, 1.8061], + device='cuda:5'), covar=tensor([0.3580, 0.3565, 0.3053, 0.3308, 0.4174, 0.3615, 0.3901, 0.2856], + device='cuda:5'), in_proj_covar=tensor([0.0269, 0.0250, 0.0268, 0.0299, 0.0300, 0.0278, 0.0306, 0.0254], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:14:26,676 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.931e+01 1.416e+02 1.751e+02 2.105e+02 4.024e+02, threshold=3.503e+02, percent-clipped=1.0 +2023-03-27 12:14:44,010 INFO [finetune.py:976] (5/7) Epoch 30, batch 1600, loss[loss=0.1713, simple_loss=0.2354, pruned_loss=0.05362, over 4821.00 frames. ], tot_loss[loss=0.1661, simple_loss=0.2398, pruned_loss=0.04622, over 954997.60 frames. 
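
The grad_scale field tracks the fp16 loss-scaling factor, and it moves the way dynamic loss scaling normally does: it sits at 32.0, doubles to 64.0 at Epoch 30 batch 1050 above, and is back at 32.0 by batch 1450, consistent with doubling after a long run of overflow-free steps and halving when an overflow is detected. A sketch of that rule in the spirit of torch.cuda.amp.GradScaler; the growth interval here is an assumption:

    class DynamicGradScaler:
        # Minimal dynamic loss scaling: halve on overflow, double after a
        # sustained run of finite gradients.
        def __init__(self, scale=32.0, growth_interval=2000):
            self.scale = scale
            self.growth_interval = growth_interval
            self.good_steps = 0

        def update(self, found_inf):
            if found_inf:
                self.scale /= 2.0  # overflow: back off and start counting again
                self.good_steps = 0
            else:
                self.good_steps += 1
                if self.good_steps == self.growth_interval:
                    self.scale *= 2.0  # stable: safe to raise the scale
                    self.good_steps = 0
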
], batch size: 39, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 12:14:54,741 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167719.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:14:55,957 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8244, 1.7720, 1.6682, 1.7646, 1.6942, 3.7994, 1.6135, 2.0220], + device='cuda:5'), covar=tensor([0.3223, 0.2560, 0.2128, 0.2327, 0.1480, 0.0186, 0.2519, 0.1241], + device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0120, 0.0124, 0.0113, 0.0095, 0.0094, 0.0095], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 12:15:03,493 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9578, 1.8602, 1.6518, 1.9822, 2.3998, 2.0884, 1.8028, 1.5969], + device='cuda:5'), covar=tensor([0.1776, 0.1753, 0.1690, 0.1364, 0.1375, 0.1047, 0.2082, 0.1579], + device='cuda:5'), in_proj_covar=tensor([0.0249, 0.0213, 0.0217, 0.0200, 0.0247, 0.0193, 0.0219, 0.0208], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:15:17,588 INFO [finetune.py:976] (5/7) Epoch 30, batch 1650, loss[loss=0.1661, simple_loss=0.2337, pruned_loss=0.04927, over 4899.00 frames. ], tot_loss[loss=0.1642, simple_loss=0.2373, pruned_loss=0.04557, over 956315.99 frames. ], batch size: 35, lr: 2.81e-03, grad_scale: 32.0 +2023-03-27 12:15:19,513 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167756.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:15:21,892 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167760.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:15:26,700 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167767.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:15:32,526 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.722e+01 1.428e+02 1.633e+02 1.926e+02 4.440e+02, threshold=3.266e+02, percent-clipped=1.0 +2023-03-27 12:15:36,046 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167780.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:15:41,287 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167788.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:15:55,584 INFO [finetune.py:976] (5/7) Epoch 30, batch 1700, loss[loss=0.1776, simple_loss=0.2434, pruned_loss=0.05596, over 4683.00 frames. ], tot_loss[loss=0.1624, simple_loss=0.2352, pruned_loss=0.04476, over 956917.12 frames. ], batch size: 23, lr: 2.80e-03, grad_scale: 32.0 +2023-03-27 12:15:56,725 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=167804.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:16:03,528 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. 
limit=2.0 +2023-03-27 12:16:03,660 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=167808.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:16:14,407 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2843, 1.4257, 1.6815, 1.5077, 1.6063, 3.0189, 1.4345, 1.5556], + device='cuda:5'), covar=tensor([0.1041, 0.1879, 0.1127, 0.1003, 0.1654, 0.0296, 0.1514, 0.1806], + device='cuda:5'), in_proj_covar=tensor([0.0075, 0.0082, 0.0073, 0.0076, 0.0091, 0.0081, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0005], + device='cuda:5') +2023-03-27 12:16:25,656 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167828.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:16:25,669 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1467, 2.0824, 1.7161, 1.7649, 2.1347, 1.8594, 2.2406, 2.2208], + device='cuda:5'), covar=tensor([0.1297, 0.1863, 0.2775, 0.2642, 0.2314, 0.1655, 0.2869, 0.1488], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0191, 0.0239, 0.0253, 0.0250, 0.0210, 0.0216, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:16:42,121 INFO [finetune.py:976] (5/7) Epoch 30, batch 1750, loss[loss=0.1666, simple_loss=0.2538, pruned_loss=0.03972, over 4821.00 frames. ], tot_loss[loss=0.1653, simple_loss=0.2382, pruned_loss=0.04621, over 956136.19 frames. ], batch size: 40, lr: 2.80e-03, grad_scale: 32.0 +2023-03-27 12:16:57,048 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.117e+02 1.486e+02 1.829e+02 2.140e+02 4.770e+02, threshold=3.658e+02, percent-clipped=2.0 +2023-03-27 12:17:25,522 INFO [finetune.py:976] (5/7) Epoch 30, batch 1800, loss[loss=0.167, simple_loss=0.2458, pruned_loss=0.04407, over 4763.00 frames. ], tot_loss[loss=0.1672, simple_loss=0.2409, pruned_loss=0.04672, over 956228.41 frames. ], batch size: 28, lr: 2.80e-03, grad_scale: 32.0 +2023-03-27 12:17:57,292 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.55 vs. limit=5.0 +2023-03-27 12:17:58,719 INFO [finetune.py:976] (5/7) Epoch 30, batch 1850, loss[loss=0.1876, simple_loss=0.2663, pruned_loss=0.05446, over 4926.00 frames. ], tot_loss[loss=0.1689, simple_loss=0.243, pruned_loss=0.04744, over 955892.22 frames. ], batch size: 38, lr: 2.80e-03, grad_scale: 32.0 +2023-03-27 12:18:09,501 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.9578, 1.8168, 2.3374, 1.5407, 2.0655, 2.2799, 1.6224, 2.3499], + device='cuda:5'), covar=tensor([0.1369, 0.2181, 0.1503, 0.1999, 0.1050, 0.1549, 0.2957, 0.0873], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0206, 0.0193, 0.0188, 0.0173, 0.0211, 0.0218, 0.0198], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:18:13,648 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.002e+02 1.458e+02 1.787e+02 2.144e+02 3.700e+02, threshold=3.573e+02, percent-clipped=1.0 +2023-03-27 12:18:33,561 INFO [finetune.py:976] (5/7) Epoch 30, batch 1900, loss[loss=0.1615, simple_loss=0.2425, pruned_loss=0.04021, over 4807.00 frames. ], tot_loss[loss=0.1691, simple_loss=0.2434, pruned_loss=0.04743, over 955456.22 frames. 
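
The scaling.py:679 "Whitening" entries compare a measured anisotropy metric against a limit (e.g. metric=1.97 vs. limit=2.0 just above): the metric is 1.0 when the activations' per-group covariance is perfectly white (all eigenvalues equal) and grows as a few directions dominate, and the limit is presumably where the module starts pushing activations back toward a whitened covariance. One plausible definition of such a metric, sketched here; the real scaling.py may compute it differently:

    import torch

    def whitening_metric(x, num_groups):
        # x: (num_frames, num_channels). Split channels into num_groups groups
        # and measure mean(eig^2) / mean(eig)^2 of each group's covariance:
        # exactly 1.0 for an isotropic (white) covariance, larger otherwise.
        metrics = []
        for g in x.chunk(num_groups, dim=-1):
            g = g - g.mean(dim=0, keepdim=True)
            cov = g.t() @ g / g.shape[0]
            eigs = torch.linalg.eigvalsh(cov)
            metrics.append(eigs.pow(2).mean() / eigs.mean().pow(2))
        return torch.stack(metrics).max()
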
], batch size: 39, lr: 2.80e-03, grad_scale: 32.0 +2023-03-27 12:18:55,002 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168035.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:18:58,563 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5635, 2.4125, 2.1301, 1.0485, 2.2404, 1.9805, 1.9075, 2.2990], + device='cuda:5'), covar=tensor([0.0874, 0.0751, 0.1509, 0.2161, 0.1367, 0.2288, 0.2133, 0.0920], + device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0189, 0.0202, 0.0181, 0.0209, 0.0210, 0.0224, 0.0196], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:19:06,250 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7521, 1.7390, 1.4602, 1.8592, 2.1198, 1.8454, 1.4434, 1.4545], + device='cuda:5'), covar=tensor([0.2059, 0.1794, 0.1887, 0.1554, 0.1422, 0.1114, 0.2234, 0.1827], + device='cuda:5'), in_proj_covar=tensor([0.0250, 0.0214, 0.0219, 0.0201, 0.0248, 0.0193, 0.0221, 0.0209], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:19:07,332 INFO [finetune.py:976] (5/7) Epoch 30, batch 1950, loss[loss=0.1753, simple_loss=0.2439, pruned_loss=0.05332, over 4807.00 frames. ], tot_loss[loss=0.1683, simple_loss=0.2424, pruned_loss=0.04714, over 957465.50 frames. ], batch size: 40, lr: 2.80e-03, grad_scale: 32.0 +2023-03-27 12:19:14,615 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.40 vs. limit=5.0 +2023-03-27 12:19:19,764 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2491, 2.0351, 1.6647, 0.6825, 1.8021, 1.8692, 1.7263, 1.9637], + device='cuda:5'), covar=tensor([0.0865, 0.0824, 0.1488, 0.1963, 0.1303, 0.2483, 0.2438, 0.0807], + device='cuda:5'), in_proj_covar=tensor([0.0170, 0.0188, 0.0201, 0.0180, 0.0208, 0.0209, 0.0224, 0.0195], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:19:21,548 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168075.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:19:22,074 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.065e+02 1.458e+02 1.758e+02 2.118e+02 3.555e+02, threshold=3.516e+02, percent-clipped=0.0 +2023-03-27 12:19:25,201 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.8392, 1.3128, 0.8256, 1.7684, 2.2781, 1.6210, 1.7611, 1.7464], + device='cuda:5'), covar=tensor([0.1321, 0.1935, 0.1752, 0.1092, 0.1704, 0.1674, 0.1229, 0.1733], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0095, 0.0110, 0.0094, 0.0121, 0.0093, 0.0098, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 12:19:30,463 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168088.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:19:35,819 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168096.0, num_to_drop=1, layers_to_drop={3} +2023-03-27 12:19:40,898 INFO [finetune.py:976] (5/7) Epoch 30, batch 2000, loss[loss=0.1627, simple_loss=0.2257, pruned_loss=0.0499, over 4906.00 frames. ], tot_loss[loss=0.1659, simple_loss=0.2395, pruned_loss=0.04622, over 956669.68 frames. 
], batch size: 43, lr: 2.80e-03, grad_scale: 32.0 +2023-03-27 12:19:54,027 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168123.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:20:01,901 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=168136.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:20:13,570 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.33 vs. limit=5.0 +2023-03-27 12:20:14,574 INFO [finetune.py:976] (5/7) Epoch 30, batch 2050, loss[loss=0.1913, simple_loss=0.2495, pruned_loss=0.06654, over 4901.00 frames. ], tot_loss[loss=0.1623, simple_loss=0.2355, pruned_loss=0.04452, over 953730.37 frames. ], batch size: 43, lr: 2.80e-03, grad_scale: 32.0 +2023-03-27 12:20:29,519 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.016e+02 1.484e+02 1.729e+02 2.115e+02 4.273e+02, threshold=3.459e+02, percent-clipped=3.0 +2023-03-27 12:20:45,092 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.71 vs. limit=2.0 +2023-03-27 12:20:47,507 INFO [finetune.py:976] (5/7) Epoch 30, batch 2100, loss[loss=0.2366, simple_loss=0.3008, pruned_loss=0.08619, over 4814.00 frames. ], tot_loss[loss=0.1627, simple_loss=0.2356, pruned_loss=0.04484, over 955105.67 frames. ], batch size: 40, lr: 2.80e-03, grad_scale: 32.0 +2023-03-27 12:21:19,173 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168233.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:21:41,932 INFO [finetune.py:976] (5/7) Epoch 30, batch 2150, loss[loss=0.2468, simple_loss=0.3059, pruned_loss=0.09385, over 4830.00 frames. ], tot_loss[loss=0.1684, simple_loss=0.2412, pruned_loss=0.04775, over 953728.53 frames. ], batch size: 49, lr: 2.80e-03, grad_scale: 32.0 +2023-03-27 12:21:58,214 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.3782, 1.2309, 1.7518, 1.4673, 1.4695, 3.1258, 1.1622, 1.4257], + device='cuda:5'), covar=tensor([0.1020, 0.1910, 0.1209, 0.1051, 0.1692, 0.0280, 0.1724, 0.1927], + device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0082, 0.0073, 0.0076, 0.0091, 0.0081, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0005], + device='cuda:5') +2023-03-27 12:22:00,991 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.149e+02 1.551e+02 1.892e+02 2.224e+02 4.404e+02, threshold=3.784e+02, percent-clipped=3.0 +2023-03-27 12:22:12,555 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168294.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:22:18,784 INFO [finetune.py:976] (5/7) Epoch 30, batch 2200, loss[loss=0.1901, simple_loss=0.2714, pruned_loss=0.0544, over 4817.00 frames. ], tot_loss[loss=0.1698, simple_loss=0.2431, pruned_loss=0.04823, over 954496.06 frames. 
], batch size: 40, lr: 2.80e-03, grad_scale: 32.0 +2023-03-27 12:22:28,991 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.4652, 1.1511, 0.7622, 1.3704, 1.8609, 0.7882, 1.3377, 1.4293], + device='cuda:5'), covar=tensor([0.1473, 0.1889, 0.1634, 0.1164, 0.2014, 0.1914, 0.1321, 0.1759], + device='cuda:5'), in_proj_covar=tensor([0.0090, 0.0094, 0.0109, 0.0094, 0.0120, 0.0093, 0.0098, 0.0089], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:5') +2023-03-27 12:22:53,771 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5612, 1.4228, 1.4363, 0.9205, 1.4856, 1.5005, 1.4899, 1.3584], + device='cuda:5'), covar=tensor([0.0522, 0.0755, 0.0686, 0.0852, 0.1097, 0.0603, 0.0567, 0.1187], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0138, 0.0142, 0.0120, 0.0129, 0.0139, 0.0139, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:23:02,561 INFO [finetune.py:976] (5/7) Epoch 30, batch 2250, loss[loss=0.1335, simple_loss=0.2186, pruned_loss=0.02417, over 4895.00 frames. ], tot_loss[loss=0.1705, simple_loss=0.2441, pruned_loss=0.0485, over 952958.87 frames. ], batch size: 43, lr: 2.80e-03, grad_scale: 32.0 +2023-03-27 12:23:11,442 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5296, 1.3845, 1.4593, 0.9207, 1.5084, 1.4789, 1.5282, 1.2596], + device='cuda:5'), covar=tensor([0.0607, 0.0840, 0.0749, 0.0946, 0.0985, 0.0773, 0.0604, 0.1480], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0138, 0.0142, 0.0119, 0.0129, 0.0139, 0.0139, 0.0163], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:23:17,400 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168375.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:23:17,908 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.029e+01 1.513e+02 1.826e+02 2.132e+02 3.584e+02, threshold=3.652e+02, percent-clipped=0.0 +2023-03-27 12:23:28,075 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168391.0, num_to_drop=1, layers_to_drop={3} +2023-03-27 12:23:36,282 INFO [finetune.py:976] (5/7) Epoch 30, batch 2300, loss[loss=0.1489, simple_loss=0.2216, pruned_loss=0.03806, over 4927.00 frames. ], tot_loss[loss=0.1706, simple_loss=0.2447, pruned_loss=0.04823, over 952128.94 frames. 
], batch size: 38, lr: 2.80e-03, grad_scale: 32.0 +2023-03-27 12:23:44,079 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([5.0792, 4.3939, 4.6025, 4.9077, 4.7977, 4.5093, 5.1708, 1.4642], + device='cuda:5'), covar=tensor([0.0772, 0.0794, 0.0803, 0.0916, 0.1212, 0.1625, 0.0539, 0.6041], + device='cuda:5'), in_proj_covar=tensor([0.0355, 0.0248, 0.0289, 0.0299, 0.0339, 0.0288, 0.0309, 0.0304], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:23:49,962 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=168423.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:23:49,984 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168423.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:23:59,638 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.7935, 1.5506, 2.2402, 3.5648, 2.4518, 2.4921, 1.0422, 3.0690], + device='cuda:5'), covar=tensor([0.1713, 0.1320, 0.1307, 0.0549, 0.0735, 0.1634, 0.1836, 0.0425], + device='cuda:5'), in_proj_covar=tensor([0.0100, 0.0115, 0.0133, 0.0164, 0.0100, 0.0136, 0.0125, 0.0101], + device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003], + device='cuda:5') +2023-03-27 12:24:07,387 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.0153, 1.9703, 1.7253, 2.1472, 2.4122, 2.1729, 1.7927, 1.6090], + device='cuda:5'), covar=tensor([0.2038, 0.1893, 0.1842, 0.1551, 0.1594, 0.1137, 0.2192, 0.1943], + device='cuda:5'), in_proj_covar=tensor([0.0250, 0.0214, 0.0219, 0.0201, 0.0248, 0.0194, 0.0220, 0.0209], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:24:09,557 INFO [finetune.py:976] (5/7) Epoch 30, batch 2350, loss[loss=0.1447, simple_loss=0.222, pruned_loss=0.03369, over 4899.00 frames. ], tot_loss[loss=0.1677, simple_loss=0.2414, pruned_loss=0.04701, over 954849.86 frames. ], batch size: 32, lr: 2.80e-03, grad_scale: 32.0 +2023-03-27 12:24:21,869 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=168471.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:24:24,773 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.258e+01 1.458e+02 1.699e+02 2.116e+02 4.301e+02, threshold=3.398e+02, percent-clipped=1.0 +2023-03-27 12:24:25,809 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.54 vs. limit=5.0 +2023-03-27 12:24:42,042 INFO [finetune.py:976] (5/7) Epoch 30, batch 2400, loss[loss=0.1468, simple_loss=0.2194, pruned_loss=0.03715, over 4847.00 frames. ], tot_loss[loss=0.1653, simple_loss=0.2384, pruned_loss=0.04614, over 954554.15 frames. ], batch size: 44, lr: 2.80e-03, grad_scale: 32.0 +2023-03-27 12:24:49,126 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.8215, 2.6253, 2.0674, 3.0175, 2.6944, 2.3741, 3.2008, 2.7864], + device='cuda:5'), covar=tensor([0.1254, 0.2191, 0.3101, 0.2219, 0.2541, 0.1626, 0.2615, 0.1699], + device='cuda:5'), in_proj_covar=tensor([0.0190, 0.0191, 0.0238, 0.0252, 0.0250, 0.0209, 0.0216, 0.0204], + device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:5') +2023-03-27 12:25:15,071 INFO [finetune.py:976] (5/7) Epoch 30, batch 2450, loss[loss=0.1793, simple_loss=0.2573, pruned_loss=0.05064, over 4785.00 frames. 
], tot_loss[loss=0.1633, simple_loss=0.2358, pruned_loss=0.04543, over 956220.58 frames. ], batch size: 29, lr: 2.80e-03, grad_scale: 32.0 +2023-03-27 12:25:21,625 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5071, 1.3300, 2.1009, 1.7464, 1.5713, 3.4474, 1.2660, 1.4885], + device='cuda:5'), covar=tensor([0.1033, 0.1914, 0.1175, 0.1004, 0.1671, 0.0263, 0.1673, 0.1893], + device='cuda:5'), in_proj_covar=tensor([0.0074, 0.0082, 0.0073, 0.0076, 0.0091, 0.0081, 0.0085, 0.0080], + device='cuda:5'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0005, 0.0004, 0.0005, 0.0005], + device='cuda:5') +2023-03-27 12:25:30,929 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.020e+02 1.487e+02 1.838e+02 2.245e+02 3.083e+02, threshold=3.676e+02, percent-clipped=0.0 +2023-03-27 12:25:39,365 INFO [zipformer.py:1188] (5/7) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168589.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:25:48,868 INFO [finetune.py:976] (5/7) Epoch 30, batch 2500, loss[loss=0.1954, simple_loss=0.2746, pruned_loss=0.05809, over 4855.00 frames. ], tot_loss[loss=0.1646, simple_loss=0.237, pruned_loss=0.04606, over 957432.99 frames. ], batch size: 44, lr: 2.80e-03, grad_scale: 32.0 +2023-03-27 12:26:00,743 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.2603, 1.2864, 1.1237, 1.2525, 1.5370, 1.4593, 1.2280, 1.1717], + device='cuda:5'), covar=tensor([0.0391, 0.0307, 0.0640, 0.0319, 0.0249, 0.0581, 0.0391, 0.0422], + device='cuda:5'), in_proj_covar=tensor([0.0102, 0.0105, 0.0147, 0.0111, 0.0101, 0.0116, 0.0104, 0.0114], + device='cuda:5'), out_proj_covar=tensor([7.9125e-05, 8.0366e-05, 1.1445e-04, 8.3964e-05, 7.8370e-05, 8.5480e-05, + 7.6823e-05, 8.6434e-05], device='cuda:5') +2023-03-27 12:26:14,832 INFO [scaling.py:679] (5/7) Whitening: num_groups=1, num_channels=384, metric=4.77 vs. limit=5.0 +2023-03-27 12:26:27,844 INFO [finetune.py:976] (5/7) Epoch 30, batch 2550, loss[loss=0.2206, simple_loss=0.3023, pruned_loss=0.06946, over 4726.00 frames. ], tot_loss[loss=0.1691, simple_loss=0.2424, pruned_loss=0.04787, over 958213.02 frames. ], batch size: 59, lr: 2.80e-03, grad_scale: 32.0 +2023-03-27 12:26:55,339 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.157e+02 1.538e+02 1.821e+02 2.163e+02 4.307e+02, threshold=3.641e+02, percent-clipped=2.0 +2023-03-27 12:26:57,246 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.5823, 1.4739, 1.4394, 1.4841, 1.1968, 3.2850, 1.4614, 1.6820], + device='cuda:5'), covar=tensor([0.3876, 0.3028, 0.2394, 0.2901, 0.1766, 0.0299, 0.2721, 0.1250], + device='cuda:5'), in_proj_covar=tensor([0.0132, 0.0116, 0.0120, 0.0124, 0.0113, 0.0095, 0.0094, 0.0094], + device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004], + device='cuda:5') +2023-03-27 12:27:09,678 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168691.0, num_to_drop=0, layers_to_drop=set() +2023-03-27 12:27:17,865 INFO [finetune.py:976] (5/7) Epoch 30, batch 2600, loss[loss=0.1911, simple_loss=0.2596, pruned_loss=0.06124, over 4815.00 frames. ], tot_loss[loss=0.1695, simple_loss=0.2432, pruned_loss=0.04796, over 952636.44 frames. 
+2023-03-27 12:27:17,992 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.5856, 2.1154, 2.5376, 1.8883, 2.2974, 2.7376, 1.9830, 2.8013],
+       device='cuda:5'), covar=tensor([0.0900, 0.1812, 0.1177, 0.1650, 0.0838, 0.1112, 0.2585, 0.0615],
+       device='cuda:5'), in_proj_covar=tensor([0.0188, 0.0204, 0.0190, 0.0187, 0.0172, 0.0209, 0.0216, 0.0195],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-27 12:27:36,296 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1111, 2.0341, 1.7555, 1.9571, 1.9234, 1.9116, 1.9302, 2.6674],
+       device='cuda:5'), covar=tensor([0.3562, 0.3887, 0.3152, 0.3514, 0.4016, 0.2301, 0.3523, 0.1536],
+       device='cuda:5'), in_proj_covar=tensor([0.0290, 0.0264, 0.0240, 0.0275, 0.0263, 0.0233, 0.0260, 0.0240],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-27 12:27:44,435 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=168739.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 12:28:01,794 INFO [finetune.py:976] (5/7) Epoch 30, batch 2650, loss[loss=0.1133, simple_loss=0.184, pruned_loss=0.0213, over 4702.00 frames. ], tot_loss[loss=0.1685, simple_loss=0.2427, pruned_loss=0.04718, over 952181.54 frames. ], batch size: 23, lr: 2.80e-03, grad_scale: 32.0
+2023-03-27 12:28:21,696 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 1.069e+02 1.510e+02 1.724e+02 1.979e+02 3.263e+02, threshold=3.448e+02, percent-clipped=0.0
+2023-03-27 12:28:28,289 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.6602, 3.8740, 3.6171, 1.8436, 3.9861, 2.9729, 1.0073, 2.7523],
+       device='cuda:5'), covar=tensor([0.2318, 0.2098, 0.1480, 0.3134, 0.0990, 0.0945, 0.4289, 0.1469],
+       device='cuda:5'), in_proj_covar=tensor([0.0152, 0.0179, 0.0160, 0.0130, 0.0163, 0.0124, 0.0148, 0.0126],
+       device='cuda:5'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+       device='cuda:5')
+2023-03-27 12:28:43,724 INFO [finetune.py:976] (5/7) Epoch 30, batch 2700, loss[loss=0.1464, simple_loss=0.2269, pruned_loss=0.03295, over 4832.00 frames. ], tot_loss[loss=0.1669, simple_loss=0.2409, pruned_loss=0.04649, over 951972.50 frames. ], batch size: 49, lr: 2.80e-03, grad_scale: 32.0
+2023-03-27 12:28:53,845 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([1.6576, 1.5191, 1.4898, 1.5690, 1.2667, 3.3627, 1.4623, 1.6785],
+       device='cuda:5'), covar=tensor([0.3803, 0.2903, 0.2361, 0.2790, 0.1678, 0.0279, 0.2554, 0.1221],
+       device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0116, 0.0120, 0.0123, 0.0113, 0.0094, 0.0093, 0.0094],
+       device='cuda:5'), out_proj_covar=tensor([0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0004, 0.0005, 0.0004],
+       device='cuda:5')
+2023-03-27 12:29:09,756 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([3.7956, 3.3052, 3.4935, 3.6566, 3.5897, 3.3808, 3.8430, 1.2546],
+       device='cuda:5'), covar=tensor([0.0835, 0.0903, 0.0966, 0.0949, 0.1174, 0.1552, 0.0957, 0.5658],
+       device='cuda:5'), in_proj_covar=tensor([0.0354, 0.0248, 0.0287, 0.0298, 0.0337, 0.0287, 0.0307, 0.0303],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-27 12:29:17,015 INFO [finetune.py:976] (5/7) Epoch 30, batch 2750, loss[loss=0.1746, simple_loss=0.2387, pruned_loss=0.05521, over 4865.00 frames. ], tot_loss[loss=0.1656, simple_loss=0.2388, pruned_loss=0.04623, over 953891.46 frames. ], batch size: 34, lr: 2.80e-03, grad_scale: 32.0
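
Each finetune.py:976 entry reports three loss figures: pruned_loss from the pruned RNN-T recursion, simple_loss from the cheaper linear-boundary recursion used to locate the pruning bounds, and their weighted sum loss. The logged values are consistent with a simple-loss weight of 0.5 and a pruned-loss weight of 1.0; for batch 2700 above, 0.5 x 0.2409 + 0.04649 ~= 0.1669. A minimal sketch of that combination (the warmup-dependent scaling that applies early in training is omitted, since this log is long past warm-up, and the function name is illustrative):

def total_loss(simple_loss: float, pruned_loss: float,
               simple_loss_scale: float = 0.5,
               pruned_loss_scale: float = 1.0) -> float:
    # Reproduces the logged tot_loss, e.g. batch 2700 above:
    # 0.5 * 0.2409 + 1.0 * 0.04649 = 0.16694 ~= 0.1669
    return simple_loss_scale * simple_loss + pruned_loss_scale * pruned_loss
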
+2023-03-27 12:29:31,786 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.1541, 1.9902, 2.1866, 1.6501, 2.1423, 2.2542, 2.3358, 1.7619],
+       device='cuda:5'), covar=tensor([0.0566, 0.0751, 0.0701, 0.0807, 0.0681, 0.0633, 0.0517, 0.1150],
+       device='cuda:5'), in_proj_covar=tensor([0.0131, 0.0138, 0.0142, 0.0119, 0.0129, 0.0139, 0.0139, 0.0162],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-27 12:29:32,259 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.461e+01 1.436e+02 1.670e+02 1.989e+02 2.987e+02, threshold=3.340e+02, percent-clipped=0.0
+2023-03-27 12:29:41,551 INFO [zipformer.py:1188] (5/7) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168889.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 12:29:50,503 INFO [finetune.py:976] (5/7) Epoch 30, batch 2800, loss[loss=0.1627, simple_loss=0.2328, pruned_loss=0.04632, over 4824.00 frames. ], tot_loss[loss=0.1628, simple_loss=0.2352, pruned_loss=0.04518, over 952730.28 frames. ], batch size: 40, lr: 2.80e-03, grad_scale: 32.0
+2023-03-27 12:30:13,017 INFO [zipformer.py:1188] (5/7) warmup_begin=666.7, warmup_end=1333.3, batch_count=168937.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 12:30:23,990 INFO [finetune.py:976] (5/7) Epoch 30, batch 2850, loss[loss=0.1876, simple_loss=0.256, pruned_loss=0.05961, over 4737.00 frames. ], tot_loss[loss=0.1646, simple_loss=0.2363, pruned_loss=0.04641, over 952210.47 frames. ], batch size: 59, lr: 2.80e-03, grad_scale: 32.0
+2023-03-27 12:30:33,493 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168967.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 12:30:34,686 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168969.0, num_to_drop=1, layers_to_drop={0}
+2023-03-27 12:30:38,882 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 8.606e+01 1.456e+02 1.728e+02 2.104e+02 5.266e+02, threshold=3.457e+02, percent-clipped=2.0
+2023-03-27 12:30:57,839 INFO [finetune.py:976] (5/7) Epoch 30, batch 2900, loss[loss=0.1676, simple_loss=0.2508, pruned_loss=0.04225, over 4893.00 frames. ], tot_loss[loss=0.1673, simple_loss=0.2398, pruned_loss=0.04741, over 954098.54 frames. ], batch size: 32, lr: 2.80e-03, grad_scale: 32.0
+2023-03-27 12:31:14,730 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169028.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 12:31:15,945 INFO [zipformer.py:1188] (5/7) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169030.0, num_to_drop=1, layers_to_drop={0}
+2023-03-27 12:31:18,992 INFO [zipformer.py:2441] (5/7) attn_weights_entropy = tensor([2.2583, 2.0331, 1.5119, 0.7408, 1.7062, 1.9336, 1.8071, 1.8865],
+       device='cuda:5'), covar=tensor([0.0825, 0.0702, 0.1337, 0.1744, 0.1254, 0.1929, 0.2063, 0.0772],
+       device='cuda:5'), in_proj_covar=tensor([0.0171, 0.0188, 0.0201, 0.0180, 0.0209, 0.0210, 0.0224, 0.0196],
+       device='cuda:5'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:5')
+2023-03-27 12:31:31,770 INFO [finetune.py:976] (5/7) Epoch 30, batch 2950, loss[loss=0.1715, simple_loss=0.2507, pruned_loss=0.04619, over 4812.00 frames. ], tot_loss[loss=0.1701, simple_loss=0.2437, pruned_loss=0.0483, over 956643.62 frames. ], batch size: 40, lr: 2.80e-03, grad_scale: 32.0
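
The zipformer.py:1188 lines trace stochastic layer skipping in each encoder stack: on some forward passes whole layers are bypassed, and the log records how many (num_to_drop) and which (layers_to_drop). warmup_begin/warmup_end are per-stack schedule points in batch counts; at batch_count ~= 168900, far past warm-up, drops are rare but still occur (num_to_drop=1 at batches 168969 and 169030 above). A sketch of one plausible schedule, assuming the drop probability ramps down from a higher warm-up rate to a small residual rate; the function name and the specific rates are illustrative assumptions, not the zipformer.py values:

import random

def pick_layers_to_drop(batch_count: float, warmup_begin: float,
                        warmup_end: float, num_layers: int,
                        rng: random.Random) -> set:
    if batch_count < warmup_begin:
        p = 0.5                          # illustrative warm-up drop rate
    elif batch_count < warmup_end:
        frac = (batch_count - warmup_begin) / (warmup_end - warmup_begin)
        p = 0.5 + frac * (0.05 - 0.5)    # linear ramp down across the window
    else:
        p = 0.05                         # small residual rate, so an occasional
                                         # num_to_drop=1 still appears late in training
    return {i for i in range(num_layers) if rng.random() < p}
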
+2023-03-27 12:31:47,449 INFO [scaling.py:679] (5/7) Whitening: num_groups=8, num_channels=192, metric=1.52 vs. limit=2.0
+2023-03-27 12:31:49,077 INFO [optim.py:369] (5/7) Clipping_scale=2.0, grad-norm quartiles 9.406e+01 1.615e+02 1.887e+02 2.255e+02 4.054e+02, threshold=3.773e+02, percent-clipped=1.0
+2023-03-27 12:31:53,286 INFO [zipformer.py:1188] (5/7) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169082.0, num_to_drop=0, layers_to_drop=set()
+2023-03-27 12:32:19,267 INFO [finetune.py:976] (5/7) Epoch 30, batch 3000, loss[loss=0.1627, simple_loss=0.241, pruned_loss=0.04221, over 4807.00 frames. ], tot_loss[loss=0.1698, simple_loss=0.2433, pruned_loss=0.04817, over 955174.43 frames. ], batch size: 25, lr: 2.80e-03, grad_scale: 32.0
+2023-03-27 12:32:19,267 INFO [finetune.py:1001] (5/7) Computing validation loss
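
The scaling.py:679 lines come from a whitening regularizer that monitors how decorrelated a module's channel activations are and only applies a corrective penalty when the metric exceeds the logged limit (here 1.52 vs. limit=2.0, so no penalty fires). One plausible formulation of the metric, assuming it measures the eigenvalue spread of the per-group channel covariance, so that it equals 1.0 for perfectly white features and grows as channels correlate; treat this as a sketch rather than the exact scaling.py code:

import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    # x: (num_frames, num_channels) activations; channels are split into
    # num_groups groups (cf. num_groups=8, num_channels=192 in the log).
    (num_frames, num_channels) = x.shape
    assert num_channels % num_groups == 0
    x = x.reshape(num_frames, num_groups, num_channels // num_groups).transpose(0, 1)
    covar = torch.matmul(x.transpose(1, 2), x) / num_frames  # (num_groups, c, c)
    eigs = torch.linalg.eigvalsh(covar)
    # mean(eig^2) / mean(eig)^2 == 1.0 iff all eigenvalues are equal (white),
    # and increases as the covariance becomes anisotropic.
    return (eigs ** 2).mean() / (eigs.mean() ** 2)
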